diff --git a/.backportrc.json b/.backportrc.json index 72f12ec360..1d0daf7b32 100644 --- a/.backportrc.json +++ b/.backportrc.json @@ -1,15 +1,14 @@ { - "repoOwner": "spectrocloud", - "repoName": "librarium", - "editor": "code", - - - "targetBranchChoices": ["master", "version-4-0", "version-3-4", "version-3-3"], - - "autoMerge": true, - "autoMergeMethod": "squash", - - "branchLabelMapping": { - "^backport-(.+)$": "$1" - } - } \ No newline at end of file + "repoOwner": "spectrocloud", + "repoName": "librarium", + "editor": "code", + + "targetBranchChoices": ["master", "version-4-0", "version-3-4"], + + "autoMerge": true, + "autoMergeMethod": "squash", + + "branchLabelMapping": { + "^backport-(.+)$": "$1" + } +} diff --git a/.dockerignore b/.dockerignore index 8f62ede6eb..c75f282c3f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,4 +3,5 @@ README.md CHANGES .gitignore .cache -prow/ \ No newline at end of file +prow/ +.env \ No newline at end of file diff --git a/.eslintignore b/.eslintignore new file mode 100644 index 0000000000..b4393ae61c --- /dev/null +++ b/.eslintignore @@ -0,0 +1,25 @@ +build +node_modules +src/deprecated + +.docusaurus +sidebar.js +docusaurus.config.js +*.config.js + +# swizzled components +src/theme/SchemaItem/index.js +src/theme/CodeBlock/** +DocSidebarItem + +#test files + +*.test.tsx +*.test.ts + +*.test.js +*.test.jsx + +__mocks__ + +jest.setup.ts diff --git a/.eslintrc.js b/.eslintrc.js new file mode 100644 index 0000000000..e9ce60dd7d --- /dev/null +++ b/.eslintrc.js @@ -0,0 +1,50 @@ +module.exports = { + env: { + browser: true, + es2015: true, + node: true, + }, + settings: { + "import/resolver": "webpack", + }, + extends: ["eslint:recommended", "plugin:react/recommended", "prettier"], + overrides: [ + { + files: ["./**/*.{ts,tsx}"], + env: { browser: true, es6: true, node: true }, + extends: [ + "eslint:recommended", + "plugin:@typescript-eslint/recommended", + "plugin:@typescript-eslint/eslint-recommended", + "plugin:@typescript-eslint/recommended-type-checked", + ], + parser: "@typescript-eslint/parser", + parserOptions: { + ecmaFeatures: { jsx: true, experimentalObjectRestSpread: true }, + ecmaVersion: 2020, + sourceType: "module", + project: "./tsconfig.eslint.json", + }, + plugins: ["@typescript-eslint"], + rules: { + "@typescript-eslint/no-explicit-any": "off", + "@typescript-eslint/no-unsafe-assignment": "off", + }, + overrides: [ + { + files: ["*.js", "*.jsx"], + extends: ["plugin:@typescript-eslint/disable-type-checked"], + }, + ], + }, + ], + parserOptions: { + sourceType: "module", + ecmaVersion: 2020, + }, + plugins: ["react"], + rules: { + "no-unused-vars": 1, + "react/prop-types": 0, + }, +}; diff --git a/.eslintrc.json b/.eslintrc.json deleted file mode 100755 index 3f8c7f7605..0000000000 --- a/.eslintrc.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "extends": [ - "react-app", - "prettier", - "plugin:prettier/recommended", - "plugin:react/recommended", - "plugin:jsx-a11y/recommended" - ], - "plugins": [ - "prettier" - ], - "rules": { - "prettier/prettier": 1, - "react/prop-types": 0, - "prefer-destructuring": 0, - "no-underscore-dangle": 0, - "import/prefer-default-export": 0, - "jsx-a11y/no-autofocus": [ - 1, - { - "ignoreNonDOM": true - } - ], - "no-async-promise-executor": 0, - "no-unused-vars": 1, - "no-console": [ - 1, - { - "allow": [ - "warn", - "error", - "trace" - ] - } - ], - "no-param-reassign": [ - "error", - { - "props": false - } - ] - }, - "settings": { - "import/resolver": { - "node": { - "paths": [ - "src", - "src/shared" - ] - 
} - } - }, - "env": { - "browser": true, - "es6": true, - "node": true - } -} \ No newline at end of file diff --git a/.gitattributes b/.gitattributes deleted file mode 100644 index 526c8a38d4..0000000000 --- a/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -*.sh text eol=lf \ No newline at end of file diff --git a/.github/workflows/aloglia_crawler.yaml b/.github/workflows/aloglia_crawler.yaml new file mode 100644 index 0000000000..2d5bbff84d --- /dev/null +++ b/.github/workflows/aloglia_crawler.yaml @@ -0,0 +1,31 @@ +name: Algolia Crawler + +on: + workflow_run: + workflows: ["Release to Production"] + types: [completed] +jobs: + docsearch: + runs-on: + group: Default + labels: docbot + if: ${{ github.event.workflow_run.conclusion == 'success' }} + steps: + - name: Checkout repo + uses: actions/checkout@v4 + + - name: Run scraper + env: + APPLICATION_ID: ${{ secrets.ALGOLIA_APP_ID }} + API_KEY: ${{ secrets.ALGOLIA_ADMIN_KEY }} + run: docker run -e APPLICATION_ID=$APPLICATION_ID -e API_KEY=$API_KEY -e "CONFIG=$(cat ./docsearch.config.json | jq -r tostring)" algolia/docsearch-scraper + + - name: Slack Notification + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_USERNAME: "spectromate" + SLACK_ICON_EMOJI: ":robot_panic:" + SLACK_COLOR: ${{ job.status }} + SLACK_MESSAGE: 'The Docs Algolia crawler job failed. Please check the GitHub Actions logs for more details.' \ No newline at end of file diff --git a/.github/workflows/backport.yaml b/.github/workflows/backport.yaml index 47c922f88d..232af309df 100644 --- a/.github/workflows/backport.yaml +++ b/.github/workflows/backport.yaml @@ -16,18 +16,40 @@ jobs: || (github.event.action == 'closed') ) steps: + + - name: Retrieve Credentials + id: import-secrets + uses: hashicorp/vault-action@v2.7.3 + with: + url: https://vault.prism.spectrocloud.com + method: approle + roleId: ${{ secrets.VAULT_ROLE_ID }} + secretId: ${{ secrets.VAULT_SECRET_ID }} + secrets: /providers/github/organizations/spectrocloud/token?org_name=spectrocloud token | VAULT_GITHUB_TOKEN + - name: Backport Action - uses: sqren/backport-github-action@v8.9.3 + uses: sqren/backport-github-action@v9.3.0-a with: - github_token: ${{ secrets.GITHUB_TOKEN }} + # We are using a PAT token through our Vault Operator to address the issue of PRs workflows not being triggered + # Refer to issue https://github.com/sqren/backport-github-action/issues/79 for more details. + github_token: ${{ steps.import-secrets.outputs.VAULT_GITHUB_TOKEN }} auto_backport_label_prefix: backport- add_original_reviewers: true - name: Info log if: ${{ success() }} run: cat ~/.backport/backport.info.log - + - name: Debug log if: ${{ failure() }} - run: cat ~/.backport/backport.debug.log - \ No newline at end of file + run: cat ~/.backport/backport.debug.log + + - name: Slack Notification + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_USERNAME: "spectromate" + SLACK_ICON_EMOJI: ":robot_panic:" + SLACK_COLOR: ${{ job.status }} + SLACK_MESSAGE: 'The backport automation flow failed. Review the GitHub Actions logs for more details.' 
diff --git a/.github/workflows/eslint-test.yaml b/.github/workflows/eslint-test.yaml new file mode 100644 index 0000000000..7d9a803917 --- /dev/null +++ b/.github/workflows/eslint-test.yaml @@ -0,0 +1,47 @@ +name: Eslint and Test cases + +on: + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + paths: + - "src/**" + +concurrency: + group: test-${{ github.ref }} + cancel-in-progress: true + +jobs: + run-ci: + runs-on: ubuntu-latest + defaults: + run: + shell: bash + if: ${{ !github.event.pull_request.draft }} + steps: + - run: exit 0 + + build: + name: Build + needs: [run-ci] + runs-on: + group: Default + labels: docbot + if: ${{ !github.event.pull_request.draft }} + steps: + - name: Checkout Repository + uses: actions/checkout@v3 + + - name: Setup Node.js environment + uses: actions/setup-node@v3 + with: + node-version: "18" + cache: "npm" + + - run: npm ci + + - name: Eslint + run: | + npm run lint + - name: Test + run: | + npm run test diff --git a/.github/workflows/gitleaks.yaml b/.github/workflows/gitleaks.yaml index c745e1e518..8cebf33a1d 100644 --- a/.github/workflows/gitleaks.yaml +++ b/.github/workflows/gitleaks.yaml @@ -30,6 +30,6 @@ jobs: if [ "$total_failed_tests" -gt 0 ]; then echo "GitLeaks validation check failed with above findings..." exit 1 - else + else echo "GitLeaks validation check passed" - fi \ No newline at end of file + fi diff --git a/.github/workflows/image_optimizer.yaml b/.github/workflows/image_optimizer.yaml index 0798be1a0a..e19e6f2e7c 100644 --- a/.github/workflows/image_optimizer.yaml +++ b/.github/workflows/image_optimizer.yaml @@ -3,10 +3,10 @@ name: image_optimizer on: pull_request: paths: - - '**.jpg' - - '**.jpeg' - - '**.png' - - '**.webp' + - "**.jpg" + - "**.jpeg" + - "**.png" + - "**.webp" concurrency: group: image-${{ github.ref }} @@ -20,15 +20,17 @@ jobs: shell: bash if: ${{ !github.event.pull_request.draft }} steps: - # If the condition above is not met, aka, the PR is not in draft status, then this step is skipped. - # Because this step is part of the critical path, omission of this step will result in remaining CI steps not gettinge executed. - # As of 8/8/2022 there is now way to enforce this beahvior in GitHub Actions CI. + # If the condition above is not met, aka, the PR is not in draft status, then this step is skipped. + # Because this step is part of the critical path, omission of this step will result in remaining CI steps not gettinge executed. + # As of 8/8/2022 there is now way to enforce this beahvior in GitHub Actions CI. - run: exit 0 image-optimizer: name: Image Optimization needs: [run-ci] - runs-on: ubuntu-latest + runs-on: + group: Default + labels: docbot if: ${{ !github.event.pull_request.draft }} steps: - name: Checkout Repository @@ -38,4 +40,4 @@ jobs: uses: calibreapp/image-actions@main with: githubToken: ${{ secrets.GITHUB_TOKEN }} - ignorePaths: 'node_modules/**,build' + ignorePaths: "node_modules/**,build" diff --git a/.github/workflows/packs-data.yaml b/.github/workflows/packs-data.yaml new file mode 100644 index 0000000000..b646f895a7 --- /dev/null +++ b/.github/workflows/packs-data.yaml @@ -0,0 +1,52 @@ +name: Update Deprecated Packs + +on: + schedule: + # Runs at 5 minutes past the hour, every 6 hours. 
+ - cron: "5 */6 * * *" + workflow_dispatch: +env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_KEY }} + AWS_DEFAULT_REGION: us-east-1 + +jobs: + build: + name: Get Pakcs data + runs-on: ubuntu-latest + steps: + - name: Retrieve Credentials + id: import-secrets + uses: hashicorp/vault-action@v2.7.3 + with: + url: https://vault.prism.spectrocloud.com + method: approle + roleId: ${{ secrets.VAULT_ROLE_ID }} + secretId: ${{ secrets.VAULT_SECRET_ID }} + secrets: /providers/github/organizations/spectrocloud/token?org_name=spectrocloud token | VAULT_GITHUB_TOKEN + + - uses: actions-hub/gcloud@master + env: + PROJECT_ID: spectro-common-dev + APPLICATION_CREDENTIALS: ${{ secrets.GOOGLE_APPLICATION_CREDENTIALS }} + with: + args: cp gs://docs-education-automation/packs_inventory/packs_report.json ./packs_report.json + cli: gsutil + + - name: Upload to S3 + run: | + aws s3 cp ./packs_report.json s3://docs.spectrocloud.com/packs-data/ + aws cloudfront create-invalidation --distribution-id ${{ secrets.DISTRIBUTION_ID }} --paths "/packs-data/packs_report.json" + aws s3 cp ./packs_report.json s3://docs-latest.spectrocloud.com/packs-data/ + aws cloudfront create-invalidation --distribution-id ${{ secrets.LATEST_DOCS_DISTRIBUTION_ID }} --paths "/packs-data/packs_report.json" + + - name: Slack Notification + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_USERNAME: "spectromate" + SLACK_ICON_EMOJI: ":robot:" + SLACK_COLOR: ${{ job.status }} + SLACK_MESSAGE: 'The Docs cron job that generates the `packs.json` file failed. Please check the logs for more details.' \ No newline at end of file diff --git a/.github/workflows/pull_request.yaml b/.github/workflows/pull_request.yaml index c19695529e..9e82b2a780 100644 --- a/.github/workflows/pull_request.yaml +++ b/.github/workflows/pull_request.yaml @@ -1,13 +1,25 @@ name: Pre-merge Checks -on: +on: pull_request: types: [opened, synchronize, reopened, ready_for_review] + branches-ignore: [ 'version-*' ] concurrency: group: ci-${{ github.ref }} cancel-in-progress: true +env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_KEY }} + AWS_DEFAULT_REGION: us-east-1 + APPZI_TOKEN: ${{ secrets.APPZI_TOKEN }} + MENDABLE_API_KEY: ${{ secrets.MENDABLE_API_KEY }} + FULLSTORY_ORGID: ${{ secrets.FULLSTORY_ORGID }} + ALGOLIA_ADMIN_KEY: ${{ secrets.ALGOLIA_ADMIN_KEY }} + ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }} + ALGOLIA_SEARCH_KEY: ${{ secrets.ALGOLIA_SEARCH_KEY }} jobs: run-ci: @@ -17,34 +29,12 @@ jobs: shell: bash if: ${{ !github.event.pull_request.draft }} steps: - # If the condition above is not met, aka, the PR is not in draft status, then this step is skipped. - # Because this step is part of the critical path, omission of this step will result in remaining CI steps not gettinge executed. - # As of 8/8/2022 there is now way to enforce this beahvior in GitHub Actions CI. + # If the condition above is not met, aka, the PR is not in draft status, then this step is skipped. + # Because this step is part of the critical path, omission of this step will result in remaining CI steps not gettinge executed. + # As of 8/8/2022 there is now way to enforce this beahvior in GitHub Actions CI. 
- run: exit 0 - - run-test: - name: Test - needs: [run-ci] - runs-on: ubuntu-latest - if: ${{ !github.event.pull_request.draft }} - steps: - - name: Checkout Repository - uses: actions/checkout@v3 - - - - name: Setup Node.js environment - uses: actions/setup-node@v3 - with: - node-version: '18' - cache: 'npm' - - - run: npm ci - - - name: Test - run: npm run test vale: - # Image Optimizer docs: https://github.com/calibreapp/image-actions name: Writing Checks needs: [run-ci] runs-on: ubuntu-latest @@ -65,8 +55,10 @@ jobs: build: name: Build - needs: [run-ci, run-test] - runs-on: ubuntu-latest + needs: [run-ci] + runs-on: + group: Default + labels: docbot if: ${{ !github.event.pull_request.draft }} steps: - name: Checkout Repository @@ -75,11 +67,11 @@ jobs: - name: Setup Node.js environment uses: actions/setup-node@v3 with: - node-version: '18' - cache: 'npm' + node-version: "18" + cache: "npm" - run: npm ci - name: Build run: | - make build \ No newline at end of file + npm run build diff --git a/.github/workflows/release-preview.yaml b/.github/workflows/release-preview.yaml index 87ad55b13c..5ca87214e5 100644 --- a/.github/workflows/release-preview.yaml +++ b/.github/workflows/release-preview.yaml @@ -3,50 +3,50 @@ name: Release Branch Preview on: push: branches: - - 'release-*' + - "release-*" env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_KEY }} AWS_DEFAULT_REGION: us-east-1 - GATSBY_APPZI_TOKEN: ${{ secrets.GATSBY_APPZI_TOKEN }} - GATSBY_ALGOLIA_SEARCH_KEY: ${{ secrets.DEV_GATSBY_ALGOLIA_SEARCH_KEY }} - GATSBY_ALGOLIA_APP_ID: ${{ secrets.DEV_GATSBY_ALGOLIA_APP_ID }} - ALGOLIA_ADMIN_KEY: ${{ secrets.DEV_ALGOLIA_ADMIN_KEY }} - GATSBY_MENDABLE_API_KEY: ${{ secrets.GATSBY_MENDABLE_API_KEY }} - NETLIFY_PREVIEW: false - # The GATSBY_FULLSTORY_ORGID is removed so that preview builds don't get recorded in FullStory + APPZI_TOKEN: ${{ secrets.APPZI_TOKEN }} + MENDABLE_API_KEY: ${{ secrets.MENDABLE_API_KEY }} + FULLSTORY_ORGID: ${{ secrets.FULLSTORY_ORGID }} + ALGOLIA_ADMIN_KEY: ${{ secrets.ALGOLIA_ADMIN_KEY }} + ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }} + ALGOLIA_SEARCH_KEY: ${{ secrets.ALGOLIA_SEARCH_KEY }} + concurrency: group: preview-${{ github.ref }} cancel-in-progress: true - jobs: build: name: Build - runs-on: ubuntu-latest + runs-on: + group: Default + labels: docbot steps: - - name: Check out repository - uses: actions/checkout@v3 - - - - name: Setup Node.js environment - uses: actions/setup-node@v3 - with: - node-version: '18' - cache: 'npm' - - - run: npm ci - - - name: Build - run: | - make build - - - name: Deploy Preview - run: | - aws s3 sync --cache-control 'max-age=604800' --exclude '*.html' --exclude '*page-data/*' --exclude '*.txt' --exclude '*.xml' --exclude '*/sw.js' public/ s3://docs-latest.spectrocloud.com --delete - aws s3 sync --cache-control 'max-age=0, s-maxage=604800' public/ s3://docs-latest.spectrocloud.com --delete - aws cloudfront create-invalidation --distribution-id EV0DH5A7CFZBY --paths "/*" \ No newline at end of file + - name: Check out repository + uses: actions/checkout@v3 + + - name: Setup Node.js environment + uses: actions/setup-node@v3 + with: + node-version: "18" + cache: "npm" + + - run: npm ci + + - name: Build + run: | + make build + + - name: Deploy Preview + run: | + aws s3 sync --cache-control 'public, max-age=604800' --exclude '*.html' --exclude '*.xml' --exclude build/scripts/ build/ s3://docs-latest.spectrocloud.com --delete + aws s3 sync 
--cache-control 'public, max-age=0, s-maxage=604800' build/ s3://docs-latest.spectrocloud.com --delete + aws cloudfront create-invalidation --distribution-id ${{ secrets.LATEST_DOCS_DISTRIBUTION_ID }} --paths "/*" diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 79ffe809e7..2462592930 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,73 +1,67 @@ name: Release to Production +# Run this workflow every time a new commit is pushed to the master branch +# or a version branch (e.g. version-4-0) + on: push: - branches: [ master ] + branches: ['master', 'version-**'] + workflow_dispatch: + env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_KEY }} AWS_DEFAULT_REGION: us-east-1 - GATSBY_APPZI_TOKEN: ${{ secrets.GATSBY_APPZI_TOKEN }} - GATSBY_ALGOLIA_SEARCH_KEY: ${{ secrets.GATSBY_ALGOLIA_SEARCH_KEY }} - GATSBY_ALGOLIA_APP_ID: ${{ secrets.GATSBY_ALGOLIA_APP_ID }} + APPZI_TOKEN: ${{ secrets.APPZI_TOKEN }} + MENDABLE_API_KEY: ${{ secrets.MENDABLE_API_KEY }} + FULLSTORY_ORGID: ${{ secrets.FULLSTORY_ORGID }} ALGOLIA_ADMIN_KEY: ${{ secrets.ALGOLIA_ADMIN_KEY }} - GATSBY_MENDABLE_API_KEY: ${{ secrets.GATSBY_MENDABLE_API_KEY }} - GATSBY_FULLSTORY_ORGID: ${{ secrets.GATSBY_FULLSTORY_ORGID }} + ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }} + ALGOLIA_SEARCH_KEY: ${{ secrets.ALGOLIA_SEARCH_KEY }} -jobs: +concurrency: + group: production-${{ github.workflow }} + cancel-in-progress: true + +jobs: build: - name: Build - runs-on: ubuntu-latest + name: Build Website + runs-on: + group: Default + labels: docbot steps: - name: Checkout Repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: "master" + - name: Setup Node.js environment uses: actions/setup-node@v3 with: - node-version: '18' - cache: 'npm' + node-version: "18" + cache: "npm" - run: npm ci - - name: Build + - name: Compile run: | + make versions-ci make build - - name: Artifacts - uses: actions/upload-artifact@v2 - with: - name: public - path: public - retention-days: 1 - - - update-site: - name: Update Site - needs: [build] - runs-on: ubuntu-latest - steps: - - name: Checkout Repository - uses: actions/checkout@v3 - - - name: Download my-folder - uses: actions/download-artifact@v2 - with: - name: public - path: public - - name: Upload to AWS run: | - rm -f public/robots.txt - aws s3 sync --cache-control 'max-age=604800' --exclude '*.html' --exclude '*page-data/*' --exclude '*.txt' --exclude '*.xml' --exclude '*/sw.js' public/ s3://docs.spectrocloud.com --delete - aws s3 sync --cache-control 'max-age=0, s-maxage=604800' public/ s3://docs.spectrocloud.com --delete - aws cloudfront create-invalidation --distribution-id E1LK6TRNPR90DX --paths "/*" + aws s3 sync --cache-control 'public, max-age=604800' --exclude '*.html' --exclude '*.xml' --exclude build/scripts/ build/ s3://docs.spectrocloud.com --delete + aws s3 sync --cache-control 'public, max-age=0, s-maxage=604800' build/ s3://docs.spectrocloud.com --delete + aws cloudfront create-invalidation --distribution-id ${{ secrets.DISTRIBUTION_ID }} --paths "/*" release: name: "Release" + needs: [build] runs-on: ubuntu-latest steps: - id: checkout @@ -81,7 +75,7 @@ jobs: uses: actions/setup-node@v3 with: node-version: 18 - cache: 'npm' + cache: "npm" - name: Install dependencies run: npm ci diff --git a/.github/workflows/url-checks.yaml b/.github/workflows/url-checks.yaml index d3b8ed9786..42d6ff27ca 
100644 --- a/.github/workflows/url-checks.yaml +++ b/.github/workflows/url-checks.yaml @@ -11,7 +11,9 @@ concurrency: name: Broken URL check jobs: markdown-link-check: - runs-on: ubuntu-latest + runs-on: + group: Default + labels: docbot steps: - id: checkout name: Checkout Repository @@ -36,4 +38,4 @@ jobs: ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }} OWNER: ${{ github.repository_owner }} REPO: ${{ github.event.repository.name }} - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} \ No newline at end of file diff --git a/.github/workflows/version-branch-update.yaml b/.github/workflows/version-branch-update.yaml index b29d63850f..d01f38b3e7 100644 --- a/.github/workflows/version-branch-update.yaml +++ b/.github/workflows/version-branch-update.yaml @@ -3,22 +3,42 @@ name: Version Branch Update on: pull_request: branches: - - 'backport/version-*' + - 'version-[0-9]-[0-9]' + types: [opened, synchronize, reopened] + workflow_dispatch: + concurrency: group: version-${{ github.ref }} cancel-in-progress: true +env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_KEY }} + AWS_DEFAULT_REGION: us-east-1 + APPZI_TOKEN: ${{ secrets.APPZI_TOKEN }} + MENDABLE_API_KEY: ${{ secrets.MENDABLE_API_KEY }} + FULLSTORY_ORGID: ${{ secrets.FULLSTORY_ORGID }} + ALGOLIA_ADMIN_KEY: ${{ secrets.ALGOLIA_ADMIN_KEY }} + ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }} + ALGOLIA_SEARCH_KEY: ${{ secrets.ALGOLIA_SEARCH_KEY }} + NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }} + NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }} + jobs: build: name: Build - runs-on: ubuntu-latest + runs-on: + group: Default + labels: docbot + steps: - name: Check out repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 @@ -28,10 +48,36 @@ jobs: node-version: '18' cache: 'npm' - - run: npm ci - - name: Build + - name: Post Netlify progress + uses: mshick/add-pr-comment@v2 + with: + message: | + 🤖 Starting the Netlify preview build for commit ${{ github.sha }}. This may take a few minutes. + refresh-message-position: true + + - name: Netlify Build run: | - make build \ No newline at end of file + netlify build --context deploy-preview + + - name: Deploy to Netlify + id: netlify + uses: nwtgck/actions-netlify@v2.1.0 + with: + publish-dir: ./build + deploy-message: 'Manual Netlify deployment from GitHub Actions - ${{ github.sha }}' + enable-pull-request-comment: true + overwrites-pull-request-comment: true + enable-commit-comment: true + + - name: Post Netlify URL + uses: mshick/add-pr-comment@v2 + with: + message: | + 🚀 Netlify preview deployed succesfully for commit ${{ github.sha }}. Click [here](${{steps.netlify.outputs.deploy-url}}) to preview the changes. + message-failure: | + 👎 Uh oh! The Netlify Preview failed to deploy for commit ${{ github.sha }}. Please check the Netlify logs for more information. 
+ refresh-message-position: true + update-only: true \ No newline at end of file diff --git a/.gitignore b/.gitignore old mode 100755 new mode 100644 index 6385313502..60435292c7 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,25 @@ +# Dependencies +/node_modules + +# Production +build + +# Generated files +.docusaurus +.cache-loader + +# Misc +.env +.DS_Store +.env.local +.env.development.local +.env.test.local +.env.production.local + +npm-debug.log* +yarn-debug.log* +yarn-error.log* + public .cache node_modules @@ -12,9 +34,17 @@ link_report.* *-creds.tape palette-docs.pdf +docs/api-content/api-docs/v1/*.mdx +docs/api-content/api-docs/v1/sidebar.js -# Local Netlify folder -.netlify -# @netlify/plugin-gatsby ignores start -netlify/functions/gatsby -# @netlify/plugin-gatsby ignores end +# Versions Content +versions.json +versioned_docs/ +versioned_sidebars/ +api_versions.json +api_versioned_docs/ +api_versioned_sidebars/ +versioned_sidebars/* +temp.docusaurus.config.js +staging_docs/ +staging_sidebars/ \ No newline at end of file diff --git a/.gitleaksignore b/.gitleaksignore index d14fe7d873..93a3f79676 100644 --- a/.gitleaksignore +++ b/.gitleaksignore @@ -1,93 +1,94 @@ -17d6bc43db10d33d36d0215d74e1c540427dc60a:content/docs/06-integrations/00-spectro-k8s-dashboard.md:generic-api-key:179 -2e6256d3133f350673bcc16230387fb42e793692:content/docs/04-clusters/03-edge/02-native.md:generic-api-key:169 -b9bad96c09b73fb074ff265e5a6d9e9860d341fb:content/docs/04.5-devx/06-apps/05-deploy-app.md:generic-api-key:139 -45a506fa94cad89d6b84ed72241b0c472e206f55:content/docs/04.5-devx/06-apps/05-deploy-app.md:generic-api-key:508 -45a506fa94cad89d6b84ed72241b0c472e206f55:content/docs/04.5-devx/06-apps/05-deploy-app.md:generic-api-key:1126 -f564044a709e7f02f3cd9e424a461c61a3750a61:content/docs/04.5-devx/06-apps/05-deploy-app.md:generic-api-key:142 -45a506fa94cad89d6b84ed72241b0c472e206f55:content/docs/04.5-devx/06-apps/05-deploy-app.md:generic-api-key:1161 -1faea5d246ee4c45aba86b938a26ead7eeaadf87:content/docs/06-integrations/00-spectro-k8s-dashboard.md:generic-api-key:179 -bc629af73508f1005b2eadadc363e35e6ffbab4d:content/docs/04-clusters/03-edge/02-native.md:generic-api-key:169 -912f5409ced4bb90eb701efefa73e7d2008972ba:content/docs/04.5-devx/06-apps/05-deploy-app.md:generic-api-key:508 -912f5409ced4bb90eb701efefa73e7d2008972ba:content/docs/04.5-devx/06-apps/05-deploy-app.md:generic-api-key:1126 -912f5409ced4bb90eb701efefa73e7d2008972ba:content/docs/04.5-devx/06-apps/05-deploy-app.md:generic-api-key:1161 -031ef223cfb6ef4747f5179ef16e126c269afdab:content/docs/04.5-devx/06-apps/05-deploy-app.md:generic-api-key:139 -d748cceb3859df5c354b96a0c3d115513f6c123e:content/docs/04.5-devx/06-apps/05-deploy-app.md:generic-api-key:142 -404219eb2cad1fbd5e03d64783b1db285c8e08b3:content/docs/06-integrations/00-portworx_operator.md:private-key:613 -504642da118b99f12e838c6ff9a1166cc8fcff61:content/docs/06-integrations/00-portworx_operator.md:private-key:309 -86b8971f386ee3b00d2558a7fd6029f312b335cf:content/docs/06-integrations/00-portworx.md:private-key:269 -feaea7ee07b76653233cf4ff8376953bfb216c8d:content/docs/06-integrations/00-ubuntu.md:generic-api-key:65 -261324e61a4dd4a3da73c8c03f45d3fcca826817:packages/docs/content/04-clusters/3-cluster-management/9-cluster-rbac.md:generic-api-key:112 -261324e61a4dd4a3da73c8c03f45d3fcca826817:packages/docs/content/04-clusters/3-cluster-management/9-cluster-rbac.md:generic-api-key:113 
-261324e61a4dd4a3da73c8c03f45d3fcca826817:packages/docs/content/04-clusters/3-cluster-management/9-cluster-rbac.md:generic-api-key:135 -261324e61a4dd4a3da73c8c03f45d3fcca826817:packages/docs/content/04-clusters/3-cluster-management/9-cluster-rbac.md:generic-api-key:205 -261324e61a4dd4a3da73c8c03f45d3fcca826817:packages/docs/content/04-clusters/3-cluster-management/9-cluster-rbac.md:generic-api-key:206 -9c7c6f5e1d5b461ecbc18eb655fcaed1938882a1:packages/docs/content/08-user-management/1-user-authentication.md:generic-api-key:88 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/citrix-ipam.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/falco.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/dex.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/istio.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/fluentbit.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/kong.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/kibana.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/kubernetes.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/kubernetes-dashboard.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/metallb.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/kubevious.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/permission-manager.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/nginx.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/storage-pack-csi.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/prometheus-operator.md:generic-api-key:8 -b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/06-integrations/vault.md:generic-api-key:8 -433734e7d6ad031962357e8c6e6921080e366f16:packages/docs/content/06-integrations/storage-pack-csi.md:generic-api-key:8 +17d6bc43db10d33d36d0215d74e1c540427dc60a:docs/docs-content/integrations/spectro-k8s-dashboard.md:generic-api-key:179 +2e6256d3133f350673bcc16230387fb42e793692:docs/docs-content/clusters/edge/native.md:generic-api-key:169 +b9bad96c09b73fb074ff265e5a6d9e9860d341fb:docs/docs-content/devx/apps/deploy-app.md:generic-api-key:139 +45a506fa94cad89d6b84ed72241b0c472e206f55:docs/docs-content/devx/apps/deploy-app.md:generic-api-key:508 +45a506fa94cad89d6b84ed72241b0c472e206f55:docs/docs-content/devx/apps/deploy-app.md:generic-api-key:1126 +f564044a709e7f02f3cd9e424a461c61a3750a61:docs/docs-content/devx/apps/deploy-app.md:generic-api-key:142 +45a506fa94cad89d6b84ed72241b0c472e206f55:docs/docs-content/devx/apps/deploy-app.md:generic-api-key:1161 +1faea5d246ee4c45aba86b938a26ead7eeaadf87:docs/docs-content/integrations/spectro-k8s-dashboard.md:generic-api-key:179 +bc629af73508f1005b2eadadc363e35e6ffbab4d:docs/docs-content/clusters/edge/native.md:generic-api-key:169 +912f5409ced4bb90eb701efefa73e7d2008972ba:docs/docs-content/devx/apps/deploy-app.md:generic-api-key:508 
+912f5409ced4bb90eb701efefa73e7d2008972ba:docs/docs-content/devx/apps/deploy-app.md:generic-api-key:1126 +912f5409ced4bb90eb701efefa73e7d2008972ba:docs/docs-content/devx/apps/deploy-app.md:generic-api-key:1161 +031ef223cfb6ef4747f5179ef16e126c269afdab:docs/docs-content/devx/apps/deploy-app.md:generic-api-key:139 +d748cceb3859df5c354b96a0c3d115513f6c123e:docs/docs-content/devx/apps/deploy-app.md:generic-api-key:142 +404219eb2cad1fbd5e03d64783b1db285c8e08b3:docs/docs-content/integrations/portworx_operator.md:private-key:613 +504642da118b99f12e838c6ff9a1166cc8fcff61:docs/docs-content/integrations/portworx_operator.md:private-key:309 +86b8971f386ee3b00d2558a7fd6029f312b335cf:docs/docs-content/integrations/portworx.md:private-key:269 +feaea7ee07b76653233cf4ff8376953bfb216c8d:docs/docs-content/integrations/ubuntu.md:generic-api-key:65 +261324e61a4dd4a3da73c8c03f45d3fcca826817:packages/docs/content/clusters/cluster-management/cluster-rbac.md:generic-api-key:112 +261324e61a4dd4a3da73c8c03f45d3fcca826817:packages/docs/content/clusters/cluster-management/cluster-rbac.md:generic-api-key:113 +261324e61a4dd4a3da73c8c03f45d3fcca826817:packages/docs/content/clusters/cluster-management/cluster-rbac.md:generic-api-key:135 +261324e61a4dd4a3da73c8c03f45d3fcca826817:packages/docs/content/clusters/cluster-management/cluster-rbac.md:generic-api-key:205 +261324e61a4dd4a3da73c8c03f45d3fcca826817:packages/docs/content/clusters/cluster-management/cluster-rbac.md:generic-api-key:206 +9c7c6f5e1d5b461ecbc18eb655fcaed1938882a1:packages/docs/content/user-management/user-authentication.md:generic-api-key:88 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/citrix-ipam.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/falco.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/dex.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/istio.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/fluentbit.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/kong.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/kibana.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/kubernetes.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/kubernetes-dashboard.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/metallb.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/kubevious.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/permission-manager.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/nginx.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/storage-pack-csi.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/prometheus-operator.md:generic-api-key:8 +b9b638b69fc0e3e6a05e6d6b4c50cb1e9e2a8d17:packages/docs/content/integrations/vault.md:generic-api-key:8 +433734e7d6ad031962357e8c6e6921080e366f16:packages/docs/content/integrations/storage-pack-csi.md:generic-api-key:8 a752c35dfc19b5efa94903fc13b43edaed1ee579:prow/functions.sh:aws-access-token:29 
-a366d65523a8fb5466f2e0d857f9eb5c2c23f0d0:packages/docs/content/06-integrations/istio.md:generic-api-key:8 -e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/06-integrations/citrix-ipam.md:generic-api-key:8 -e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/06-integrations/kibana.md:generic-api-key:8 -e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/06-integrations/fluentbit.md:generic-api-key:8 -e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/06-integrations/kubernetes-dashboard.md:generic-api-key:8 -e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/06-integrations/kong.md:generic-api-key:8 -e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/06-integrations/kubevious.md:generic-api-key:8 -e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/06-integrations/kubernetes.md:generic-api-key:8 -e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/06-integrations/metallb.md:generic-api-key:8 -e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/06-integrations/prometheus-operator.md:generic-api-key:8 -e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/06-integrations/permission-manager.md:generic-api-key:8 -e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/06-integrations/vault.md:generic-api-key:8 -aa78e799a424e8c678ba59483413554a6ab6e015:packages/docs/content/06-integrations/falco.md:generic-api-key:8 -aa78e799a424e8c678ba59483413554a6ab6e015:packages/docs/content/06-integrations/dex.md:generic-api-key:8 -aa78e799a424e8c678ba59483413554a6ab6e015:packages/docs/content/06-integrations/nginx.md:generic-api-key:8 -aa78e799a424e8c678ba59483413554a6ab6e015:packages/docs/content/06-integrations/vault.md:generic-api-key:8 -ce669a28f46e5db41d142609cefcb79771c96fda:content/docs/04-clusters/03-edge/35-stylus-reference.md:generic-api-key:93 -2dae9a7f97386b468dcf80f4432ee78117197a66:content/docs/06-integrations/00-kubernetes-generic.md:generic-api-key:222 -7252e2d39132eb04c58fc04ccf874e091a10dcc1:content/docs/06-integrations/00-kubernetes-generic.md:generic-api-key:222 -2dae9a7f97386b468dcf80f4432ee78117197a66:content/docs/06-integrations/00-kubernetes-generic.md:generic-api-key:424 -2dae9a7f97386b468dcf80f4432ee78117197a66:content/docs/06-integrations/00-kubernetes-generic.md:generic-api-key:625 -7252e2d39132eb04c58fc04ccf874e091a10dcc1:content/docs/06-integrations/00-kubernetes-generic.md:generic-api-key:424 -7252e2d39132eb04c58fc04ccf874e091a10dcc1:content/docs/06-integrations/00-kubernetes-generic.md:generic-api-key:625 -7252e2d39132eb04c58fc04ccf874e091a10dcc1:content/docs/06-integrations/00-kubernetes-generic.md:generic-api-key:222 -d4027c775e23bdf0dd9356b4b57952b53818b03f:content/docs/06-integrations/00-kubernetes-generic.md:generic-api-key:222 -d4027c775e23bdf0dd9356b4b57952b53818b03f:content/docs/06-integrations/00-kubernetes-generic.md:generic-api-key:424 -4027c775e23bdf0dd9356b4b57952b53818b03f:content/docs/06-integrations/00-kubernetes-generic.md:generic-api-key:625 -202fb21ef249562ae79456cfa65032ea8123ea69:content/docs/04-clusters/03-edge/04-edgeforge-workflow/11-artifacts-with-content-bundle:generic-api-key:145 -d4027c775e23bdf0dd9356b4b57952b53818b03f:content/docs/06-integrations/00-kubernetes-generic.md:generic-api-key:625 -9abb65bf6c8406ffd6a4d3052a7bf89df61cb221:content/docs/06-integrations/00-kubernetes-generic.md:generic-api-key:460 -9abb65bf6c8406ffd6a4d3052a7bf89df61cb221:content/docs/06-integrations/00-kubernetes-generic.md:generic-api-key:682 
-c3d3d6feb509d01b863747fc377203ce9f72017d:content/docs/06-integrations/00-kubernetes.md:generic-api-key:226 -c3d3d6feb509d01b863747fc377203ce9f72017d:content/docs/06-integrations/00-kubernetes.md:generic-api-key:338 -c8f3f91e82568a2a0763726ec1ff517d9c6a7df0:content/docs/06-integrations/00-kubernetes.md:generic-api-key:338 -dfd21bc72fb6f44a010fce408a329da869e0b29a:content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md:private-key:253 -060a86832764c08ef3054b627c09e4a806af974c:content/docs/12-enterprise-version/07.1-reverse-proxy.md:private-key:252 -9f7fb23d556f1f7d0c0719f78267f82537aef49d:content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md:private-key:152 -9f7fb23d556f1f7d0c0719f78267f82537aef49d:content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md:private-key:179 -d916ea8726a0c226beb82fef8567877f5f5ef3f0:content/docs/12-enterprise-version/07.1-reverse-proxy.md:private-key:152 -1a57adcd59badaa0fb0a7e1550b258df264a24f1:content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md:private-key:153 -1a57adcd59badaa0fb0a7e1550b258df264a24f1:content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md:private-key:180 -1a57adcd59badaa0fb0a7e1550b258df264a24f1:content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md:private-key:253 -1a57adcd59badaa0fb0a7e1550b258df264a24f1:content/docs/12-enterprise-version/07.1-reverse-proxy.md:private-key:153 -9093a9c52d9823c89ad51e2d74448a60a30e30bb:content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md:private-key:153 -9093a9c52d9823c89ad51e2d74448a60a30e30bb:content/docs/12-enterprise-version/07.1-reverse-proxy.md:private-key:153 -9093a9c52d9823c89ad51e2d74448a60a30e30bb:content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md:private-key:180 -9093a9c52d9823c89ad51e2d74448a60a30e30bb:content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md:private-key:253 -414bf547fcd11c8fb3a7da928c19a9ec763e5bbd:content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md:private-key:153 -414bf547fcd11c8fb3a7da928c19a9ec763e5bbd:content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md:private-key:180 -414bf547fcd11c8fb3a7da928c19a9ec763e5bbd:content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md:private-key:253 -414bf547fcd11c8fb3a7da928c19a9ec763e5bbd:content/docs/12-enterprise-version/07.1-reverse-proxy.md:private-key:153 \ No newline at end of file +a366d65523a8fb5466f2e0d857f9eb5c2c23f0d0:packages/docs/content/integrations/istio.md:generic-api-key:8 +e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/integrations/citrix-ipam.md:generic-api-key:8 +e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/integrations/kibana.md:generic-api-key:8 +e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/integrations/fluentbit.md:generic-api-key:8 +e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/integrations/kubernetes-dashboard.md:generic-api-key:8 +e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/integrations/kong.md:generic-api-key:8 +e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/integrations/kubevious.md:generic-api-key:8 +e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/integrations/kubernetes.md:generic-api-key:8 +e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/integrations/metallb.md:generic-api-key:8 +e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/integrations/prometheus-operator.md:generic-api-key:8 
+e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/integrations/permission-manager.md:generic-api-key:8 +e7aad5032fa6c7d6ca53f7974f3f3afa13be961a:packages/docs/content/integrations/vault.md:generic-api-key:8 +aa78e799a424e8c678ba59483413554a6ab6e015:packages/docs/content/integrations/falco.md:generic-api-key:8 +aa78e799a424e8c678ba59483413554a6ab6e015:packages/docs/content/integrations/dex.md:generic-api-key:8 +aa78e799a424e8c678ba59483413554a6ab6e015:packages/docs/content/integrations/nginx.md:generic-api-key:8 +aa78e799a424e8c678ba59483413554a6ab6e015:packages/docs/content/integrations/vault.md:generic-api-key:8 +ce669a28f46e5db41d142609cefcb79771c96fda:docs/docs-content/clusters/edge/stylus-reference.md:generic-api-key:93 +2dae9a7f97386b468dcf80f4432ee78117197a66:docs/docs-content/integrations/kubernetes-generic.md:generic-api-key:222 +7252e2d39132eb04c58fc04ccf874e091a10dcc1:docs/docs-content/integrations/kubernetes-generic.md:generic-api-key:222 +2dae9a7f97386b468dcf80f4432ee78117197a66:docs/docs-content/integrations/kubernetes-generic.md:generic-api-key:424 +2dae9a7f97386b468dcf80f4432ee78117197a66:docs/docs-content/integrations/kubernetes-generic.md:generic-api-key:625 +7252e2d39132eb04c58fc04ccf874e091a10dcc1:docs/docs-content/integrations/kubernetes-generic.md:generic-api-key:424 +7252e2d39132eb04c58fc04ccf874e091a10dcc1:docs/docs-content/integrations/kubernetes-generic.md:generic-api-key:625 +7252e2d39132eb04c58fc04ccf874e091a10dcc1:docs/docs-content/integrations/kubernetes-generic.md:generic-api-key:222 +d4027c775e23bdf0dd9356b4b57952b53818b03f:docs/docs-content/integrations/kubernetes-generic.md:generic-api-key:222 +d4027c775e23bdf0dd9356b4b57952b53818b03f:docs/docs-content/integrations/kubernetes-generic.md:generic-api-key:424 +4027c775e23bdf0dd9356b4b57952b53818b03f:docs/docs-content/integrations/kubernetes-generic.md:generic-api-key:625 +202fb21ef249562ae79456cfa65032ea8123ea69:docs/docs-content/clusters/edge/edgeforge-workflow/artifacts-with-content-bundle:generic-api-key:145 +d4027c775e23bdf0dd9356b4b57952b53818b03f:docs/docs-content/integrations/kubernetes-generic.md:generic-api-key:625 +9abb65bf6c8406ffd6a4d3052a7bf89df61cb221:docs/docs-content/integrations/kubernetes-generic.md:generic-api-key:460 +9abb65bf6c8406ffd6a4d3052a7bf89df61cb221:docs/docs-content/integrations/kubernetes-generic.md:generic-api-key:682 +c3d3d6feb509d01b863747fc377203ce9f72017d:docs/docs-content/integrations/kubernetes.md:generic-api-key:226 +c3d3d6feb509d01b863747fc377203ce9f72017d:docs/docs-content/integrations/kubernetes.md:generic-api-key:338 +c8f3f91e82568a2a0763726ec1ff517d9c6a7df0:docs/docs-content/integrations/kubernetes.md:generic-api-key:338 +dfd21bc72fb6f44a010fce408a329da869e0b29a:docs/docs-content/vertex/system-management/reverse-proxy.md:private-key:253 +060a86832764c08ef3054b627c09e4a806af974c:docs/docs-content/enterprise-version/reverse-proxy.md:private-key:252 +9f7fb23d556f1f7d0c0719f78267f82537aef49d:docs/docs-content/vertex/system-management/reverse-proxy.md:private-key:152 +9f7fb23d556f1f7d0c0719f78267f82537aef49d:docs/docs-content/vertex/system-management/reverse-proxy.md:private-key:179 +d916ea8726a0c226beb82fef8567877f5f5ef3f0:docs/docs-content/enterprise-version/reverse-proxy.md:private-key:152 +1a57adcd59badaa0fb0a7e1550b258df264a24f1:docs/docs-content/vertex/system-management/reverse-proxy.md:private-key:153 +1a57adcd59badaa0fb0a7e1550b258df264a24f1:docs/docs-content/vertex/system-management/reverse-proxy.md:private-key:180 
+1a57adcd59badaa0fb0a7e1550b258df264a24f1:docs/docs-content/vertex/system-management/reverse-proxy.md:private-key:253 +1a57adcd59badaa0fb0a7e1550b258df264a24f1:docs/docs-content/enterprise-version/reverse-proxy.md:private-key:153 +9093a9c52d9823c89ad51e2d74448a60a30e30bb:docs/docs-content/vertex/system-management/reverse-proxy.md:private-key:153 +9093a9c52d9823c89ad51e2d74448a60a30e30bb:docs/docs-content/enterprise-version/reverse-proxy.md:private-key:153 +9093a9c52d9823c89ad51e2d74448a60a30e30bb:docs/docs-content/vertex/system-management/reverse-proxy.md:private-key:180 +9093a9c52d9823c89ad51e2d74448a60a30e30bb:docs/docs-content/vertex/system-management/reverse-proxy.md:private-key:253 +414bf547fcd11c8fb3a7da928c19a9ec763e5bbd:docs/docs-content/vertex/system-management/reverse-proxy.md:private-key:153 +414bf547fcd11c8fb3a7da928c19a9ec763e5bbd:docs/docs-content/vertex/system-management/reverse-proxy.md:private-key:180 +414bf547fcd11c8fb3a7da928c19a9ec763e5bbd:docs/docs-content/vertex/system-management/reverse-proxy.md:private-key:253 +414bf547fcd11c8fb3a7da928c19a9ec763e5bbd:docs/docs-content/enterprise-version/reverse-proxy.md:private-key:153 +698d7cbdcc26f9af98f623effce32ae337898c25:docusaurus.config.js:generic-api-key:282 \ No newline at end of file diff --git a/.husky/pre-commit b/.husky/pre-commit new file mode 100755 index 0000000000..3eaf0b3bc4 --- /dev/null +++ b/.husky/pre-commit @@ -0,0 +1,6 @@ +#!/usr/bin/env sh +. "$(dirname -- "$0")/_/husky.sh" + +npx lint-staged + +npm run clean-api-docs \ No newline at end of file diff --git a/.husky/pre-push b/.husky/pre-push new file mode 100755 index 0000000000..bb58b3dcef --- /dev/null +++ b/.husky/pre-push @@ -0,0 +1,4 @@ +#!/usr/bin/env sh +. "$(dirname -- "$0")/_/husky.sh" + +./scripts/check-branch.sh diff --git a/.lintstagedrc.json b/.lintstagedrc.json new file mode 100644 index 0000000000..5f8517b0e4 --- /dev/null +++ b/.lintstagedrc.json @@ -0,0 +1,5 @@ +{ + "*.ts": ["prettier --write", "eslint"], + "*.html": ["eslint", "prettier --write"], + "*.scss": "prettier --write" +} diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 0000000000..d5416bf2ec --- /dev/null +++ b/.prettierignore @@ -0,0 +1,4 @@ +.docusaurus +build +node_modules +Dockerfile diff --git a/.releaserc.yaml b/.releaserc.yaml index ec0f004349..e9fc79095b 100644 --- a/.releaserc.yaml +++ b/.releaserc.yaml @@ -1,19 +1,19 @@ branches: [master] repositoryUrl: https://github.com/spectrocloud/librarium plugins: -- "@semantic-release/commit-analyzer" -- "@semantic-release/release-notes-generator" -- "@semantic-release/changelog" -- - "@semantic-release/exec" - - analyzeCommitsCmd: echo 'NEW_VERSION=false' > VERSION.env - verifyReleaseCmd: |- - echo 'export VERSION=${nextRelease.version} - NEW_VERSION=true' > VERSION.env -- - "@semantic-release/github" - - assets: - - "*.zip" -- - "@semantic-release/git" - - assets: - - CHANGELOG.md -- - "@semantic-release/npm" - - npmPublish: false \ No newline at end of file + - "@semantic-release/commit-analyzer" + - "@semantic-release/release-notes-generator" + - "@semantic-release/changelog" + - - "@semantic-release/exec" + - analyzeCommitsCmd: echo 'NEW_VERSION=false' > VERSION.env + verifyReleaseCmd: |- + echo 'export VERSION=${nextRelease.version} + NEW_VERSION=true' > VERSION.env + - - "@semantic-release/github" + - assets: + - "*.zip" + - - "@semantic-release/git" + - assets: + - CHANGELOG.md + - - "@semantic-release/npm" + - npmPublish: false diff --git a/CHANGELOG.md b/CHANGELOG.md index 
5ac1967f72..27d5426f68 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,10 @@ +# 1.0.0 (2023-09-01) + + +### Features + +* **release:** add release 4.1.0 ([#13](https://github.com/spectrocloud/docs-prototype/issues/13)) ([712f435](https://github.com/spectrocloud/docs-prototype/commit/712f4354da6701c28080957a7bfc72c0ad6be5e3)) + # [3.4.0](https://github.com/spectrocloud/librarium/compare/v3.3.0...v3.4.0) (2023-05-23) diff --git a/Dockerfile b/Dockerfile index 22e657b787..943538d053 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,9 +8,10 @@ RUN apk add util-linux && \ chmod +x /entry.sh && \ mkdir .cache && \ npm ci && \ -chown -R node:node /librarium +chown -R node:node /librarium && \ +echo -e "ALGOLIA_APP_ID=1234567890\nALGOLIA_SEARCH_KEY=1234567890" > .env EXPOSE 9000 USER node ENTRYPOINT ["/entry.sh"] -CMD ["npm", "run", "start"] \ No newline at end of file +CMD ["npm", "run", "start"] diff --git a/Makefile b/Makefile index 7c45ed4293..4c7bffcb9f 100644 --- a/Makefile +++ b/Makefile @@ -4,28 +4,52 @@ IMAGE:=spectrocloud/librarium # Retrieve all modified files in the content folder and compare the difference between the master branch git tree blob AND this commit's git tree blob CHANGED_FILE=$(shell git diff-tree -r --no-commit-id --name-only master HEAD | grep content) +TEMP_DIR=$(shell $TMPDIR) + help: ## Display this help @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[0m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +initialize: ## Initialize the repository dependencies + @echo "initializing npm dependencies" + npm ci + touch .env + npx husky-init + clean: ## Clean build artifacts - rm -rf node_modules build public .cache - docker image rm $(IMAGE) + rm -rf node_modules build public .cache .docusaurus + docker image rm $(IMAGE) || echo "No image exists." + +clean-versions: ## Clean Docusarus content versions + @echo "cleaning versions" + rm -rf api_versions.json versions.json versioned_docs versioned_sidebars api_versioned_sidebars api_versioned_docs + git checkout -- docusaurus.config.js ##@ npm Targets -initialize: ## Initialize npm dependencies +init: ## Initialize npm dependencies @echo "initializing npm dependencies" npm ci + npx husky install start: ## Start a local development server npm run start build: ## Run npm build @echo "building site" - npm run clean - rm -rf public + npm run clear + rm -rf build npm run build +versions: ## Create Docusarus content versions + @echo "creating versions" + ./scripts/versions.sh $(TMPDIR) + + +versions-ci: ## Create Docusarus content versions in a GitHub Actions CI environment + @echo "creating versions" + ./scripts/versions.sh $$RUNNER_TEMP + ##@ Git Targets commit: ## Add a Git commit. Usage: make commit MESSAGE="" @@ -40,9 +64,31 @@ docker-image: ## Build the docker image docker build -t $(IMAGE) . 
docker-start: docker-image ## Start a local development container - docker run --rm -it -v $(CURDIR)/content:/librarium/content/ -p 9000:9000 $(IMAGE) + docker run --rm -it -v $(CURDIR)/docs:/librarium/docs/ -p 9000:9000 $(IMAGE) + -##@ Lint Targets +##@ Writing Checks + +sync-vale: ## Install Vale plugins + vale sync + +check-writing: ## Run Vale lint checks + vale $(CHANGED_FILE) + + +##@ Clean Server Artifacts + +fix-server: ## Fix server issues by removing the cache folder and reinstalling node modules + @echo "fixing server" + rm -rfv node_modules && npm ci && npm run clear + +###@ PDF Generation + +pdf: ## Generate PDF from docs + @echo "generating pdf" + npx docs-to-pdf docusaurus --initialDocURLs="https://docs.spectrocloud.com" --contentSelector="article" --paginationSelector="a.pagination-nav__link.pagination-nav__link--next" --excludeSelectors=".margin-vert--xl a,[class^='tocCollapsible'],.breadcrumbs,.theme-edit-this-page" --protocolTimeout=e00 --outputPDFFilename=palette-docs.pdf --coverTitle="Palette Documentation" --coverImage=https://docs.spectrocloud.com/assets/images/docs_introduction_product-overview-80d5488097f9e227a325e252dda42f85.png + +###@ URL Checks verify-url-links: ## Check for broken URLs in production rm link_report.csv || echo "No report exists. Proceeding to scan step" @@ -60,18 +106,4 @@ verify-url-links-ci: ## Check for broken URLs in production verify-url-links-local: build ## Check for broken URLs locally rm link_report.csv || echo "No report exists. Proceeding to scan step" - npm run test-links - -sync-vale: ## Install Vale plugins - vale sync - -check-writing: ## Run Vale lint checks - vale $(CHANGED_FILE) - -fix-server: ## Fix server issues by removing the cache folder and reinstalling node modules - @echo "fixing server" - rm -rfv node_modules && rm -rfv .cache/ && npm ci - -pdf: ## Generate PDF from docs - @echo "generating pdf" - npx docs-to-pdf docusaurus --initialDocURLs="https://docs.spectrocloud.com" --contentSelector="article" --paginationSelector="a.pagination-nav__link.pagination-nav__link--next" --excludeSelectors=".margin-vert--xl a,[class^='tocCollapsible'],.breadcrumbs,.theme-edit-this-page" --protocolTimeout=900000 --outputPDFFilename=palette-docs.pdf --coverTitle="Palette Documentation" --coverImage=https://new.docs-test.spectrocloud.com/assets/images/docs_introduction_product-overview-80d5488097f9e227a325e252dda42f85.png \ No newline at end of file + npm run test-links \ No newline at end of file diff --git a/README.md b/README.md index 4ef99fee4d..90e08f2b5d 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # Overview -![Spectro Cloud logo with docs inline](/assets/logo_landscape_for_white.png) +![Spectro Cloud logo with docs inline](/static/img/spectrocloud-logo-light.svg) Welcome to the Spectro Cloud documentation repository. To get started with contributions, please review the entire README. @@ -13,15 +13,24 @@ There are two local development paths available; Docker based, and non-Docker ba To contribute, we recommend having the following software installed locally on your workstation. -- Text Editor +- VScode or a text editor + - [Docker](https://docs.docker.com/desktop/) + - git configured and access to github repository -- node and npm (optional) + +- Node.js and npm (optional) ## Local Development (Docker) To get started with the Docker based local development approach ensure you are in the root context of this repository. 
+Initialize the repository by issuing the following command: + +```shell +make init +``` + Next, issue the following command to build the Docker image. **Note**: The first time issuing the command may take several minutes. @@ -32,75 +41,73 @@ make docker-image To start the Dockerized local development server, issue the command: -``` +```shell make docker-start ``` -The local development server is ready when the following output is displayed in your terminal. +The local development server is ready when the following output is displayed in your terminal. ```shell -You can now view root in the browser. -⠀ - Local: http://localhost:9000/ - On Your Network: http://172.17.0.2:9000/ -⠀ -View GraphiQL, an in-browser IDE, to explore your site's data and schema -⠀ - Local: http://localhost:9000/___graphql - On Your Network: http://172.17.0.2:9000/___graphql -⠀ -Note that the development build is not optimized. -To create a production build, use gatsby build -``` - -Visit [http://localhost:9000](http://localhost:9000) to view the local development documentation site. +> spectro-cloud-docs@4.0.0 start +> docusaurus start --host 0.0.0.0 --port 9000 -To exit from the local development Docker container. Press `Ctrl + Z`. - -## Local Development Setup (Non-Docker) +[INFO] Starting the development server... +[SUCCESS] Docusaurus website is running at: http://localhost:9000/ -Make a folder somewhere you can easily find +✔ Client + Compiled successfully in 8.39s -```sh -mkdir ~/Work +client (webpack 5.88.2) compiled successfully ``` +Open a browser and navigate to [http://localhost:9000](http://localhost:9000) to view the documentation website. + +To exit the local development Docker container, press `Ctrl + Z`. + +## Local Development Setup (Non-Docker) + Clone the repository and run the initialization script: ```sh cd Work git clone https://github.com/spectrocloud/librarium.git cd librarium -make initialize +make init +``` + +Next, populate the `.env` file with the following content. The local development server will not start without the required environment variables. The values are not important for local development. + +```shell +ALGOLIA_APP_ID=1234567890 +ALGOLIA_SEARCH_KEY=1234567890 ``` -# Documentation Content +## Documentation Content -Create a branch if needed. This will keep your work separated from the rest of your changes. +Create a branch to keep track of all your changes. ```sh git checkout -b <branch_name> ``` -To preview your changes use the following. +Make changes to any Markdown files in the [`docs/docs-content`](./docs/docs-content/) folder. + + +Start the local development server and preview your changes by navigating to the documentation page you modified. +You can start the local development server by issuing the following command: + ```sh make start ``` -This will open your browser to this address: http://localhost:9000 - -Open `~/Work/librarium/content/docs` in your editor and make changes. They should be synced up in the browser window. - -When you are done with some changes you can create a commit +When you are done with your changes, stage them and create a commit: ```sh -make commit MESSAGE="" +git add -A && git commit -m "docs: your commit message here" ``` -This will open your browser with the commit. Once the pull request is created a link will be added in the comments to preview the change in a staging environment. - -## Creating pages +## Creating Pages The documentation website is structured in a sidebar with main pages and sub-pages.
Main pages will contain an overview of the its sub pages. @@ -108,76 +115,158 @@ The documentation website is structured in a sidebar with main pages and sub-pag The **navigation** sidebar will be something across all pages. -The **header** will have a search bar and some links to different other sections of the documentation (api, glossary, integrations) +The **header** will have a search bar and links to other sections of the documentation (API) The page **content** will be displayed under the header and next to the sidebar. On it's right there will be a **table of contents** menu that will extract all of the headers inside the content and display them in a list. This will follow the user as he scroll the page. On top of the table of contents there will be a **github link** to the content of the file. This can be used by users to submit changes to different sections of our documentation -### Main pages +### Main Pages -You can create a main page by creating a `-.md` file in the root of the `content` directory. -The number will be the position of the item in the menu. Each of the main pages can be configured by sending attributes at the start of the file"s content. +Create a page with the filename `.md` in the `docs-content` folder of the `docs` directory. For positioning the document in the sidebar, you can use `sidebar_position: 1` in the front matter. To manage folders, create a `_category_.json` file with `{position: 1}` inside the desired directory. **Example of attributes** ```markdown --- -title: "Home" -metaTitle: "spectrocloud docs" -metaDescription: "This is the meta description" -icon: "home" -hideToC: true -fullWidth: true +title: "Introduction" +sidebar_label: "Introduction" +description: "Palette API Introduction" +hide_table_of_contents: false +sidebar_custom_props: + icon: "graph" --- ``` -| attribute | type | description | -| --------------- | ------- | ----------------------------------------------------------------------------------------------------------- | -| title | string | used as the label for navigation | -| metaTitle | string | will appear on the browser window / tab as the title | -| metaDescription | string | the text to display when a page is shared in social media platforms | -| icon | string | one of icons from https://fontawesome.com/icons?d=gallery | -| hideToC | boolean | setting this to `false` will hide the page from the navigation | -| fullWidth | boolean | setting this to `false` this can se set to use the full width of the page and there is no table of contents | + +#### Front Matter Attributes + +| attribute | type | description | +| ------------------------------ | ------- | ----------------------------------------------------------------------------------------------------------- | +| `sidebar_label` | string | used as the label for navigation | +| `title` | string | will appear on the browser window / tab as the title | +| `description` | string | the text to display when a page is shared in social media platforms | +| `sidebar_custom_props:`
` icon: "graph"` | string | one of icons from https://fontawesome.com/icons?d=gallery | +| `hide_table_of_contents` | boolean | setting this to `true` will hide the table of contents for the page | +| `sidebar_position` | number | the position of the page in the navigation sidebar. The pages are sorted ascending by this value | +| `toc_min_heading_level` | number | the minimum heading level to show in the table of contents. | +| `toc_max_heading_level` | number | the maximum heading level to show in the table of contents. | +| `tags` | array | A list of strings that can be used for additional categorization of content. | +| `keywords` | array | A list of strings that are used for SEO purposes. | ### Sub pages -Create a folder using the **same name** of the main page. Inside of it use the same name convention (`-.md`) to create subpages. -These pages will have the same attributes as for the main page. +Create a folder using the **same name** as the main page. Inside it, use the same naming convention (`.md`) to create subpages. + +The index document for a folder follows the naming convention below. Here are some examples: + +- Named as index (case-insensitive): `docs/Guides/index.md` +- Named as README (case-insensitive): `docs/Guides/README.mdx` +- Same name as the parent folder: `docs/Guides/Guides.md` + +### Markdown Links and URLs +Markdown links use file path references to link to other documentation pages. +The markdown link is composed of the file path to the target page, relative to the current file. All references to another documentation page must end with the `.md` extension. Docusaurus will automatically remove the `.md` extension from the URL during compilation. The file path is needed for Docusaurus to generate the correct URL for the page when versioning is enabled. -#### Referencing a page -The url of a page will be composed from the path of the file relative to the `content` directory. The "number" used for ordering the menu will be stripped. -**Example** docs/content/1-introduction/1-what-is.md will have http://localhost:9000/introduction/what-is as the url +The following example shows how to reference a page in various scenarios. Assume you have the following folder structure when reviewing the examples below: -In markdown you can reference this page relatively to the root of the domain using this syntax: +```shell +. +└── docs + └── docs-content + ├── architecture + │   ├── grpc.md + │   └── ip-addresses.md + ├── aws + │   └── iam-permissions.md + ├── clusters + └── security.md +``` + +#### Same Folder + +To link to a file in the same folder, you can use the following syntax: ```md -[Go to introduction](/introduction/what-is) +[Insert a description here](name_of_file.md) ``` -You can also reference pages that reside in the root `/docs` folder, such as index pages. An example is the Dev Engine index page `/docs/04.5-devx.md`. To reference the Dev Engine index page in a documentat page, referce the page by the title. +Because the file is in the same folder, you do not need to specify the path to the file. Docusaurus will automatically search the current folder for the file when compiling the markdown content.
+ +So, if you are in the file `grpc.md` and want to reference the file `ip-addresses.md`, you would use the following syntax: ```md -[Go to Dev Enging](/devx) +[A list of all Palette public IP addresses](ip-addresses.md) +``` + +#### Different Folder + +If you want to link to a file in a different folder, you have to specify the path to the file relative to where the current markdown file is located. + +If you are in the file `security.md` and want to reference the file `iam-permissions.md`, you have to use the following syntax: + +```md +[A list of all required IAM permissions for Palette](aws/iam-permissions.md) +``` + +If you are in the file `grpc.md` and want to reference the file `iam-permissions.md`, you have to use the following syntax: + +```md +[A list of all required IAM permissions for Palette](../aws/iam-permissions.md) +``` + +#### A Heading in the Same File + +To link to a heading in the same file, you can use the following syntax: + +```md +[Link to a heading in the same file](#heading-name) +``` + +The `#` symbol is used to reference a heading in the same file. The heading name must be in lowercase and spaces must be replaced with a `-` symbol. Docusaurus by default uses dashes to separate words in the URL. + +#### A Heading in a Different File + +To link to a heading in a different file, you can use the following syntax: + +```md +[Link to a heading in a different file](name_of_file.md#heading-name) +``` +For example, if you are in the file `grpc.md` and want to reference the heading `Palette gRPC API` in the file `security.md`, you would use the following syntax: + +```md +[Link to a heading in a different file](../security.md#palette-grpc-api) +``` +The important thing to remember is that the `#` comes after the file name and before the heading name. + +#### Exceptions + +As of Docusaurus `2.4.1`, the ability to link to documentation pages that belong to another plugin is unavailable. To work around this limitation, reference a documentation page by the URL path instead of the file path. + +```md +[Link to a page in another plugin](/api-content/authentication#api-key) ``` + +> [!WARNING] +> Be aware that this approach will break versioning. The user experience will be impacted as the user will be redirected to the latest version of the page. + + +In future releases, Docusaurus will support linking pages from other Docusaurus plugins. Once this feature is available, this documentation will be updated. ### Redirects To add a redirect to an existing documentation page you must add an entry to the [redirects.js](/src/shared/utils/redirects.js) file. Below is an example of what a redirect entry should look like. ```js { - fromPath: `/clusters/nested-clusters/`, - toPath: `/clusters/sandbox-clusters`, - redirectInBrowser: true, - isPermanent: true, + from: `/clusters/nested-clusters/`, + to: `/clusters/sandbox-clusters`, }, ``` -#### Multi Object Selector +### Multi Object Selector The Packs integration page and the Service Listings page use a component to display the various offerings. Packs intergations use the `` component, whereas the Service Tiers from App Mode use the `` component. @@ -196,7 +285,7 @@ To add a Service to the Service List complete the following actions: - Populate the page with content. -#### Images or other assets +### Images or other assets All images must reside in the [`assets/docs/images`](./assets/docs/images/) folder.
@@ -207,68 +296,51 @@ All images must reside in the [`assets/docs/images`](./assets/docs/images/) fold You can add a directory to to the images folder. ```md -![alt text](/introduction/clusterprofiles.png "#title=cluster profiles example") +![alt text](/introduction/clusterprofiles.png "cluster profiles example") ``` -**Image size** -Image size can be customized. You can provider either the width or the height. Units: '%', 'px' etc +**Image Loading** +Image loading can be customized. Add `eager-load` to images in the first fold of the page so they load with high priority and the page's LCP (Largest Contentful Paint) is not affected. ```md -![alt text](/clusterprofiles.png "#width=120px") +![alt text eager-load](/clusterprofiles.png) ``` -#### Tabs component +### Tabs component To use the tabs component you have to import it from the _shared_ folder -```js -import Tabs from "shared/components/ui/Tabs"; -``` - After that, you can use it like this ```js - - + + # AWS cluster Lorem ipsum dolor sit amet, consectetur adipiscing elit. - - + + # VMware cluster Lorem ipsum dolor sit amet, consectetur adipiscing elit. - + ``` **Note**: If you want to navigate from one page to another(which has tabs) and default tab to specific key then you must -- provide an identifier to the `Tabs` component `...` +- provide an identifier to the `Tabs` component `...` - when creating the link to this page, include (in the query) the identifier provided and the **value** you want (eg: /clusters?clusterType=aws#section1) - the values can be one of the tab panel keys - additionally you may refer to different sections from the inner tab using the anchor points(using the #section-1) -#### YouTube Video +### YouTube Video To use a Youtube video us the YouTube component. -First import the component. - -```js -import YouTube from 'shared/components/Video'; -``` - -Next, in your markdown file, use the component and ensure you specify a URL. +In your markdown file, use the component and ensure you specify a URL. ```js ``` -### Points of interest component - -To use this components you will have to import if from the _shared_ folder - -```js -import PointsOfInterest from "shared/components/common/PointOfInterest"; -``` +### Points of Interest -After that you can use it like this ```js tooltip content ``` @@ -336,11 +406,11 @@ import Tooltip from "shared/components/ui/Tooltip"; Hello tooltip content! It's me Mario ``` -### Code lines highlighter +### Code Lines Highlighter You can highlight specific lines in a block of code by adding **coloredLines** prop. -_Example_: ` ```js coloredLines=2-4,5-7`. +_Example_: ` ```js {2-4,5-7}`. This will color the lines from 2 to 4 and from 5 to 7. _Components_: @@ -351,7 +421,7 @@ _Components_: Example -![Example usage of codeblocks with highlighting.](assets/docs/images/readme_codeblocks_example.png) +https://docusaurus.io/docs/markdown-features/code-blocks#highlighting-with-comments #### Hide ClipBoard Button  The copy button is shown by default in all code blocks. You can disable the copy button by passing in the parameter value `hideClipboard` in the markdown declaration of the code blocks.
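As a rough sketch of what that might look like, assuming the parameter is passed on the code block's info string (the exact placement depends on the swizzled `CodeBlock` component):

````md
```shell hideClipboard
# This block renders without a copy button.
echo "example output only"
```
````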
Example -![Example](assets/docs/images/hide_copy_button_example.png) +![Example](static/assets/docs/images/hide_copy_button_example.png) Result -![Result](assets/docs/images/hide_copy_button.png) +![Result](static/assets/docs/images/hide_copy_button.png) -### Using Warning Box compponent/Info Box component or any component that wraps content +### Admonitions - Warning / Info / Tip / Danger -To use these components you will have to import them from the shared folder: -```js -import WarningBox from "@librarium/shared/src/components/WarningBox"; -import InfoBox from "@librarium/shared/src/components/InfoBox"; -``` -After that you can use them like this: +:::caution -```js - +Some **content** with _Markdown_ `syntax`. - *Markdown cotent* +::: - - +:::tip - *Markdown content* +Some **content** with _Markdown_ `syntax`. - -``` +::: -The content must have a new line at the beginning and at the end of the tag like this: -Example: +:::danger -```js - +Some **content** with _Markdown_ `syntax`. - - Point 1 - - Point 2 - - ... +::: - +https://docusaurus.io/docs/markdown-features/admonitions - - - Point 1 - - Point 2 - - ... +The content must have a new line at the beginning and at the end of the tag. - -``` ### Video To add a video, use the following syntax: ``` -`video: title: " ``` -## Check for Broken URLs -To check for broken URLs in production issue the following command but be aware this will take approximately two to three minutes. -```shell -make verify-url-links ``` - -If you want to check against your current local branch then use the following command. **Ensure the local server is stopped prior to issuing the command**. - -```shell -make verify-url-links-local + ``` -An auto generated spreedsheet is created with the name **link_report.csv**. To find broken URLs filter by the status code column. Anything with a status code not in the `200` range or with the state "broken" should be inspected. - ## Netlify Previews - By default Netlify previews are enabled for pull requests. However, some branches do not require Netlify previews. In the [netlify.toml](./netlify.toml) file, a custom script is used to determine if a Netlify preview should be created. The script is located in the [scripts/netlify.sh](./scripts/netlify.sh) file. If you need to disable Netlify previews for a branch, add the branch name to the `allowed_branches` variable in the [scripts/netlify.sh](./scripts/netlify.sh) file. -### Cron Job - -Every Monday at 6 AM UTC a GitHub Actions cron job is triggered. The cron job logic can be found in the file [url-checks.yaml](.github/workflows/url-checks.yaml). The core logic resides in [url-checker.sh](/scripts/url-checker.sh). The Slackbot application **Docs bot** is used to post the messages to the `#docs` channel. - ## Approvers/Reviewers The content in the `docs/` folder require approval from the documentation team. The list of approvers and reviewers can be found in the [OWNERS_ALIAS](./content/OWNER_ALIASES) file. Only members of the documentation team may modify this file. @@ -500,7 +533,7 @@ Approved words can be found in the [accept.txt](/vale/styles/Vocab/Internal/acce Rejected words automatically get flagged by Vale. To modify the list of rejected words, modify the [reject.txt](/vale/styles/Vocab/Internal/reject.txt) file. -# Release +## Release To create a new release, use the following steps: @@ -509,8 +542,60 @@ To create a new release, use the following steps: 3. Push up the commit and create a new pull request (PR). 4. 
Merge PRs related to the upcoming release into the `release-X-X` branch. 5. Merge the release branch. +6. Create a new branch from the `master` branch. Use the following naming pattern `version-X-X`. This branch is used for versioning the documentation. +7. Push the new version branch to the remote repository. +8. Trigger a new build so that the new version is published. The semantic-release logic and the GitHub Actions in the [release.yaml](.github/workflows/release.yaml) will ensure the new release tag is created. > **Warning** > Do not use `feat`,`perf` or `fix` or other semantic-release key words that trigger a version change. Use the commit message prefix `docs: yourMessageHere` for regular documentation commits. + +## Versioning + +> [!NOTE] +> Detailed documentation for versioning can be found in the internal [Versioning](https://spectrocloud.atlassian.net/wiki/spaces/DE/pages/1962639377/Versioning) guide. + +All versioned content belongs to a specific version branch. The version branch name follows the naming convention `version-X-X`. The version branch is used to generate versioned content. + +There are three files that are used for generating versioned content: + +- [`versions.sh`](./scripts/versions.sh) - A bash script that loops through all the version branches and generates the versioned content. + +- [`update_docusaurs_config.js`](./docsearch.config.json) - A Node.js script that updates the `docusaurus.config.js` file with all the required versioning parameters. + +- [`versionsOverride.json`](./versionsOverride.json) - A JSON file that contains the versioning overrides. These values are used to update the `docusaurus.config.js` file with non-default values. + + +### Build Versioned Content Locally + +To build versioned content locally, use the following steps: + +1. Issue the following command to generate the versioned content. + +```shell +make versions +``` + +2. Start a local development server to view the versioned content. + +```shell +make start +``` + + +3. Compile the versioned content to ensure a successful build. + +```shell +make build +``` + +4. Remove the `versions.json` file and discard the changes to the `docusaurus.config.js` file. + +```shell +rm versions.json +``` + + +> [!WARNING] +> The `docusaurus.config.js` file is updated by the [`update_docusaurs_config.js`](./docusaurus.config.js) script. DO NOT commit this file with the updated changes.
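For step 4, a minimal sketch of the cleanup with Git, assuming you have no other local edits to `docusaurus.config.js` that you want to keep:

```shell
# Remove the generated versions file and revert the config changes made by the versioning script.
rm versions.json
git checkout -- docusaurus.config.js
```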
\ No newline at end of file diff --git a/__mocks__/__gatsby__.js b/__mocks__/__gatsby__.js deleted file mode 100644 index f759ef4812..0000000000 --- a/__mocks__/__gatsby__.js +++ /dev/null @@ -1,32 +0,0 @@ -const React = require("react"); -const gatsby = jest.requireActual("gatsby"); - -module.exports = { - ...gatsby, - graphql: jest.fn(), - Link: jest.fn().mockImplementation( - // these props are invalid for an `a` tag - ({ - activeClassName, - activeStyle, - getProps, - innerRef, - partiallyActive, - ref, - replace, - to, - ...rest - }) => - React.createElement("a", { - ...rest, - href: to, - }) - ), - Slice: jest.fn().mockImplementation(({ alias, ...rest }) => - React.createElement("div", { - ...rest, - "data-test-slice-alias": alias, - }) - ), - useStaticQuery: jest.fn(), -}; diff --git a/__mocks__/file-mock.ts b/__mocks__/file-mock.ts new file mode 100644 index 0000000000..0a445d0600 --- /dev/null +++ b/__mocks__/file-mock.ts @@ -0,0 +1 @@ +module.exports = "test-file-stub"; diff --git a/__mocks__/style-mock.ts b/__mocks__/style-mock.ts new file mode 100644 index 0000000000..ff8b4c5632 --- /dev/null +++ b/__mocks__/style-mock.ts @@ -0,0 +1 @@ +export default {}; diff --git a/__tests__/hitComps.test.js b/__tests__/hitComps.test.js deleted file mode 100644 index 69ef00fc8c..0000000000 --- a/__tests__/hitComps.test.js +++ /dev/null @@ -1,23 +0,0 @@ -import React from "react"; -import { PageHit } from "../src/shared/layouts/Default/search/hitComps"; -import { render, fireEvent } from "@testing-library/react"; -import { InstantSearch } from "react-instantsearch-dom"; -import algoliasearch from "algoliasearch/lite"; - -describe("Displays hitComps Component", () => { - it("renders the link with the correct URL", () => { - const mockNavigate = jest.fn(); - window.___navigate = mockNavigate; - const searchClient = algoliasearch("1234", "abcd"); - const hit = { slug: "/example-slug" }; - const { getByRole } = render( - - - - ); - const link = getByRole("link"); - expect(link.getAttribute("href")).toBe("/example-slug"); - fireEvent.click(link); - expect(window.___navigate.mock.calls[0][0]).toBe("/example-slug"); - }); -}); diff --git a/__tests__/pre.test.js b/__tests__/pre.test.js deleted file mode 100644 index bc618f9117..0000000000 --- a/__tests__/pre.test.js +++ /dev/null @@ -1,39 +0,0 @@ -import React from "react"; -import { render } from "@testing-library/react"; -import Pre from "../src/shared/mdx/components/Pre"; -import Clipboard from "clipboard"; - -jest.mock("clipboard"); - -const TextComponent = (props) => { - return
{props.text}
; -}; - -describe("Displays the correct title", () => { - afterEach(() => { - Clipboard.mockRestore(); - }); - - it("renders the component with default props", () => { - const { getByText, getByRole } = render( -
-        
-      
- ); - - expect(getByText("Testing code")).toBeInTheDocument(); - - expect(getByRole("button", { name: /copy/i })).toBeInTheDocument(); - expect(Clipboard).toBeCalled(); - }); - - it("does not render the copy button when hideClipboard prop is true", () => { - const { queryByRole } = render( -
-        
-      
- ); - expect(queryByRole("button", { name: /copy/i })).toBeNull(); - expect(Clipboard).not.toHaveBeenCalled(); - }); -}); diff --git a/api.mustache b/api.mustache new file mode 100644 index 0000000000..9a78cab49c --- /dev/null +++ b/api.mustache @@ -0,0 +1,39 @@ +--- +id: {{{id}}} +title: "{{{title}}}" +description: "{{{frontMatter.description}}}" +{{^api}} +sidebar_label: Introduction +{{/api}} +{{#api}} +sidebar_label: "{{{title}}}" +{{/api}} +{{^api}} +sidebar_position: 0 +{{/api}} +hide_title: true +{{#api}} +hide_table_of_contents: true +{{/api}} +{{#json}} +api: {{{json}}} +{{/json}} +{{#api.method}} +sidebar_class_name: "{{{api.method}}} api-method" +{{/api.method}} +{{#infoPath}} +info_path: {{{infoPath}}} +{{/infoPath}} +custom_edit_url: null +{{#frontMatter.proxy}} +proxy: {{{frontMatter.proxy}}} +{{/frontMatter.proxy}} +{{#frontMatter.hide_send_button}} +hide_send_button: true +{{/frontMatter.hide_send_button}} +{{#frontMatter.show_extensions}} +show_extensions: true +{{/frontMatter.show_extensions}} +--- + +{{{markdown}}} \ No newline at end of file diff --git a/apisidebar.js b/apisidebar.js new file mode 100644 index 0000000000..7f63040c75 --- /dev/null +++ b/apisidebar.js @@ -0,0 +1,47 @@ +// const paletteAPIVersions = [ +// { +// version: "1.0.0", +// label: "V1", +// baseUrl: "/api/introduction", +// }, +// ]; + +// const { +// versionSelector, +// versionCrumb, +// } = require("docusaurus-plugin-openapi-docs/lib/sidebars/utils"); + +module.exports = { + apiSidebar: [ + { + type: "doc", + id: "introduction", + label: "Introduction", + }, + { + type: "doc", + id: "samples", + label: "Example Usage", + }, + { + type: "doc", + id: "postman-collection", + label: "Postman collection", + }, + { + type: "category", + label: "Palette API V1", + link: { + type: "generated-index", + title: "Palette API V1", + }, + items: (() => { + try { + return require("./docs/api-content/api-docs/v1/sidebar.js"); + } catch (error) { + return []; + } + })(), + }, + ], +}; diff --git a/assets/blue-hero-background.png b/assets/blue-hero-background.png deleted file mode 100644 index 67d37f7e1f..0000000000 Binary files a/assets/blue-hero-background.png and /dev/null differ diff --git a/assets/clouds/aws.png b/assets/clouds/aws.png deleted file mode 100644 index 25eedf36f8..0000000000 Binary files a/assets/clouds/aws.png and /dev/null differ diff --git a/assets/clouds/azure.png b/assets/clouds/azure.png deleted file mode 100644 index 6583da36ed..0000000000 Binary files a/assets/clouds/azure.png and /dev/null differ diff --git a/assets/clouds/google_cloud.png b/assets/clouds/google_cloud.png deleted file mode 100644 index 04a4513614..0000000000 Binary files a/assets/clouds/google_cloud.png and /dev/null differ diff --git a/assets/clouds/maas.png b/assets/clouds/maas.png deleted file mode 100644 index 147c73acbf..0000000000 Binary files a/assets/clouds/maas.png and /dev/null differ diff --git a/assets/clouds/openshift.png b/assets/clouds/openshift.png deleted file mode 100644 index 255db197a5..0000000000 Binary files a/assets/clouds/openshift.png and /dev/null differ diff --git a/assets/clouds/openstack.png b/assets/clouds/openstack.png deleted file mode 100644 index 3c2b9a6f56..0000000000 Binary files a/assets/clouds/openstack.png and /dev/null differ diff --git a/assets/clouds/vmware.png b/assets/clouds/vmware.png deleted file mode 100644 index 758acc3114..0000000000 Binary files a/assets/clouds/vmware.png and /dev/null differ diff --git 
a/assets/docs/images/045-devx_resource-quota_evaluation-process.png b/assets/docs/images/045-devx_resource-quota_evaluation-process.png deleted file mode 100644 index 693605e996..0000000000 Binary files a/assets/docs/images/045-devx_resource-quota_evaluation-process.png and /dev/null differ diff --git a/assets/docs/images/045-devx_resource-quota_is-beehive-enabled.png b/assets/docs/images/045-devx_resource-quota_is-beehive-enabled.png deleted file mode 100644 index 73e4a8868e..0000000000 Binary files a/assets/docs/images/045-devx_resource-quota_is-beehive-enabled.png and /dev/null differ diff --git a/assets/docs/images/2-intro.png b/assets/docs/images/2-intro.png deleted file mode 100644 index 66fea802fb..0000000000 Binary files a/assets/docs/images/2-intro.png and /dev/null differ diff --git a/assets/docs/images/2-what-is-sc.png b/assets/docs/images/2-what-is-sc.png deleted file mode 100644 index ff47c3482e..0000000000 Binary files a/assets/docs/images/2-what-is-sc.png and /dev/null differ diff --git a/assets/docs/images/3-intro.png b/assets/docs/images/3-intro.png deleted file mode 100644 index 3c04f15e94..0000000000 Binary files a/assets/docs/images/3-intro.png and /dev/null differ diff --git a/assets/docs/images/4-intro.png b/assets/docs/images/4-intro.png deleted file mode 100644 index 53f6c2b35f..0000000000 Binary files a/assets/docs/images/4-intro.png and /dev/null differ diff --git a/assets/docs/images/5-intro.png b/assets/docs/images/5-intro.png deleted file mode 100644 index 5c3f0ffb77..0000000000 Binary files a/assets/docs/images/5-intro.png and /dev/null differ diff --git a/assets/docs/images/addon_profile.png b/assets/docs/images/addon_profile.png deleted file mode 100644 index 344178dcf9..0000000000 Binary files a/assets/docs/images/addon_profile.png and /dev/null differ diff --git a/assets/docs/images/admin-dashboard.png b/assets/docs/images/admin-dashboard.png deleted file mode 100644 index a7b98aa010..0000000000 Binary files a/assets/docs/images/admin-dashboard.png and /dev/null differ diff --git a/assets/docs/images/admin_dashboard.png b/assets/docs/images/admin_dashboard.png deleted file mode 100644 index ee625cc791..0000000000 Binary files a/assets/docs/images/admin_dashboard.png and /dev/null differ diff --git a/assets/docs/images/architecture_architecture-on-prem-detailed.png b/assets/docs/images/architecture_architecture-on-prem-detailed.png deleted file mode 100644 index 5c3f0ffb77..0000000000 Binary files a/assets/docs/images/architecture_architecture-on-prem-detailed.png and /dev/null differ diff --git a/assets/docs/images/architecture_architecture-overview-deployment-models.png b/assets/docs/images/architecture_architecture-overview-deployment-models.png deleted file mode 100644 index beb7c4ec6d..0000000000 Binary files a/assets/docs/images/architecture_architecture-overview-deployment-models.png and /dev/null differ diff --git a/assets/docs/images/architecture_architecture-overview_on-prem.png b/assets/docs/images/architecture_architecture-overview_on-prem.png deleted file mode 100644 index 53f6c2b35f..0000000000 Binary files a/assets/docs/images/architecture_architecture-overview_on-prem.png and /dev/null differ diff --git a/assets/docs/images/architecture_architecture-overview_saas.png b/assets/docs/images/architecture_architecture-overview_saas.png deleted file mode 100644 index 3c04f15e94..0000000000 Binary files a/assets/docs/images/architecture_architecture-overview_saas.png and /dev/null differ diff --git 
a/assets/docs/images/architecture_networking-ports_network-diagram.png b/assets/docs/images/architecture_networking-ports_network-diagram.png deleted file mode 100644 index 3e76a1dc78..0000000000 Binary files a/assets/docs/images/architecture_networking-ports_network-diagram.png and /dev/null differ diff --git a/assets/docs/images/architecture_networking-ports_network-diagram_nats.png b/assets/docs/images/architecture_networking-ports_network-diagram_nats.png deleted file mode 100644 index eb6c17583a..0000000000 Binary files a/assets/docs/images/architecture_networking-ports_network-diagram_nats.png and /dev/null differ diff --git a/assets/docs/images/architecture_networking-ports_on_prem_network-diagram.png b/assets/docs/images/architecture_networking-ports_on_prem_network-diagram.png deleted file mode 100644 index eb6c17583a..0000000000 Binary files a/assets/docs/images/architecture_networking-ports_on_prem_network-diagram.png and /dev/null differ diff --git a/assets/docs/images/architecture_networking-ports_saas-network-diagram-edge.png b/assets/docs/images/architecture_networking-ports_saas-network-diagram-edge.png deleted file mode 100644 index 0f17d6940b..0000000000 Binary files a/assets/docs/images/architecture_networking-ports_saas-network-diagram-edge.png and /dev/null differ diff --git a/assets/docs/images/architecture_networking-ports_saas-network-diagram-edge_nats.png b/assets/docs/images/architecture_networking-ports_saas-network-diagram-edge_nats.png deleted file mode 100644 index f68d453466..0000000000 Binary files a/assets/docs/images/architecture_networking-ports_saas-network-diagram-edge_nats.png and /dev/null differ diff --git a/assets/docs/images/architecture_networking-ports_saas-network-diagram.png b/assets/docs/images/architecture_networking-ports_saas-network-diagram.png deleted file mode 100644 index 05143c73bf..0000000000 Binary files a/assets/docs/images/architecture_networking-ports_saas-network-diagram.png and /dev/null differ diff --git a/assets/docs/images/architecture_orchestartion-spectrocloud_distributed-flow.png b/assets/docs/images/architecture_orchestartion-spectrocloud_distributed-flow.png deleted file mode 100644 index 97ee1891ed..0000000000 Binary files a/assets/docs/images/architecture_orchestartion-spectrocloud_distributed-flow.png and /dev/null differ diff --git a/assets/docs/images/architecture_orchestartion-spectrocloud_provision-flow.png b/assets/docs/images/architecture_orchestartion-spectrocloud_provision-flow.png deleted file mode 100644 index 3d0ead1fe4..0000000000 Binary files a/assets/docs/images/architecture_orchestartion-spectrocloud_provision-flow.png and /dev/null differ diff --git a/assets/docs/images/aws-tech-partner.png b/assets/docs/images/aws-tech-partner.png deleted file mode 100644 index 177a8ea1c6..0000000000 Binary files a/assets/docs/images/aws-tech-partner.png and /dev/null differ diff --git a/assets/docs/images/azure-app-registration.png b/assets/docs/images/azure-app-registration.png deleted file mode 100644 index 8f74b81e43..0000000000 Binary files a/assets/docs/images/azure-app-registration.png and /dev/null differ diff --git a/assets/docs/images/cks-tutorial-images/cks-bastion-host.png b/assets/docs/images/cks-tutorial-images/cks-bastion-host.png deleted file mode 100644 index 4956674e8a..0000000000 Binary files a/assets/docs/images/cks-tutorial-images/cks-bastion-host.png and /dev/null differ diff --git a/assets/docs/images/cks-tutorial-images/cks-cleanup.png b/assets/docs/images/cks-tutorial-images/cks-cleanup.png 
deleted file mode 100644 index db5c16bc53..0000000000 Binary files a/assets/docs/images/cks-tutorial-images/cks-cleanup.png and /dev/null differ diff --git a/assets/docs/images/cks-tutorial-images/cks-instance-ip.png b/assets/docs/images/cks-tutorial-images/cks-instance-ip.png deleted file mode 100644 index 4aad5f6a63..0000000000 Binary files a/assets/docs/images/cks-tutorial-images/cks-instance-ip.png and /dev/null differ diff --git a/assets/docs/images/cks-tutorial-images/cks-instance-summary.png b/assets/docs/images/cks-tutorial-images/cks-instance-summary.png deleted file mode 100644 index d5d5cf1cd6..0000000000 Binary files a/assets/docs/images/cks-tutorial-images/cks-instance-summary.png and /dev/null differ diff --git a/assets/docs/images/cks-tutorial-images/cks-preview-stack.png b/assets/docs/images/cks-tutorial-images/cks-preview-stack.png deleted file mode 100644 index f71b0aa197..0000000000 Binary files a/assets/docs/images/cks-tutorial-images/cks-preview-stack.png and /dev/null differ diff --git a/assets/docs/images/cks-tutorial-images/cks-workshop.png b/assets/docs/images/cks-tutorial-images/cks-workshop.png deleted file mode 100644 index 41c2c98cdd..0000000000 Binary files a/assets/docs/images/cks-tutorial-images/cks-workshop.png and /dev/null differ diff --git a/assets/docs/images/cks-tutorial-images/my-cks-cloud-account.png b/assets/docs/images/cks-tutorial-images/my-cks-cloud-account.png deleted file mode 100644 index 4a1590e22c..0000000000 Binary files a/assets/docs/images/cks-tutorial-images/my-cks-cloud-account.png and /dev/null differ diff --git a/assets/docs/images/cks-tutorial-images/my-cks-workshop-basic.png b/assets/docs/images/cks-tutorial-images/my-cks-workshop-basic.png deleted file mode 100644 index e48ef46083..0000000000 Binary files a/assets/docs/images/cks-tutorial-images/my-cks-workshop-basic.png and /dev/null differ diff --git a/assets/docs/images/client-config.png b/assets/docs/images/client-config.png deleted file mode 100644 index 01d471fe51..0000000000 Binary files a/assets/docs/images/client-config.png and /dev/null differ diff --git a/assets/docs/images/cluster-profiles_byoos_image-builder_workflow-diagram.png b/assets/docs/images/cluster-profiles_byoos_image-builder_workflow-diagram.png deleted file mode 100644 index 4d5890ceac..0000000000 Binary files a/assets/docs/images/cluster-profiles_byoos_image-builder_workflow-diagram.png and /dev/null differ diff --git a/assets/docs/images/cluster_conditions.png b/assets/docs/images/cluster_conditions.png deleted file mode 100644 index 6b35c527ab..0000000000 Binary files a/assets/docs/images/cluster_conditions.png and /dev/null differ diff --git a/assets/docs/images/cluster_config_override.png b/assets/docs/images/cluster_config_override.png deleted file mode 100644 index 0b4ff408c2..0000000000 Binary files a/assets/docs/images/cluster_config_override.png and /dev/null differ diff --git a/assets/docs/images/cluster_list_update_available.png b/assets/docs/images/cluster_list_update_available.png deleted file mode 100644 index 4211c5dbb8..0000000000 Binary files a/assets/docs/images/cluster_list_update_available.png and /dev/null differ diff --git a/assets/docs/images/cluster_profile.png b/assets/docs/images/cluster_profile.png deleted file mode 100644 index dd60fd7ba0..0000000000 Binary files a/assets/docs/images/cluster_profile.png and /dev/null differ diff --git a/assets/docs/images/cluster_profile_azure.png b/assets/docs/images/cluster_profile_azure.png deleted file mode 100644 index 
1fcf5f7270..0000000000 Binary files a/assets/docs/images/cluster_profile_azure.png and /dev/null differ diff --git a/assets/docs/images/cluster_profile_gcp.png b/assets/docs/images/cluster_profile_gcp.png deleted file mode 100644 index 8843b32382..0000000000 Binary files a/assets/docs/images/cluster_profile_gcp.png and /dev/null differ diff --git a/assets/docs/images/cluster_profile_new.png b/assets/docs/images/cluster_profile_new.png deleted file mode 100644 index c97eef2e01..0000000000 Binary files a/assets/docs/images/cluster_profile_new.png and /dev/null differ diff --git a/assets/docs/images/cluster_profiles.png b/assets/docs/images/cluster_profiles.png deleted file mode 100644 index b55e145e17..0000000000 Binary files a/assets/docs/images/cluster_profiles.png and /dev/null differ diff --git a/assets/docs/images/cluster_services.png b/assets/docs/images/cluster_services.png deleted file mode 100644 index 1f3e7e3a07..0000000000 Binary files a/assets/docs/images/cluster_services.png and /dev/null differ diff --git a/assets/docs/images/cluster_update_available_detail.png b/assets/docs/images/cluster_update_available_detail.png deleted file mode 100644 index 407e2d197f..0000000000 Binary files a/assets/docs/images/cluster_update_available_detail.png and /dev/null differ diff --git a/assets/docs/images/cluster_usage_metrics.png b/assets/docs/images/cluster_usage_metrics.png deleted file mode 100644 index 9e649cac66..0000000000 Binary files a/assets/docs/images/cluster_usage_metrics.png and /dev/null differ diff --git a/assets/docs/images/clusters_aws_architecture_aws_cluster_architecture.png b/assets/docs/images/clusters_aws_architecture_aws_cluster_architecture.png deleted file mode 100644 index d38831cfb4..0000000000 Binary files a/assets/docs/images/clusters_aws_architecture_aws_cluster_architecture.png and /dev/null differ diff --git a/assets/docs/images/clusters_aws_create-and-manage-aws-eks-cluster_architecture.png b/assets/docs/images/clusters_aws_create-and-manage-aws-eks-cluster_architecture.png deleted file mode 100644 index 45adce56ee..0000000000 Binary files a/assets/docs/images/clusters_aws_create-and-manage-aws-eks-cluster_architecture.png and /dev/null differ diff --git a/assets/docs/images/clusters_azure_architecture_aks-diagram.png b/assets/docs/images/clusters_azure_architecture_aks-diagram.png deleted file mode 100644 index aade0de41e..0000000000 Binary files a/assets/docs/images/clusters_azure_architecture_aks-diagram.png and /dev/null differ diff --git a/assets/docs/images/clusters_azure_architecture_iaas-overview.png b/assets/docs/images/clusters_azure_architecture_iaas-overview.png deleted file mode 100644 index 3bd4e8cc35..0000000000 Binary files a/assets/docs/images/clusters_azure_architecture_iaas-overview.png and /dev/null differ diff --git a/assets/docs/images/clusters_byoos_image-builder_cluster-profile-byoos-yaml.png b/assets/docs/images/clusters_byoos_image-builder_cluster-profile-byoos-yaml.png deleted file mode 100644 index 8323a5d112..0000000000 Binary files a/assets/docs/images/clusters_byoos_image-builder_cluster-profile-byoos-yaml.png and /dev/null differ diff --git a/assets/docs/images/clusters_cluster-groups_cluster-group-backups_backup-overview.png b/assets/docs/images/clusters_cluster-groups_cluster-group-backups_backup-overview.png deleted file mode 100644 index 8e28022150..0000000000 Binary files a/assets/docs/images/clusters_cluster-groups_cluster-group-backups_backup-overview.png and /dev/null differ diff --git 
a/assets/docs/images/clusters_cluster-groups_index-page.png b/assets/docs/images/clusters_cluster-groups_index-page.png deleted file mode 100644 index 29c587dd42..0000000000 Binary files a/assets/docs/images/clusters_cluster-groups_index-page.png and /dev/null differ diff --git a/assets/docs/images/clusters_cluster-management_certificate-management_certificate-renew-page.png b/assets/docs/images/clusters_cluster-management_certificate-management_certificate-renew-page.png deleted file mode 100644 index ed79036a66..0000000000 Binary files a/assets/docs/images/clusters_cluster-management_certificate-management_certificate-renew-page.png and /dev/null differ diff --git a/assets/docs/images/clusters_cluster-management_certificate-management_cluster-details-page.png b/assets/docs/images/clusters_cluster-management_certificate-management_cluster-details-page.png deleted file mode 100644 index 9ce787879b..0000000000 Binary files a/assets/docs/images/clusters_cluster-management_certificate-management_cluster-details-page.png and /dev/null differ diff --git a/assets/docs/images/clusters_cluster-management_certificate-management_control-plane-only-change.png b/assets/docs/images/clusters_cluster-management_certificate-management_control-plane-only-change.png deleted file mode 100644 index c6df295033..0000000000 Binary files a/assets/docs/images/clusters_cluster-management_certificate-management_control-plane-only-change.png and /dev/null differ diff --git a/assets/docs/images/clusters_cluster-management_cluster-rbac_cluster-creation-settings.png b/assets/docs/images/clusters_cluster-management_cluster-rbac_cluster-creation-settings.png deleted file mode 100644 index 1f8fe45fc4..0000000000 Binary files a/assets/docs/images/clusters_cluster-management_cluster-rbac_cluster-creation-settings.png and /dev/null differ diff --git a/assets/docs/images/clusters_cluster-management_cluster-rbac_cluster-settings.png b/assets/docs/images/clusters_cluster-management_cluster-rbac_cluster-settings.png deleted file mode 100644 index 3010b50f51..0000000000 Binary files a/assets/docs/images/clusters_cluster-management_cluster-rbac_cluster-settings.png and /dev/null differ diff --git a/assets/docs/images/clusters_cluster-management_cluster-rbac_cluster-subject-group.png b/assets/docs/images/clusters_cluster-management_cluster-rbac_cluster-subject-group.png deleted file mode 100644 index 9549c999cc..0000000000 Binary files a/assets/docs/images/clusters_cluster-management_cluster-rbac_cluster-subject-group.png and /dev/null differ diff --git a/assets/docs/images/clusters_cluster-management_grafana_spectro_metrics.png b/assets/docs/images/clusters_cluster-management_grafana_spectro_metrics.png deleted file mode 100644 index 4dc9caae16..0000000000 Binary files a/assets/docs/images/clusters_cluster-management_grafana_spectro_metrics.png and /dev/null differ diff --git a/assets/docs/images/clusters_cluster-management_image-swap_kubernetes-layer-yaml.png b/assets/docs/images/clusters_cluster-management_image-swap_kubernetes-layer-yaml.png deleted file mode 100644 index 4bdb7d5e9d..0000000000 Binary files a/assets/docs/images/clusters_cluster-management_image-swap_kubernetes-layer-yaml.png and /dev/null differ diff --git a/assets/docs/images/clusters_cluster-management_namespace-create.png b/assets/docs/images/clusters_cluster-management_namespace-create.png deleted file mode 100644 index 003b3d13e0..0000000000 Binary files a/assets/docs/images/clusters_cluster-management_namespace-create.png and /dev/null differ diff --git 
a/assets/docs/images/clusters_cluster-management_ns-resource-quota.png b/assets/docs/images/clusters_cluster-management_ns-resource-quota.png deleted file mode 100644 index 94346d7a7c..0000000000 Binary files a/assets/docs/images/clusters_cluster-management_ns-resource-quota.png and /dev/null differ diff --git a/assets/docs/images/clusters_cluster-management_palette-webctl_cluster-details-overview.png b/assets/docs/images/clusters_cluster-management_palette-webctl_cluster-details-overview.png deleted file mode 100644 index c44889c694..0000000000 Binary files a/assets/docs/images/clusters_cluster-management_palette-webctl_cluster-details-overview.png and /dev/null differ diff --git a/assets/docs/images/clusters_edge-forge-workflow_build-images_edge-artifact-result.png b/assets/docs/images/clusters_edge-forge-workflow_build-images_edge-artifact-result.png deleted file mode 100644 index 240c9e0500..0000000000 Binary files a/assets/docs/images/clusters_edge-forge-workflow_build-images_edge-artifact-result.png and /dev/null differ diff --git a/assets/docs/images/clusters_edge-forge-workflow_build-images_edge-cli-output.png b/assets/docs/images/clusters_edge-forge-workflow_build-images_edge-cli-output.png deleted file mode 100644 index ac0faaf2e0..0000000000 Binary files a/assets/docs/images/clusters_edge-forge-workflow_build-images_edge-cli-output.png and /dev/null differ diff --git a/assets/docs/images/clusters_edge-forge-workflow_build-images_edge-cli-show.png b/assets/docs/images/clusters_edge-forge-workflow_build-images_edge-cli-show.png deleted file mode 100644 index 15aacf749f..0000000000 Binary files a/assets/docs/images/clusters_edge-forge-workflow_build-images_edge-cli-show.png and /dev/null differ diff --git a/assets/docs/images/clusters_edge-forge-workflow_edgeforge-workflow_components-diagram.png b/assets/docs/images/clusters_edge-forge-workflow_edgeforge-workflow_components-diagram.png deleted file mode 100644 index 17fca922bc..0000000000 Binary files a/assets/docs/images/clusters_edge-forge-workflow_edgeforge-workflow_components-diagram.png and /dev/null differ diff --git a/assets/docs/images/clusters_edge_cloud-init_boot-order-squence.png b/assets/docs/images/clusters_edge_cloud-init_boot-order-squence.png deleted file mode 100644 index 38beb1178c..0000000000 Binary files a/assets/docs/images/clusters_edge_cloud-init_boot-order-squence.png and /dev/null differ diff --git a/assets/docs/images/clusters_edge_cloud-init_cloud-init-stages-supported.png b/assets/docs/images/clusters_edge_cloud-init_cloud-init-stages-supported.png deleted file mode 100644 index be32486385..0000000000 Binary files a/assets/docs/images/clusters_edge_cloud-init_cloud-init-stages-supported.png and /dev/null differ diff --git a/assets/docs/images/clusters_edge_edge-arch-drawing.png b/assets/docs/images/clusters_edge_edge-arch-drawing.png deleted file mode 100644 index 201929d781..0000000000 Binary files a/assets/docs/images/clusters_edge_edge-arch-drawing.png and /dev/null differ diff --git a/assets/docs/images/clusters_edge_edge-forge-workflow_build-images_build-artifacts_overarching.png b/assets/docs/images/clusters_edge_edge-forge-workflow_build-images_build-artifacts_overarching.png deleted file mode 100644 index 1d514b860a..0000000000 Binary files a/assets/docs/images/clusters_edge_edge-forge-workflow_build-images_build-artifacts_overarching.png and /dev/null differ diff --git a/assets/docs/images/clusters_edge_edge-forge-workflow_build-images_build-artifacts_url.png 
b/assets/docs/images/clusters_edge_edge-forge-workflow_build-images_build-artifacts_url.png deleted file mode 100644 index 37e1470479..0000000000 Binary files a/assets/docs/images/clusters_edge_edge-forge-workflow_build-images_build-artifacts_url.png and /dev/null differ diff --git a/assets/docs/images/clusters_edge_edge-forge-workflow_build-images_build-project_id.png b/assets/docs/images/clusters_edge_edge-forge-workflow_build-images_build-project_id.png deleted file mode 100644 index a06d1a4d74..0000000000 Binary files a/assets/docs/images/clusters_edge_edge-forge-workflow_build-images_build-project_id.png and /dev/null differ diff --git a/assets/docs/images/clusters_edge_edgeforge-workflow_iso-diagram.png b/assets/docs/images/clusters_edge_edgeforge-workflow_iso-diagram.png deleted file mode 100644 index 81d71ee550..0000000000 Binary files a/assets/docs/images/clusters_edge_edgeforge-workflow_iso-diagram.png and /dev/null differ diff --git a/assets/docs/images/clusters_edge_edgeforge-workflow_provider-diagram.png b/assets/docs/images/clusters_edge_edgeforge-workflow_provider-diagram.png deleted file mode 100644 index 389a644bab..0000000000 Binary files a/assets/docs/images/clusters_edge_edgeforge-workflow_provider-diagram.png and /dev/null differ diff --git a/assets/docs/images/clusters_imported-clusters_attach-add-on-profile_cluster-details-app-deployed.png b/assets/docs/images/clusters_imported-clusters_attach-add-on-profile_cluster-details-app-deployed.png deleted file mode 100644 index 692e5e9249..0000000000 Binary files a/assets/docs/images/clusters_imported-clusters_attach-add-on-profile_cluster-details-app-deployed.png and /dev/null differ diff --git a/assets/docs/images/clusters_imported-clusters_attach-add-on-profile_cluster-details-profile-tab.png b/assets/docs/images/clusters_imported-clusters_attach-add-on-profile_cluster-details-profile-tab.png deleted file mode 100644 index 49863bb5f7..0000000000 Binary files a/assets/docs/images/clusters_imported-clusters_attach-add-on-profile_cluster-details-profile-tab.png and /dev/null differ diff --git a/assets/docs/images/clusters_imported-clusters_attach-add-on-profile_manfest-view.png b/assets/docs/images/clusters_imported-clusters_attach-add-on-profile_manfest-view.png deleted file mode 100644 index 00c7d00a8d..0000000000 Binary files a/assets/docs/images/clusters_imported-clusters_attach-add-on-profile_manfest-view.png and /dev/null differ diff --git a/assets/docs/images/clusters_imported-clusters_full-migration-with-app.png b/assets/docs/images/clusters_imported-clusters_full-migration-with-app.png deleted file mode 100644 index 900560c506..0000000000 Binary files a/assets/docs/images/clusters_imported-clusters_full-migration-with-app.png and /dev/null differ diff --git a/assets/docs/images/clusters_imported-clusters_full-permissions-instructions.png b/assets/docs/images/clusters_imported-clusters_full-permissions-instructions.png deleted file mode 100644 index 3b1658aded..0000000000 Binary files a/assets/docs/images/clusters_imported-clusters_full-permissions-instructions.png and /dev/null differ diff --git a/assets/docs/images/clusters_imported-clusters_full-permissions.png b/assets/docs/images/clusters_imported-clusters_full-permissions.png deleted file mode 100644 index 2773e0da73..0000000000 Binary files a/assets/docs/images/clusters_imported-clusters_full-permissions.png and /dev/null differ diff --git a/assets/docs/images/clusters_imported-clusters_migrate-full-permissions_cluster-details-page-import-complete.png 
b/assets/docs/images/clusters_imported-clusters_migrate-full-permissions_cluster-details-page-import-complete.png deleted file mode 100644 index efca7572d3..0000000000 Binary files a/assets/docs/images/clusters_imported-clusters_migrate-full-permissions_cluster-details-page-import-complete.png and /dev/null differ diff --git a/assets/docs/images/clusters_imported-clusters_migrate-full-permissions_cluster-details-page.png b/assets/docs/images/clusters_imported-clusters_migrate-full-permissions_cluster-details-page.png deleted file mode 100644 index 5701510a87..0000000000 Binary files a/assets/docs/images/clusters_imported-clusters_migrate-full-permissions_cluster-details-page.png and /dev/null differ diff --git a/assets/docs/images/clusters_imported-clusters_read-only-instructions.png b/assets/docs/images/clusters_imported-clusters_read-only-instructions.png deleted file mode 100644 index 9efb6f2684..0000000000 Binary files a/assets/docs/images/clusters_imported-clusters_read-only-instructions.png and /dev/null differ diff --git a/assets/docs/images/clusters_imported-clusters_read-only.png b/assets/docs/images/clusters_imported-clusters_read-only.png deleted file mode 100644 index 6145df72f7..0000000000 Binary files a/assets/docs/images/clusters_imported-clusters_read-only.png and /dev/null differ diff --git a/assets/docs/images/clusters_maas_install-manage-mass-pcg_diagram-of-mass-with-pcg.png b/assets/docs/images/clusters_maas_install-manage-mass-pcg_diagram-of-mass-with-pcg.png deleted file mode 100644 index f4f31729f8..0000000000 Binary files a/assets/docs/images/clusters_maas_install-manage-mass-pcg_diagram-of-mass-with-pcg.png and /dev/null differ diff --git a/assets/docs/images/clusters_maas_maas-dns-setup.png b/assets/docs/images/clusters_maas_maas-dns-setup.png deleted file mode 100644 index 8d14e1eb54..0000000000 Binary files a/assets/docs/images/clusters_maas_maas-dns-setup.png and /dev/null differ diff --git a/assets/docs/images/clusters_monitoring_deploy-monitor-stack_https-architecture.png b/assets/docs/images/clusters_monitoring_deploy-monitor-stack_https-architecture.png deleted file mode 100644 index 03dbbf3dda..0000000000 Binary files a/assets/docs/images/clusters_monitoring_deploy-monitor-stack_https-architecture.png and /dev/null differ diff --git a/assets/docs/images/clusters_monitoring_deploy-monitor-stack_loadbalancers.png b/assets/docs/images/clusters_monitoring_deploy-monitor-stack_loadbalancers.png deleted file mode 100644 index 5550f577c8..0000000000 Binary files a/assets/docs/images/clusters_monitoring_deploy-monitor-stack_loadbalancers.png and /dev/null differ diff --git a/assets/docs/images/clusters_site-deployment_model-profile_byoos-pack-yaml.png b/assets/docs/images/clusters_site-deployment_model-profile_byoos-pack-yaml.png deleted file mode 100644 index b1dadd6ef6..0000000000 Binary files a/assets/docs/images/clusters_site-deployment_model-profile_byoos-pack-yaml.png and /dev/null differ diff --git a/assets/docs/images/clusters_site-deployment_prepare-edge-configuration_install-flow-with-more-user-data.png b/assets/docs/images/clusters_site-deployment_prepare-edge-configuration_install-flow-with-more-user-data.png deleted file mode 100644 index e07bc35fbd..0000000000 Binary files a/assets/docs/images/clusters_site-deployment_prepare-edge-configuration_install-flow-with-more-user-data.png and /dev/null differ diff --git a/assets/docs/images/clusters_site-deployment_prepare-edge-configuration_install-flow.png 
b/assets/docs/images/clusters_site-deployment_prepare-edge-configuration_install-flow.png deleted file mode 100644 index d7b9fb2b57..0000000000 Binary files a/assets/docs/images/clusters_site-deployment_prepare-edge-configuration_install-flow.png and /dev/null differ diff --git a/assets/docs/images/color-tracking.png b/assets/docs/images/color-tracking.png deleted file mode 100644 index 7cec9aff75..0000000000 Binary files a/assets/docs/images/color-tracking.png and /dev/null differ diff --git a/assets/docs/images/conformance.png b/assets/docs/images/conformance.png deleted file mode 100644 index 43959a7793..0000000000 Binary files a/assets/docs/images/conformance.png and /dev/null differ diff --git a/assets/docs/images/containerized-edge.png b/assets/docs/images/containerized-edge.png deleted file mode 100644 index f83bfb6bf8..0000000000 Binary files a/assets/docs/images/containerized-edge.png and /dev/null differ diff --git a/assets/docs/images/create-host-cluster.png b/assets/docs/images/create-host-cluster.png deleted file mode 100644 index 24d62d6c91..0000000000 Binary files a/assets/docs/images/create-host-cluster.png and /dev/null differ diff --git a/assets/docs/images/create_profile.png b/assets/docs/images/create_profile.png deleted file mode 100644 index dd60fd7ba0..0000000000 Binary files a/assets/docs/images/create_profile.png and /dev/null differ diff --git a/assets/docs/images/crn.png b/assets/docs/images/crn.png deleted file mode 100644 index 84b262b726..0000000000 Binary files a/assets/docs/images/crn.png and /dev/null differ diff --git a/assets/docs/images/deploy-nested-cluster.png b/assets/docs/images/deploy-nested-cluster.png deleted file mode 100644 index 1a0a56e4ae..0000000000 Binary files a/assets/docs/images/deploy-nested-cluster.png and /dev/null differ diff --git a/assets/docs/images/dev-enterprise.png b/assets/docs/images/dev-enterprise.png deleted file mode 100644 index 7ac4f010f0..0000000000 Binary files a/assets/docs/images/dev-enterprise.png and /dev/null differ diff --git a/assets/docs/images/dev_profile.png b/assets/docs/images/dev_profile.png deleted file mode 100644 index 9ddd3e7b72..0000000000 Binary files a/assets/docs/images/dev_profile.png and /dev/null differ diff --git a/assets/docs/images/dev_profile_new.png b/assets/docs/images/dev_profile_new.png deleted file mode 100644 index 624a3b7592..0000000000 Binary files a/assets/docs/images/dev_profile_new.png and /dev/null differ diff --git a/assets/docs/images/development.png b/assets/docs/images/development.png deleted file mode 100644 index 1208b86d59..0000000000 Binary files a/assets/docs/images/development.png and /dev/null differ diff --git a/assets/docs/images/devx-services-connectivity-container-env-example.png b/assets/docs/images/devx-services-connectivity-container-env-example.png deleted file mode 100644 index ff39e58940..0000000000 Binary files a/assets/docs/images/devx-services-connectivity-container-env-example.png and /dev/null differ diff --git a/assets/docs/images/devx-services-connectivity-helm-env-example.png b/assets/docs/images/devx-services-connectivity-helm-env-example.png deleted file mode 100644 index ad2c3e11cb..0000000000 Binary files a/assets/docs/images/devx-services-connectivity-helm-env-example.png and /dev/null differ diff --git a/assets/docs/images/devx-services-connectivity-output-variables-example.png b/assets/docs/images/devx-services-connectivity-output-variables-example.png deleted file mode 100644 index 6c9f2806d9..0000000000 Binary files 
a/assets/docs/images/devx-services-connectivity-output-variables-example.png and /dev/null differ diff --git a/assets/docs/images/devx-start.png b/assets/docs/images/devx-start.png deleted file mode 100644 index 1d0d95c49f..0000000000 Binary files a/assets/docs/images/devx-start.png and /dev/null differ diff --git a/assets/docs/images/devx_app-profile_create-app-profile_app-layer-infoboxes.png b/assets/docs/images/devx_app-profile_create-app-profile_app-layer-infoboxes.png deleted file mode 100644 index 7066d193a6..0000000000 Binary files a/assets/docs/images/devx_app-profile_create-app-profile_app-layer-infoboxes.png and /dev/null differ diff --git a/assets/docs/images/devx_devx_cli-display.png b/assets/docs/images/devx_devx_cli-display.png deleted file mode 100644 index 2923bacfaf..0000000000 Binary files a/assets/docs/images/devx_devx_cli-display.png and /dev/null differ diff --git a/assets/docs/images/devx_manage-dev-engine_sso_display-oidc-page.png b/assets/docs/images/devx_manage-dev-engine_sso_display-oidc-page.png deleted file mode 100644 index 2912f532c5..0000000000 Binary files a/assets/docs/images/devx_manage-dev-engine_sso_display-oidc-page.png and /dev/null differ diff --git a/assets/docs/images/devx_manage-dev-engine_sso_palette-login-view.png b/assets/docs/images/devx_manage-dev-engine_sso_palette-login-view.png deleted file mode 100644 index 86dec8c3e9..0000000000 Binary files a/assets/docs/images/devx_manage-dev-engine_sso_palette-login-view.png and /dev/null differ diff --git a/assets/docs/images/dfd_on_prem_vmware.png b/assets/docs/images/dfd_on_prem_vmware.png deleted file mode 100644 index 3e0726809f..0000000000 Binary files a/assets/docs/images/dfd_on_prem_vmware.png and /dev/null differ diff --git a/assets/docs/images/dfd_saas_to_aws.png b/assets/docs/images/dfd_saas_to_aws.png deleted file mode 100644 index 6ee91f9895..0000000000 Binary files a/assets/docs/images/dfd_saas_to_aws.png and /dev/null differ diff --git a/assets/docs/images/dfd_saas_to_vmware.png b/assets/docs/images/dfd_saas_to_vmware.png deleted file mode 100644 index 07d5ecf8a1..0000000000 Binary files a/assets/docs/images/dfd_saas_to_vmware.png and /dev/null differ diff --git a/assets/docs/images/doc_cluster_clusters-cluster-heart-beat.png b/assets/docs/images/doc_cluster_clusters-cluster-heart-beat.png deleted file mode 100644 index 1e046a25c0..0000000000 Binary files a/assets/docs/images/doc_cluster_clusters-cluster-heart-beat.png and /dev/null differ diff --git a/assets/docs/images/docs_architecture-overview_components-overview.png b/assets/docs/images/docs_architecture-overview_components-overview.png deleted file mode 100644 index b1f279c58b..0000000000 Binary files a/assets/docs/images/docs_architecture-overview_components-overview.png and /dev/null differ diff --git a/assets/docs/images/docs_compliance_compliance_fips-logo.png b/assets/docs/images/docs_compliance_compliance_fips-logo.png deleted file mode 100644 index 23f3ee060f..0000000000 Binary files a/assets/docs/images/docs_compliance_compliance_fips-logo.png and /dev/null differ diff --git a/assets/docs/images/docs_devx_pde-dashboard-utilization.png b/assets/docs/images/docs_devx_pde-dashboard-utilization.png deleted file mode 100644 index c5c518ad86..0000000000 Binary files a/assets/docs/images/docs_devx_pde-dashboard-utilization.png and /dev/null differ diff --git a/assets/docs/images/docs_integrations_frp_cert-san-example.png b/assets/docs/images/docs_integrations_frp_cert-san-example.png deleted file mode 100644 index 
a7b99bb9ef..0000000000 Binary files a/assets/docs/images/docs_integrations_frp_cert-san-example.png and /dev/null differ diff --git a/assets/docs/images/docs_integrations_frp_tls-san-example.png b/assets/docs/images/docs_integrations_frp_tls-san-example.png deleted file mode 100644 index 980ab2f55f..0000000000 Binary files a/assets/docs/images/docs_integrations_frp_tls-san-example.png and /dev/null differ diff --git a/assets/docs/images/docs_introduction_palette-components.png b/assets/docs/images/docs_introduction_palette-components.png deleted file mode 100644 index fd85654914..0000000000 Binary files a/assets/docs/images/docs_introduction_palette-components.png and /dev/null differ diff --git a/assets/docs/images/docs_introduction_palette-modes.png b/assets/docs/images/docs_introduction_palette-modes.png deleted file mode 100644 index 1ff244de1c..0000000000 Binary files a/assets/docs/images/docs_introduction_palette-modes.png and /dev/null differ diff --git a/assets/docs/images/docs_introduction_product-overview.png b/assets/docs/images/docs_introduction_product-overview.png deleted file mode 100644 index bd5311996e..0000000000 Binary files a/assets/docs/images/docs_introduction_product-overview.png and /dev/null differ diff --git a/assets/docs/images/docs_vm-mangement_vmo-diagram.png b/assets/docs/images/docs_vm-mangement_vmo-diagram.png deleted file mode 100644 index 994b2384e3..0000000000 Binary files a/assets/docs/images/docs_vm-mangement_vmo-diagram.png and /dev/null differ diff --git a/assets/docs/images/edge_edge-configuration_cloud-init_user-data.png b/assets/docs/images/edge_edge-configuration_cloud-init_user-data.png deleted file mode 100644 index 9f4113859e..0000000000 Binary files a/assets/docs/images/edge_edge-configuration_cloud-init_user-data.png and /dev/null differ diff --git a/assets/docs/images/eksd-cluster-profile.png b/assets/docs/images/eksd-cluster-profile.png deleted file mode 100644 index d1444c5c39..0000000000 Binary files a/assets/docs/images/eksd-cluster-profile.png and /dev/null differ diff --git a/assets/docs/images/eksd-cluster.png b/assets/docs/images/eksd-cluster.png deleted file mode 100644 index ad0a14db38..0000000000 Binary files a/assets/docs/images/eksd-cluster.png and /dev/null differ diff --git a/assets/docs/images/ema.png b/assets/docs/images/ema.png deleted file mode 100644 index 7cb3e2c4c3..0000000000 Binary files a/assets/docs/images/ema.png and /dev/null differ diff --git a/assets/docs/images/enterprise-app-registration.png b/assets/docs/images/enterprise-app-registration.png deleted file mode 100644 index 4a9d6a7fd8..0000000000 Binary files a/assets/docs/images/enterprise-app-registration.png and /dev/null differ diff --git a/assets/docs/images/enterprise-version_air-gap-repo_overview-order-diagram.png b/assets/docs/images/enterprise-version_air-gap-repo_overview-order-diagram.png deleted file mode 100644 index a3adf6ba42..0000000000 Binary files a/assets/docs/images/enterprise-version_air-gap-repo_overview-order-diagram.png and /dev/null differ diff --git a/assets/docs/images/enterprise-version_deploying-palette-with-helm_aws-iam-role.png b/assets/docs/images/enterprise-version_deploying-palette-with-helm_aws-iam-role.png deleted file mode 100644 index c840dc72d1..0000000000 Binary files a/assets/docs/images/enterprise-version_deploying-palette-with-helm_aws-iam-role.png and /dev/null differ diff --git a/assets/docs/images/full_profile.png b/assets/docs/images/full_profile.png deleted file mode 100644 index c6a7323ab0..0000000000 Binary 
files a/assets/docs/images/full_profile.png and /dev/null differ diff --git a/assets/docs/images/gcp_cluster_architecture.png b/assets/docs/images/gcp_cluster_architecture.png deleted file mode 100644 index d5fa0331e0..0000000000 Binary files a/assets/docs/images/gcp_cluster_architecture.png and /dev/null differ diff --git a/assets/docs/images/harshicorp.png b/assets/docs/images/harshicorp.png deleted file mode 100644 index c5712c3912..0000000000 Binary files a/assets/docs/images/harshicorp.png and /dev/null differ diff --git a/assets/docs/images/hide_copy_button.png b/assets/docs/images/hide_copy_button.png deleted file mode 100644 index eead0aece9..0000000000 Binary files a/assets/docs/images/hide_copy_button.png and /dev/null differ diff --git a/assets/docs/images/hide_copy_button_example.png b/assets/docs/images/hide_copy_button_example.png deleted file mode 100644 index 3c7e09e375..0000000000 Binary files a/assets/docs/images/hide_copy_button_example.png and /dev/null differ diff --git a/assets/docs/images/integrations_aws-cluster-autoscaler_edit-node.png b/assets/docs/images/integrations_aws-cluster-autoscaler_edit-node.png deleted file mode 100644 index e0c4bf6f91..0000000000 Binary files a/assets/docs/images/integrations_aws-cluster-autoscaler_edit-node.png and /dev/null differ diff --git a/assets/docs/images/integrations_aws-cluster-autoscaler_k8s-manifest.png b/assets/docs/images/integrations_aws-cluster-autoscaler_k8s-manifest.png deleted file mode 100644 index 9511e885e4..0000000000 Binary files a/assets/docs/images/integrations_aws-cluster-autoscaler_k8s-manifest.png and /dev/null differ diff --git a/assets/docs/images/integrations_aws-cluster-autoscaler_node-count.png b/assets/docs/images/integrations_aws-cluster-autoscaler_node-count.png deleted file mode 100644 index ae8c181149..0000000000 Binary files a/assets/docs/images/integrations_aws-cluster-autoscaler_node-count.png and /dev/null differ diff --git a/assets/docs/images/integrations_aws-cluster-autoscaler_one-node.png b/assets/docs/images/integrations_aws-cluster-autoscaler_one-node.png deleted file mode 100644 index 1093c2464c..0000000000 Binary files a/assets/docs/images/integrations_aws-cluster-autoscaler_one-node.png and /dev/null differ diff --git a/assets/docs/images/integrations_aws-cluster-autoscaler_two-nodes.png b/assets/docs/images/integrations_aws-cluster-autoscaler_two-nodes.png deleted file mode 100644 index 65e420d8ce..0000000000 Binary files a/assets/docs/images/integrations_aws-cluster-autoscaler_two-nodes.png and /dev/null differ diff --git a/assets/docs/images/integrations_frp_conection_overview.png b/assets/docs/images/integrations_frp_conection_overview.png deleted file mode 100644 index dc15e579f0..0000000000 Binary files a/assets/docs/images/integrations_frp_conection_overview.png and /dev/null differ diff --git a/assets/docs/images/integrations_pack_diffs.png b/assets/docs/images/integrations_pack_diffs.png deleted file mode 100644 index a28ee05072..0000000000 Binary files a/assets/docs/images/integrations_pack_diffs.png and /dev/null differ diff --git a/assets/docs/images/integrations_pack_line_diffs.png b/assets/docs/images/integrations_pack_line_diffs.png deleted file mode 100644 index 1334bc49f4..0000000000 Binary files a/assets/docs/images/integrations_pack_line_diffs.png and /dev/null differ diff --git a/assets/docs/images/integrations_prometheus-agent_cluster-detail-view-grafana.png b/assets/docs/images/integrations_prometheus-agent_cluster-detail-view-grafana.png deleted file mode 100644 
index fb16dc55cc..0000000000 Binary files a/assets/docs/images/integrations_prometheus-agent_cluster-detail-view-grafana.png and /dev/null differ diff --git a/assets/docs/images/integrations_prometheus-agent_cluster-detail-view.png b/assets/docs/images/integrations_prometheus-agent_cluster-detail-view.png deleted file mode 100644 index 3d7695cfac..0000000000 Binary files a/assets/docs/images/integrations_prometheus-agent_cluster-detail-view.png and /dev/null differ diff --git a/assets/docs/images/integrations_prometheus-operator_operator-preset-view-expanded.png b/assets/docs/images/integrations_prometheus-operator_operator-preset-view-expanded.png deleted file mode 100644 index 8dc6d520c4..0000000000 Binary files a/assets/docs/images/integrations_prometheus-operator_operator-preset-view-expanded.png and /dev/null differ diff --git a/assets/docs/images/integrations_spectro-k8s-dashboard_diagram-flow-users.png b/assets/docs/images/integrations_spectro-k8s-dashboard_diagram-flow-users.png deleted file mode 100644 index db92ae6e18..0000000000 Binary files a/assets/docs/images/integrations_spectro-k8s-dashboard_diagram-flow-users.png and /dev/null differ diff --git a/assets/docs/images/integrations_ubuntu_ubuntu-pro-preset-drawer.png b/assets/docs/images/integrations_ubuntu_ubuntu-pro-preset-drawer.png deleted file mode 100644 index 8c99f3c583..0000000000 Binary files a/assets/docs/images/integrations_ubuntu_ubuntu-pro-preset-drawer.png and /dev/null differ diff --git a/assets/docs/images/intergrations_standalone-integrated-pack_diagram-overview.png b/assets/docs/images/intergrations_standalone-integrated-pack_diagram-overview.png deleted file mode 100644 index 19f90be1a6..0000000000 Binary files a/assets/docs/images/intergrations_standalone-integrated-pack_diagram-overview.png and /dev/null differ diff --git a/assets/docs/images/kcs.png b/assets/docs/images/kcs.png deleted file mode 100644 index 337707ed94..0000000000 Binary files a/assets/docs/images/kcs.png and /dev/null differ diff --git a/assets/docs/images/kpt.png b/assets/docs/images/kpt.png deleted file mode 100644 index a5e9133411..0000000000 Binary files a/assets/docs/images/kpt.png and /dev/null differ diff --git a/assets/docs/images/kubeadmconfig.png b/assets/docs/images/kubeadmconfig.png deleted file mode 100644 index 0beb45b339..0000000000 Binary files a/assets/docs/images/kubeadmconfig.png and /dev/null differ diff --git a/assets/docs/images/maas_cluster_architecture.png b/assets/docs/images/maas_cluster_architecture.png deleted file mode 100644 index fee6be0fd9..0000000000 Binary files a/assets/docs/images/maas_cluster_architecture.png and /dev/null differ diff --git a/assets/docs/images/native-edge-deployment-lifecycle.png b/assets/docs/images/native-edge-deployment-lifecycle.png deleted file mode 100644 index cd0aa1ab5d..0000000000 Binary files a/assets/docs/images/native-edge-deployment-lifecycle.png and /dev/null differ diff --git a/assets/docs/images/native-edge.png b/assets/docs/images/native-edge.png deleted file mode 100644 index 9e79a03c53..0000000000 Binary files a/assets/docs/images/native-edge.png and /dev/null differ diff --git a/assets/docs/images/notable-variables.png b/assets/docs/images/notable-variables.png deleted file mode 100644 index 83f7d820b6..0000000000 Binary files a/assets/docs/images/notable-variables.png and /dev/null differ diff --git a/assets/docs/images/oidc-azure-images/azure-app-registration.png b/assets/docs/images/oidc-azure-images/azure-app-registration.png deleted file mode 100644 index 
8f74b81e43..0000000000 Binary files a/assets/docs/images/oidc-azure-images/azure-app-registration.png and /dev/null differ diff --git a/assets/docs/images/oidc-azure-images/client-config.png b/assets/docs/images/oidc-azure-images/client-config.png deleted file mode 100644 index 01d471fe51..0000000000 Binary files a/assets/docs/images/oidc-azure-images/client-config.png and /dev/null differ diff --git a/assets/docs/images/oidc-azure-images/enterprise-app-registration.png b/assets/docs/images/oidc-azure-images/enterprise-app-registration.png deleted file mode 100644 index 4a9d6a7fd8..0000000000 Binary files a/assets/docs/images/oidc-azure-images/enterprise-app-registration.png and /dev/null differ diff --git a/assets/docs/images/oidc-azure-images/kubeadmconfig.png b/assets/docs/images/oidc-azure-images/kubeadmconfig.png deleted file mode 100644 index 0beb45b339..0000000000 Binary files a/assets/docs/images/oidc-azure-images/kubeadmconfig.png and /dev/null differ diff --git a/assets/docs/images/oidc-azure-images/notable-variables.png b/assets/docs/images/oidc-azure-images/notable-variables.png deleted file mode 100644 index 83f7d820b6..0000000000 Binary files a/assets/docs/images/oidc-azure-images/notable-variables.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_add-access-policy.png b/assets/docs/images/oidc-okta-images/oidc-okta_add-access-policy.png deleted file mode 100644 index 7ab8c50ae5..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_add-access-policy.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_add-authz-server.png b/assets/docs/images/oidc-okta-images/oidc-okta_add-authz-server.png deleted file mode 100644 index 674c99c75e..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_add-authz-server.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_add-claims.png b/assets/docs/images/oidc-okta-images/oidc-okta_add-claims.png deleted file mode 100644 index 81ae7dd54b..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_add-claims.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_add-policy-rule.png b/assets/docs/images/oidc-okta-images/oidc-okta_add-policy-rule.png deleted file mode 100644 index 83c8aa12c3..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_add-policy-rule.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_add-tenant-role.png b/assets/docs/images/oidc-okta-images/oidc-okta_add-tenant-role.png deleted file mode 100644 index c78e638a21..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_add-tenant-role.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_assignments.png b/assets/docs/images/oidc-okta-images/oidc-okta_assignments.png deleted file mode 100644 index ec2f2f7634..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_assignments.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_claims-result.png b/assets/docs/images/oidc-okta-images/oidc-okta_claims-result.png deleted file mode 100644 index 07aeb6454d..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_claims-result.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_configure-palette-oidc.png b/assets/docs/images/oidc-okta-images/oidc-okta_configure-palette-oidc.png deleted file mode 100644 index 
e364082045..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_configure-palette-oidc.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_configure-policy-rule.png b/assets/docs/images/oidc-okta-images/oidc-okta_configure-policy-rule.png deleted file mode 100644 index dddce8d384..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_configure-policy-rule.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_copy-callback-url.png b/assets/docs/images/oidc-okta-images/oidc-okta_copy-callback-url.png deleted file mode 100644 index dbef2e2451..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_copy-callback-url.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_copy-client-id.png b/assets/docs/images/oidc-okta-images/oidc-okta_copy-client-id.png deleted file mode 100644 index 6206e8aac5..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_copy-client-id.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_copy-logout-url.png b/assets/docs/images/oidc-okta-images/oidc-okta_copy-logout-url.png deleted file mode 100644 index 0314f68b63..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_copy-logout-url.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_copy-shared-secret.png b/assets/docs/images/oidc-okta-images/oidc-okta_copy-shared-secret.png deleted file mode 100644 index 87501fe783..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_copy-shared-secret.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_create-team.png b/assets/docs/images/oidc-okta-images/oidc-okta_create-team.png deleted file mode 100644 index 4120573bbf..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_create-team.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_get-issuer-uri.png b/assets/docs/images/oidc-okta-images/oidc-okta_get-issuer-uri.png deleted file mode 100644 index 207f88dc44..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_get-issuer-uri.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_name-access-policy.png b/assets/docs/images/oidc-okta-images/oidc-okta_name-access-policy.png deleted file mode 100644 index a40e59bddf..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_name-access-policy.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_name-authz-server.png b/assets/docs/images/oidc-okta-images/oidc-okta_name-authz-server.png deleted file mode 100644 index 6413a85416..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_name-authz-server.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_name-team.png b/assets/docs/images/oidc-okta-images/oidc-okta_name-team.png deleted file mode 100644 index 6d0b0988a7..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_name-team.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_okta-general-settings.png b/assets/docs/images/oidc-okta-images/oidc-okta_okta-general-settings.png deleted file mode 100644 index cdfe4e1874..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_okta-general-settings.png and /dev/null differ diff --git 
a/assets/docs/images/oidc-okta-images/oidc-okta_palette-login.png b/assets/docs/images/oidc-okta-images/oidc-okta_palette-login.png deleted file mode 100644 index 62c293848d..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_palette-login.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_paste-logout-uri.png b/assets/docs/images/oidc-okta-images/oidc-okta_paste-logout-uri.png deleted file mode 100644 index d119138972..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_paste-logout-uri.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_paste-redirect-uri.png b/assets/docs/images/oidc-okta-images/oidc-okta_paste-redirect-uri.png deleted file mode 100644 index 97da942bff..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_paste-redirect-uri.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_tenant-roles.png b/assets/docs/images/oidc-okta-images/oidc-okta_tenant-roles.png deleted file mode 100644 index 4fdbb0cb78..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_tenant-roles.png and /dev/null differ diff --git a/assets/docs/images/oidc-okta-images/oidc-okta_user-logout.png b/assets/docs/images/oidc-okta-images/oidc-okta_user-logout.png deleted file mode 100644 index f46ab070c7..0000000000 Binary files a/assets/docs/images/oidc-okta-images/oidc-okta_user-logout.png and /dev/null differ diff --git a/assets/docs/images/on_prem_system_console.png b/assets/docs/images/on_prem_system_console.png deleted file mode 100644 index 5e49b9a3b7..0000000000 Binary files a/assets/docs/images/on_prem_system_console.png and /dev/null differ diff --git a/assets/docs/images/openstack_cluster_architecture.png b/assets/docs/images/openstack_cluster_architecture.png deleted file mode 100644 index 91ce9dde73..0000000000 Binary files a/assets/docs/images/openstack_cluster_architecture.png and /dev/null differ diff --git a/assets/docs/images/pack_status.png b/assets/docs/images/pack_status.png deleted file mode 100644 index d7603dbfbc..0000000000 Binary files a/assets/docs/images/pack_status.png and /dev/null differ diff --git a/assets/docs/images/palette-rbac-scope.png b/assets/docs/images/palette-rbac-scope.png deleted file mode 100644 index f182824389..0000000000 Binary files a/assets/docs/images/palette-rbac-scope.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-application-group.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-application-group.png deleted file mode 100644 index 58dda035dd..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-application-group.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-identifiers.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-identifiers.png deleted file mode 100644 index 0784a1e3cd..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-identifiers.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-tenant-role.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-tenant-role.png deleted file mode 100644 index c78e638a21..0000000000 Binary files 
a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-tenant-role.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-transform-rule-1.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-transform-rule-1.png deleted file mode 100644 index 82442bad5d..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-transform-rule-1.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-transform-rule-2.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-transform-rule-2.png deleted file mode 100644 index fd12f3d29d..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-transform-rule-2.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-web-api.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-web-api.png deleted file mode 100644 index 3e3c67ce23..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-web-api.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_base-url.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_base-url.png deleted file mode 100644 index e668235dfb..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_base-url.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_configure-palette-oidc.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_configure-palette-oidc.png deleted file mode 100644 index 4742215ab9..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_configure-palette-oidc.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-callback-url.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-callback-url.png deleted file mode 100644 index 8323b08532..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-callback-url.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-logout-url.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-logout-url.png deleted file mode 100644 index 82466b7173..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-logout-url.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-shared-secret.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-shared-secret.png deleted file mode 100644 index 60ed28a332..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-shared-secret.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_create-team.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_create-team.png deleted file mode 100644 index 
4120573bbf..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_create-team.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_enable-scopes.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_enable-scopes.png deleted file mode 100644 index 8684f5e37c..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_enable-scopes.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_get-client-identifier.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_get-client-identifier.png deleted file mode 100644 index fff9641486..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_get-client-identifier.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_groups-as-claims.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_groups-as-claims.png deleted file mode 100644 index 07ab970115..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_groups-as-claims.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_ldap-as-claims.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_ldap-as-claims.png deleted file mode 100644 index 5515e9ef93..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_ldap-as-claims.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_name-application-group.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_name-application-group.png deleted file mode 100644 index 23c5a34fa4..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_name-application-group.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_name-team.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_name-team.png deleted file mode 100644 index 0f420448dc..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_name-team.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_note-adfs-name.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_note-adfs-name.png deleted file mode 100644 index 59e8b43c01..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_note-adfs-name.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_open-oidc-app.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_open-oidc-app.png deleted file mode 100644 index 933f771bcd..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_open-oidc-app.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_palette-login.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_palette-login.png deleted file mode 
100644 index 62c293848d..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_palette-login.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_paste-logout-uri.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_paste-logout-uri.png deleted file mode 100644 index 91a1ad61ba..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_paste-logout-uri.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_paste-redirect-uri.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_paste-redirect-uri.png deleted file mode 100644 index 445a6900cf..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_paste-redirect-uri.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_reopen-webapi-app.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_reopen-webapi-app.png deleted file mode 100644 index 0a43aed7da..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_reopen-webapi-app.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_select-policy.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_select-policy.png deleted file mode 100644 index 6bc1d3049d..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_select-policy.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_set-group-claim.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_set-group-claim.png deleted file mode 100644 index 7fb35b23f5..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_set-group-claim.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_set-ldap-claims.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_set-ldap-claims.png deleted file mode 100644 index 41e0f04d15..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_set-ldap-claims.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_team-members.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_team-members.png deleted file mode 100644 index 8fe038b3da..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_team-members.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_tenant-roles.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_tenant-roles.png deleted file mode 100644 index 2ce2ae8515..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_tenant-roles.png and /dev/null differ diff --git a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_user-logout.png b/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_user-logout.png deleted file mode 100644 
index f46ab070c7..0000000000 Binary files a/assets/docs/images/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_user-logout.png and /dev/null differ diff --git a/assets/docs/images/pci-dss.png b/assets/docs/images/pci-dss.png deleted file mode 100644 index e9a823e103..0000000000 Binary files a/assets/docs/images/pci-dss.png and /dev/null differ diff --git a/assets/docs/images/pdb_kubectl_describe_nodes.png b/assets/docs/images/pdb_kubectl_describe_nodes.png deleted file mode 100644 index 05f2272f52..0000000000 Binary files a/assets/docs/images/pdb_kubectl_describe_nodes.png and /dev/null differ diff --git a/assets/docs/images/pdb_kubectl_describe_nodes_hd.png b/assets/docs/images/pdb_kubectl_describe_nodes_hd.png deleted file mode 100644 index 86883e864d..0000000000 Binary files a/assets/docs/images/pdb_kubectl_describe_nodes_hd.png and /dev/null differ diff --git a/assets/docs/images/pdb_kubectl_error.png b/assets/docs/images/pdb_kubectl_error.png deleted file mode 100644 index e4c2d2833c..0000000000 Binary files a/assets/docs/images/pdb_kubectl_error.png and /dev/null differ diff --git a/assets/docs/images/pdb_kubectl_error_hd.png b/assets/docs/images/pdb_kubectl_error_hd.png deleted file mode 100644 index c446d72f2d..0000000000 Binary files a/assets/docs/images/pdb_kubectl_error_hd.png and /dev/null differ diff --git a/assets/docs/images/pdb_node_cordoned.png b/assets/docs/images/pdb_node_cordoned.png deleted file mode 100644 index 9ca3081df2..0000000000 Binary files a/assets/docs/images/pdb_node_cordoned.png and /dev/null differ diff --git a/assets/docs/images/pdb_node_cordoned_hd.png b/assets/docs/images/pdb_node_cordoned_hd.png deleted file mode 100644 index e1bc0680bd..0000000000 Binary files a/assets/docs/images/pdb_node_cordoned_hd.png and /dev/null differ diff --git a/assets/docs/images/pdb_ui_error.png b/assets/docs/images/pdb_ui_error.png deleted file mode 100644 index 6b382fb501..0000000000 Binary files a/assets/docs/images/pdb_ui_error.png and /dev/null differ diff --git a/assets/docs/images/pdb_ui_error_hd.png b/assets/docs/images/pdb_ui_error_hd.png deleted file mode 100644 index b4ec539f42..0000000000 Binary files a/assets/docs/images/pdb_ui_error_hd.png and /dev/null differ diff --git a/assets/docs/images/prod_profile.png b/assets/docs/images/prod_profile.png deleted file mode 100644 index be16e84636..0000000000 Binary files a/assets/docs/images/prod_profile.png and /dev/null differ diff --git a/assets/docs/images/production.png b/assets/docs/images/production.png deleted file mode 100644 index f0125d5474..0000000000 Binary files a/assets/docs/images/production.png and /dev/null differ diff --git a/assets/docs/images/project-dashboard.png b/assets/docs/images/project-dashboard.png deleted file mode 100644 index 6061594f1a..0000000000 Binary files a/assets/docs/images/project-dashboard.png and /dev/null differ diff --git a/assets/docs/images/readme_codeblocks_example.png b/assets/docs/images/readme_codeblocks_example.png deleted file mode 100644 index 11d577b5c3..0000000000 Binary files a/assets/docs/images/readme_codeblocks_example.png and /dev/null differ diff --git a/assets/docs/images/record-details.png b/assets/docs/images/record-details.png deleted file mode 100644 index 65c891f85f..0000000000 Binary files a/assets/docs/images/record-details.png and /dev/null differ diff --git a/assets/docs/images/registries-and-packs_adding-a-custom-registry-tls_certificate.png b/assets/docs/images/registries-and-packs_adding-a-custom-registry-tls_certificate.png deleted 
file mode 100644 index 3f7258ea7d..0000000000 Binary files a/assets/docs/images/registries-and-packs_adding-a-custom-registry-tls_certificate.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-assign-users-and-groups.png b/assets/docs/images/saml-azure-images/saml-azure-assign-users-and-groups.png deleted file mode 100644 index 06b452401c..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-assign-users-and-groups.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-assigned-user-groups.png b/assets/docs/images/saml-azure-images/saml-azure-assigned-user-groups.png deleted file mode 100644 index eb824a346a..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-assigned-user-groups.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-basic-saml-config.png b/assets/docs/images/saml-azure-images/saml-azure-basic-saml-config.png deleted file mode 100644 index 12fc5fb381..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-basic-saml-config.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-config-identifier.png b/assets/docs/images/saml-azure-images/saml-azure-config-identifier.png deleted file mode 100644 index d2026bf19a..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-config-identifier.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-default-ad-users.png b/assets/docs/images/saml-azure-images/saml-azure-default-ad-users.png deleted file mode 100644 index edbf0f4eb3..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-default-ad-users.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-enterprise-all-app.png b/assets/docs/images/saml-azure-images/saml-azure-enterprise-all-app.png deleted file mode 100644 index 3aa8cc752a..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-enterprise-all-app.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-federation-metadata-xml.png b/assets/docs/images/saml-azure-images/saml-azure-federation-metadata-xml.png deleted file mode 100644 index abd4e3ce6d..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-federation-metadata-xml.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-identifier-entryid.png b/assets/docs/images/saml-azure-images/saml-azure-identifier-entryid.png deleted file mode 100644 index 1455e791b4..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-identifier-entryid.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-logout-url.png b/assets/docs/images/saml-azure-images/saml-azure-logout-url.png deleted file mode 100644 index 4f22741c46..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-logout-url.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-name.png b/assets/docs/images/saml-azure-images/saml-azure-name.png deleted file mode 100644 index f9dcc66873..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-name.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-project-viewer.png b/assets/docs/images/saml-azure-images/saml-azure-project-viewer.png deleted file mode 100644 index 78f80495bf..0000000000 Binary files 
a/assets/docs/images/saml-azure-images/saml-azure-project-viewer.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-reply-url.png b/assets/docs/images/saml-azure-images/saml-azure-reply-url.png deleted file mode 100644 index 4dc8795dca..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-reply-url.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-saml-select.png b/assets/docs/images/saml-azure-images/saml-azure-saml-select.png deleted file mode 100644 index fd975d6744..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-saml-select.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-sign-on-url.png b/assets/docs/images/saml-azure-images/saml-azure-sign-on-url.png deleted file mode 100644 index 4f9d9c0b78..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-sign-on-url.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-team-create.png b/assets/docs/images/saml-azure-images/saml-azure-team-create.png deleted file mode 100644 index b50361a0ea..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-team-create.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-azure-users-and-group-assign.png b/assets/docs/images/saml-azure-images/saml-azure-users-and-group-assign.png deleted file mode 100644 index 855131ea5a..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-azure-users-and-group-assign.png and /dev/null differ diff --git a/assets/docs/images/saml-azure-images/saml-group-example.png b/assets/docs/images/saml-azure-images/saml-group-example.png deleted file mode 100644 index b0a8203637..0000000000 Binary files a/assets/docs/images/saml-azure-images/saml-group-example.png and /dev/null differ diff --git a/assets/docs/images/sbom_dependencies.png b/assets/docs/images/sbom_dependencies.png deleted file mode 100644 index 7a73c87cd6..0000000000 Binary files a/assets/docs/images/sbom_dependencies.png and /dev/null differ diff --git a/assets/docs/images/sbom_results.png b/assets/docs/images/sbom_results.png deleted file mode 100644 index c7d9f77c5a..0000000000 Binary files a/assets/docs/images/sbom_results.png and /dev/null differ diff --git a/assets/docs/images/sbom_scan.png b/assets/docs/images/sbom_scan.png deleted file mode 100644 index 7e2107ad9a..0000000000 Binary files a/assets/docs/images/sbom_scan.png and /dev/null differ diff --git a/assets/docs/images/sbom_vulnerabilities.png b/assets/docs/images/sbom_vulnerabilities.png deleted file mode 100644 index 1743718aea..0000000000 Binary files a/assets/docs/images/sbom_vulnerabilities.png and /dev/null differ diff --git a/assets/docs/images/scope-switcher.png b/assets/docs/images/scope-switcher.png deleted file mode 100644 index 519c6af7d0..0000000000 Binary files a/assets/docs/images/scope-switcher.png and /dev/null differ diff --git a/assets/docs/images/security_dev_lifecycle.png b/assets/docs/images/security_dev_lifecycle.png deleted file mode 100644 index f024a04ed7..0000000000 Binary files a/assets/docs/images/security_dev_lifecycle.png and /dev/null differ diff --git a/assets/docs/images/soc2.png b/assets/docs/images/soc2.png deleted file mode 100644 index 3256b13cd4..0000000000 Binary files a/assets/docs/images/soc2.png and /dev/null differ diff --git a/assets/docs/images/spectro_cloud_concepts.png b/assets/docs/images/spectro_cloud_concepts.png deleted file mode 100644 
index cfd0679062..0000000000 Binary files a/assets/docs/images/spectro_cloud_concepts.png and /dev/null differ diff --git a/assets/docs/images/system-profile-1.png b/assets/docs/images/system-profile-1.png deleted file mode 100644 index cbbd334b65..0000000000 Binary files a/assets/docs/images/system-profile-1.png and /dev/null differ diff --git a/assets/docs/images/system-profile-2.png b/assets/docs/images/system-profile-2.png deleted file mode 100644 index 5c62027ab1..0000000000 Binary files a/assets/docs/images/system-profile-2.png and /dev/null differ diff --git a/assets/docs/images/system-profile-3.png b/assets/docs/images/system-profile-3.png deleted file mode 100644 index 6880412a4d..0000000000 Binary files a/assets/docs/images/system-profile-3.png and /dev/null differ diff --git a/assets/docs/images/system-profile-4.png b/assets/docs/images/system-profile-4.png deleted file mode 100644 index 6f00117d39..0000000000 Binary files a/assets/docs/images/system-profile-4.png and /dev/null differ diff --git a/assets/docs/images/tenant-settings_login-banner_settings-page-view.png b/assets/docs/images/tenant-settings_login-banner_settings-page-view.png deleted file mode 100644 index b33474d92e..0000000000 Binary files a/assets/docs/images/tenant-settings_login-banner_settings-page-view.png and /dev/null differ diff --git a/assets/docs/images/tenant-settings_login-banner_tenant-banner-view.png b/assets/docs/images/tenant-settings_login-banner_tenant-banner-view.png deleted file mode 100644 index 404033139a..0000000000 Binary files a/assets/docs/images/tenant-settings_login-banner_tenant-banner-view.png and /dev/null differ diff --git a/assets/docs/images/tencent-diagram.png b/assets/docs/images/tencent-diagram.png deleted file mode 100644 index 15c4a2dd21..0000000000 Binary files a/assets/docs/images/tencent-diagram.png and /dev/null differ diff --git a/assets/docs/images/terraform.png b/assets/docs/images/terraform.png deleted file mode 100644 index b8bda60332..0000000000 Binary files a/assets/docs/images/terraform.png and /dev/null differ diff --git a/assets/docs/images/tf-tutorial-images/newly-created-cluster.png b/assets/docs/images/tf-tutorial-images/newly-created-cluster.png deleted file mode 100644 index 67ff1bf945..0000000000 Binary files a/assets/docs/images/tf-tutorial-images/newly-created-cluster.png and /dev/null differ diff --git a/assets/docs/images/tf-tutorial-images/terraform-finished-install.png b/assets/docs/images/tf-tutorial-images/terraform-finished-install.png deleted file mode 100644 index 1585c5630d..0000000000 Binary files a/assets/docs/images/tf-tutorial-images/terraform-finished-install.png and /dev/null differ diff --git a/assets/docs/images/tf-tutorial-images/tf-apply.png b/assets/docs/images/tf-tutorial-images/tf-apply.png deleted file mode 100644 index 66da925835..0000000000 Binary files a/assets/docs/images/tf-tutorial-images/tf-apply.png and /dev/null differ diff --git a/assets/docs/images/tf-tutorial-images/tf-create-missing-resources.png b/assets/docs/images/tf-tutorial-images/tf-create-missing-resources.png deleted file mode 100644 index 5e35029bd0..0000000000 Binary files a/assets/docs/images/tf-tutorial-images/tf-create-missing-resources.png and /dev/null differ diff --git a/assets/docs/images/tf-tutorial-images/tf-destroy-complete.png b/assets/docs/images/tf-tutorial-images/tf-destroy-complete.png deleted file mode 100644 index c4e89a5ce0..0000000000 Binary files a/assets/docs/images/tf-tutorial-images/tf-destroy-complete.png and /dev/null differ diff 
--git a/assets/docs/images/tf-tutorial-images/tf-destroy.png b/assets/docs/images/tf-tutorial-images/tf-destroy.png deleted file mode 100644 index d56c95e1c6..0000000000 Binary files a/assets/docs/images/tf-tutorial-images/tf-destroy.png and /dev/null differ diff --git a/assets/docs/images/tf-tutorial-images/tf-execution-plan.png b/assets/docs/images/tf-tutorial-images/tf-execution-plan.png deleted file mode 100644 index e27e77638a..0000000000 Binary files a/assets/docs/images/tf-tutorial-images/tf-execution-plan.png and /dev/null differ diff --git a/assets/docs/images/tf-tutorial-images/tf-finished-installing.png b/assets/docs/images/tf-tutorial-images/tf-finished-installing.png deleted file mode 100644 index 87db015bd3..0000000000 Binary files a/assets/docs/images/tf-tutorial-images/tf-finished-installing.png and /dev/null differ diff --git a/assets/docs/images/tf-tutorial-images/tf-initialized.png b/assets/docs/images/tf-tutorial-images/tf-initialized.png deleted file mode 100644 index 7c7327ba73..0000000000 Binary files a/assets/docs/images/tf-tutorial-images/tf-initialized.png and /dev/null differ diff --git a/assets/docs/images/tf-tutorial-images/tf-initializing.png b/assets/docs/images/tf-tutorial-images/tf-initializing.png deleted file mode 100644 index 48ad4bc158..0000000000 Binary files a/assets/docs/images/tf-tutorial-images/tf-initializing.png and /dev/null differ diff --git a/assets/docs/images/tf-tutorial-images/tf-kubeflow.png b/assets/docs/images/tf-tutorial-images/tf-kubeflow.png deleted file mode 100644 index 92cbbf85ee..0000000000 Binary files a/assets/docs/images/tf-tutorial-images/tf-kubeflow.png and /dev/null differ diff --git a/assets/docs/images/tf-tutorial-images/tf-provision-plan.png b/assets/docs/images/tf-tutorial-images/tf-provision-plan.png deleted file mode 100644 index 4aa50e7e3d..0000000000 Binary files a/assets/docs/images/tf-tutorial-images/tf-provision-plan.png and /dev/null differ diff --git a/assets/docs/images/tf-tutorial-images/tf-provisioned-cluster.png b/assets/docs/images/tf-tutorial-images/tf-provisioned-cluster.png deleted file mode 100644 index 9ffa500cae..0000000000 Binary files a/assets/docs/images/tf-tutorial-images/tf-provisioned-cluster.png and /dev/null differ diff --git a/assets/docs/images/troubleshooting-pcg-cluster_settings.png b/assets/docs/images/troubleshooting-pcg-cluster_settings.png deleted file mode 100644 index a662e5642c..0000000000 Binary files a/assets/docs/images/troubleshooting-pcg-cluster_settings.png and /dev/null differ diff --git a/assets/docs/images/troubleshooting-pcg-http_error.png b/assets/docs/images/troubleshooting-pcg-http_error.png deleted file mode 100644 index b4a1e0cace..0000000000 Binary files a/assets/docs/images/troubleshooting-pcg-http_error.png and /dev/null differ diff --git a/assets/docs/images/troubleshooting_edge_grub-menu.png b/assets/docs/images/troubleshooting_edge_grub-menu.png deleted file mode 100644 index 347b4512b8..0000000000 Binary files a/assets/docs/images/troubleshooting_edge_grub-menu.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_app_deploy-apps_cluster-2-deploy-app.png b/assets/docs/images/tutorials/deploy-app/devx_app_deploy-apps_cluster-2-deploy-app.png deleted file mode 100644 index 99fd69ad67..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_app_deploy-apps_cluster-2-deploy-app.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_app_deploy-apps_scenario-1-overview.png 
b/assets/docs/images/tutorials/deploy-app/devx_app_deploy-apps_scenario-1-overview.png deleted file mode 100644 index 93596af90d..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_app_deploy-apps_scenario-1-overview.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-app_create-api-key.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-app_create-api-key.png deleted file mode 100644 index 3ca355e349..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-app_create-api-key.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-app_self-hosted-hello-universe.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-app_self-hosted-hello-universe.png deleted file mode 100644 index 287c9276a8..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-app_self-hosted-hello-universe.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_app-profile-creation.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_app-profile-creation.png deleted file mode 100644 index b33d2936c8..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_app-profile-creation.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_app-profiles.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_app-profiles.png deleted file mode 100644 index 9f9d31af47..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_app-profiles.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_architecture-diagram.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_architecture-diagram.png deleted file mode 100644 index 232f60d156..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_architecture-diagram.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_cluster-2-details-page.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_cluster-2-details-page.png deleted file mode 100644 index fa9bb79222..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_cluster-2-details-page.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_cluster-details-view.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_cluster-details-view.png deleted file mode 100644 index 9dc1367ccb..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_cluster-details-view.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_delete-apps-view.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_delete-apps-view.png deleted file mode 100644 index edba43be0a..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_delete-apps-view.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_delete-cluster-view.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_delete-cluster-view.png deleted file mode 100644 index 5d8c42d232..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_delete-cluster-view.png and /dev/null differ diff --git 
a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_hello-universe.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_hello-universe.png deleted file mode 100644 index 2f8fb851b4..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_hello-universe.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_postgres-service-create.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_postgres-service-create.png deleted file mode 100644 index 0b53be13c7..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_postgres-service-create.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_toggle-app-mode.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_toggle-app-mode.png deleted file mode 100644 index 8c06c06159..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_toggle-app-mode.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_ui-api-display.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_ui-api-display.png deleted file mode 100644 index 5bf3863201..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_ui-api-display.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_virtual-cluster-list.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_virtual-cluster-list.png deleted file mode 100644 index 91f4fdc630..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy-apps_virtual-cluster-list.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy_scenario-2-overview.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploy_scenario-2-overview.png deleted file mode 100644 index a68387709b..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploy_scenario-2-overview.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-app/devx_apps_deploys-apps_reverse-proxy-diagram.png b/assets/docs/images/tutorials/deploy-app/devx_apps_deploys-apps_reverse-proxy-diagram.png deleted file mode 100644 index c5ec8ee14e..0000000000 Binary files a/assets/docs/images/tutorials/deploy-app/devx_apps_deploys-apps_reverse-proxy-diagram.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_aws_create_cluster.png b/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_aws_create_cluster.png deleted file mode 100644 index 2a12d4f24f..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_aws_create_cluster.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png b/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png deleted file mode 100644 index 23b7e70cde..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_cluster_profile_view.png 
b/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_cluster_profile_view.png deleted file mode 100644 index 57151043ee..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_cluster_profile_view.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_basic_info.png b/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_basic_info.png deleted file mode 100644 index 12d283dfdc..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_basic_info.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_creation_parameters.png b/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_creation_parameters.png deleted file mode 100644 index bd44d3a4a4..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_creation_parameters.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_parameters.png b/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_parameters.png deleted file mode 100644 index 75833b971e..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_parameters.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_details.png b/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_details.png deleted file mode 100644 index 38bf3e8dce..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_details.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_profile_cluster_profile_review.png b/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_profile_cluster_profile_review.png deleted file mode 100644 index 7e200371cf..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_profile_cluster_profile_review.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-event_log.png b/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-event_log.png deleted file mode 100644 index a7a3662e82..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-event_log.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_azure_create_cluster.png b/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_azure_create_cluster.png deleted file mode 100644 index 4e07dec44d..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_azure_create_cluster.png and /dev/null differ diff --git 
a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_azure_create_cluster_details.png b/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_azure_create_cluster_details.png deleted file mode 100644 index 35f7822712..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_azure_create_cluster_details.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png b/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png deleted file mode 100644 index e6b223ce26..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_profile.png b/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_profile.png deleted file mode 100644 index e759bfc0f4..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_profile.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_profile_stack.png b/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_profile_stack.png deleted file mode 100644 index a7dad18f34..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_profile_stack.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_clusters_basic_info.png b/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_clusters_basic_info.png deleted file mode 100644 index 798b9095dd..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_clusters_basic_info.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_parameters.png b/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_parameters.png deleted file mode 100644 index 2963bc26f4..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_parameters.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_profile_review.png b/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_profile_review.png deleted file mode 100644 index 026fe0b1cf..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_profile_review.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_app.png b/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_app.png deleted file mode 100644 index 981efee952..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_app.png and /dev/null differ diff --git 
a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_application.png b/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_application.png deleted file mode 100644 index 58fb3ce1f9..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_application.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_create.png b/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_create.png deleted file mode 100644 index 395a8a412e..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_create.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_create_api_key.png b/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_create_api_key.png deleted file mode 100644 index 3ca355e349..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_create_api_key.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_create_events.png b/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_create_events.png deleted file mode 100644 index f9ad553999..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_create_events.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_delete-cluster-button.png b/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_delete-cluster-button.png deleted file mode 100644 index bc3becdd21..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_delete-cluster-button.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_manifest.png b/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_manifest.png deleted file mode 100644 index 980416a423..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_manifest.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_manifest_blue_btn.png b/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_manifest_blue_btn.png deleted file mode 100644 index 1befe1ada6..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_manifest_blue_btn.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png b/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png deleted file mode 100644 index 1d6ecbb023..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_profile_list_view.png b/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_profile_list_view.png deleted 
file mode 100644 index c82820fa84..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_profile_list_view.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_service_url.png b/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_service_url.png deleted file mode 100644 index 427445e63d..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_service_url.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_update_available.png b/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_update_available.png deleted file mode 100644 index 9cbf7a1118..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_update_available.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_workloads.png b/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_workloads.png deleted file mode 100644 index fcf977d7d5..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_workloads.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/deploy_app/clusters_public-cloud_deploy-k8s-cluster_app_update_pods.png b/assets/docs/images/tutorials/deploy-clusters/deploy_app/clusters_public-cloud_deploy-k8s-cluster_app_update_pods.png deleted file mode 100644 index b9a64f858c..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/deploy_app/clusters_public-cloud_deploy-k8s-cluster_app_update_pods.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/deploy_app/clusters_public-cloud_deploy-k8s-cluster_update_details_compare.png b/assets/docs/images/tutorials/deploy-clusters/deploy_app/clusters_public-cloud_deploy-k8s-cluster_update_details_compare.png deleted file mode 100644 index 5ee10964d5..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/deploy_app/clusters_public-cloud_deploy-k8s-cluster_update_details_compare.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/deploy_app/clusters_public-cloud_deploy-k8s-cluster_updates_available.png b/assets/docs/images/tutorials/deploy-clusters/deploy_app/clusters_public-cloud_deploy-k8s-cluster_updates_available.png deleted file mode 100644 index 63ee987684..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/deploy_app/clusters_public-cloud_deploy-k8s-cluster_updates_available.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_basic_info.png b/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_basic_info.png deleted file mode 100644 index 1efde03c27..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_basic_info.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_cluster_gcp_profile.png b/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_cluster_gcp_profile.png deleted file mode 100644 index 8c0f29c0d3..0000000000 Binary 
files a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_cluster_gcp_profile.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png b/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png deleted file mode 100644 index 8e683c7a44..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_clusters_parameters.png b/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_clusters_parameters.png deleted file mode 100644 index f18b86fa84..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_clusters_parameters.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_gcp_cluster_profile_stack_view.png b/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_gcp_cluster_profile_stack_view.png deleted file mode 100644 index c8f52ff0c3..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_gcp_cluster_profile_stack_view.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_gcp_ssh_key_create.png b/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_gcp_ssh_key_create.png deleted file mode 100644 index bfa1d0fa7c..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_gcp_ssh_key_create.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png b/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png deleted file mode 100644 index a417e42739..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_profile_details.png b/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_profile_details.png deleted file mode 100644 index d110c2f903..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_profile_details.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_profile_review.png b/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_profile_review.png deleted file mode 100644 index 2c192e3652..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_profile_review.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_provisioning.png b/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_provisioning.png deleted file mode 100644 index 8dd7402b3a..0000000000 Binary files 
a/assets/docs/images/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_provisioning.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/terraform/clusters_public-cloud_deploy-k8s-cluster_details.png b/assets/docs/images/tutorials/deploy-clusters/terraform/clusters_public-cloud_deploy-k8s-cluster_details.png deleted file mode 100644 index fa432f1beb..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/terraform/clusters_public-cloud_deploy-k8s-cluster_details.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-clusters/terraform/clusters_public-cloud_deploy-k8s-cluster_profile.png b/assets/docs/images/tutorials/deploy-clusters/terraform/clusters_public-cloud_deploy-k8s-cluster_profile.png deleted file mode 100644 index 92fcb9b52f..0000000000 Binary files a/assets/docs/images/tutorials/deploy-clusters/terraform/clusters_public-cloud_deploy-k8s-cluster_profile.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-health.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-health.png deleted file mode 100644 index e54edfda43..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-health.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-layers.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-layers.png deleted file mode 100644 index ebe1084b99..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-layers.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-metrics.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-metrics.png deleted file mode 100644 index d925de29da..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-metrics.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_default-scope.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_default-scope.png deleted file mode 100644 index e2d622dc04..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_default-scope.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_delete-cluster.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_delete-cluster.png deleted file mode 100644 index e4a446bf39..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_delete-cluster.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_delete-profile.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_delete-profile.png deleted file mode 100644 index 175a18290c..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_delete-profile.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_generate-api-key.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_generate-api-key.png deleted file mode 100644 index 18d67992a8..0000000000 
Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_generate-api-key.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_ngrok-start.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_ngrok-start.png deleted file mode 100644 index 25518dde1e..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_ngrok-start.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_pack-push.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_pack-push.png deleted file mode 100644 index 754b8db349..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_pack-push.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_palette-cloud-account.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_palette-cloud-account.png deleted file mode 100644 index 1922ea54a4..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_palette-cloud-account.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-certsan.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-certsan.png deleted file mode 100644 index 03758a95fe..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-certsan.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-layer.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-layer.png deleted file mode 100644 index c3d19d74fd..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-layer.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-values-yaml.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-values-yaml.png deleted file mode 100644 index 6ea1717c87..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-values-yaml.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-delete.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-delete.png deleted file mode 100644 index 2262d27d1b..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-delete.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-edit.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-edit.png deleted file mode 100644 index 4b9658e70f..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-edit.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-sync.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-sync.png deleted file mode 100644 index c03243cba4..0000000000 Binary files 
a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-sync.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_success.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_success.png deleted file mode 100644 index 653b4bee06..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_success.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_tenant-admin.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_tenant-admin.png deleted file mode 100644 index 4b84535e51..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_tenant-admin.png and /dev/null differ diff --git a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_verify-profile.png b/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_verify-profile.png deleted file mode 100644 index bd4464af35..0000000000 Binary files a/assets/docs/images/tutorials/deploy-pack/registries-and-packs_deploy-pack_verify-profile.png and /dev/null differ diff --git a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_access-service.png b/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_access-service.png deleted file mode 100644 index 4360cd8a0a..0000000000 Binary files a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_access-service.png and /dev/null differ diff --git a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_add-manifest-file.png b/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_add-manifest-file.png deleted file mode 100644 index 892c0c3474..0000000000 Binary files a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_add-manifest-file.png and /dev/null differ diff --git a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_add-manifest.png b/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_add-manifest.png deleted file mode 100644 index 629331af97..0000000000 Binary files a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_add-manifest.png and /dev/null differ diff --git a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_add-master-node.png b/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_add-master-node.png deleted file mode 100644 index d2ea639390..0000000000 Binary files a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_add-master-node.png and /dev/null differ diff --git a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_add-worker-node.png b/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_add-worker-node.png deleted file mode 100644 index d422aee005..0000000000 Binary files a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_add-worker-node.png and /dev/null differ diff --git a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_delete-cluster.png b/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_delete-cluster.png deleted file mode 100644 index 3765491601..0000000000 Binary files a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_delete-cluster.png and /dev/null differ diff --git a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_delete-profile.png b/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_delete-profile.png deleted file mode 
100644 index daa7a0ad8f..0000000000 Binary files a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_delete-profile.png and /dev/null differ diff --git a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_edge-hosts.png b/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_edge-hosts.png deleted file mode 100644 index 851c8fd42d..0000000000 Binary files a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_edge-hosts.png and /dev/null differ diff --git a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_edit-profile.png b/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_edit-profile.png deleted file mode 100644 index a3fccbeb78..0000000000 Binary files a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_edit-profile.png and /dev/null differ diff --git a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_hello-universe.png b/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_hello-universe.png deleted file mode 100644 index 2a4c1447f3..0000000000 Binary files a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_hello-universe.png and /dev/null differ diff --git a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_overarching.png b/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_overarching.png deleted file mode 100644 index 6d9ea361ac..0000000000 Binary files a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_overarching.png and /dev/null differ diff --git a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_profile-success.png b/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_profile-success.png deleted file mode 100644 index e6d4dae1ff..0000000000 Binary files a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_profile-success.png and /dev/null differ diff --git a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_registration-token-fields.png b/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_registration-token-fields.png deleted file mode 100644 index 1d318b319e..0000000000 Binary files a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_registration-token-fields.png and /dev/null differ diff --git a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_registration-token.png b/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_registration-token.png deleted file mode 100644 index 6efc721952..0000000000 Binary files a/assets/docs/images/tutorials/edge/clusters_edge_deploy-cluster_registration-token.png and /dev/null differ diff --git a/assets/docs/images/tutorials/palette-canvos/clusters_edge_palette-canvos_artifacts.png b/assets/docs/images/tutorials/palette-canvos/clusters_edge_palette-canvos_artifacts.png deleted file mode 100644 index 5ff9b1920d..0000000000 Binary files a/assets/docs/images/tutorials/palette-canvos/clusters_edge_palette-canvos_artifacts.png and /dev/null differ diff --git a/assets/docs/images/tutorials/palette-canvos/clusters_edge_palette-canvos_edit_profile.png b/assets/docs/images/tutorials/palette-canvos/clusters_edge_palette-canvos_edit_profile.png deleted file mode 100644 index a3fccbeb78..0000000000 Binary files a/assets/docs/images/tutorials/palette-canvos/clusters_edge_palette-canvos_edit_profile.png and /dev/null differ diff --git a/assets/docs/images/tutorials/tf-tutorial-images/newly-created-cluster.png b/assets/docs/images/tutorials/tf-tutorial-images/newly-created-cluster.png deleted file mode 
100644 index 67ff1bf945..0000000000 Binary files a/assets/docs/images/tutorials/tf-tutorial-images/newly-created-cluster.png and /dev/null differ diff --git a/assets/docs/images/tutorials/tf-tutorial-images/terraform-finished-install.png b/assets/docs/images/tutorials/tf-tutorial-images/terraform-finished-install.png deleted file mode 100644 index 1585c5630d..0000000000 Binary files a/assets/docs/images/tutorials/tf-tutorial-images/terraform-finished-install.png and /dev/null differ diff --git a/assets/docs/images/tutorials/tf-tutorial-images/tf-apply.png b/assets/docs/images/tutorials/tf-tutorial-images/tf-apply.png deleted file mode 100644 index 66da925835..0000000000 Binary files a/assets/docs/images/tutorials/tf-tutorial-images/tf-apply.png and /dev/null differ diff --git a/assets/docs/images/tutorials/tf-tutorial-images/tf-create-missing-resources.png b/assets/docs/images/tutorials/tf-tutorial-images/tf-create-missing-resources.png deleted file mode 100644 index 5e35029bd0..0000000000 Binary files a/assets/docs/images/tutorials/tf-tutorial-images/tf-create-missing-resources.png and /dev/null differ diff --git a/assets/docs/images/tutorials/tf-tutorial-images/tf-destroy-complete.png b/assets/docs/images/tutorials/tf-tutorial-images/tf-destroy-complete.png deleted file mode 100644 index c4e89a5ce0..0000000000 Binary files a/assets/docs/images/tutorials/tf-tutorial-images/tf-destroy-complete.png and /dev/null differ diff --git a/assets/docs/images/tutorials/tf-tutorial-images/tf-destroy.png b/assets/docs/images/tutorials/tf-tutorial-images/tf-destroy.png deleted file mode 100644 index d56c95e1c6..0000000000 Binary files a/assets/docs/images/tutorials/tf-tutorial-images/tf-destroy.png and /dev/null differ diff --git a/assets/docs/images/tutorials/tf-tutorial-images/tf-execution-plan.png b/assets/docs/images/tutorials/tf-tutorial-images/tf-execution-plan.png deleted file mode 100644 index e27e77638a..0000000000 Binary files a/assets/docs/images/tutorials/tf-tutorial-images/tf-execution-plan.png and /dev/null differ diff --git a/assets/docs/images/tutorials/tf-tutorial-images/tf-finished-installing.png b/assets/docs/images/tutorials/tf-tutorial-images/tf-finished-installing.png deleted file mode 100644 index 87db015bd3..0000000000 Binary files a/assets/docs/images/tutorials/tf-tutorial-images/tf-finished-installing.png and /dev/null differ diff --git a/assets/docs/images/tutorials/tf-tutorial-images/tf-initialized.png b/assets/docs/images/tutorials/tf-tutorial-images/tf-initialized.png deleted file mode 100644 index 7c7327ba73..0000000000 Binary files a/assets/docs/images/tutorials/tf-tutorial-images/tf-initialized.png and /dev/null differ diff --git a/assets/docs/images/tutorials/tf-tutorial-images/tf-initializing.png b/assets/docs/images/tutorials/tf-tutorial-images/tf-initializing.png deleted file mode 100644 index 48ad4bc158..0000000000 Binary files a/assets/docs/images/tutorials/tf-tutorial-images/tf-initializing.png and /dev/null differ diff --git a/assets/docs/images/tutorials/tf-tutorial-images/tf-kubeflow.png b/assets/docs/images/tutorials/tf-tutorial-images/tf-kubeflow.png deleted file mode 100644 index 92cbbf85ee..0000000000 Binary files a/assets/docs/images/tutorials/tf-tutorial-images/tf-kubeflow.png and /dev/null differ diff --git a/assets/docs/images/tutorials/tf-tutorial-images/tf-provision-plan.png b/assets/docs/images/tutorials/tf-tutorial-images/tf-provision-plan.png deleted file mode 100644 index 4aa50e7e3d..0000000000 Binary files 
a/assets/docs/images/tutorials/tf-tutorial-images/tf-provision-plan.png and /dev/null differ diff --git a/assets/docs/images/tutorials/tf-tutorial-images/tf-provisioned-cluster.png b/assets/docs/images/tutorials/tf-tutorial-images/tf-provisioned-cluster.png deleted file mode 100644 index 9ffa500cae..0000000000 Binary files a/assets/docs/images/tutorials/tf-tutorial-images/tf-provisioned-cluster.png and /dev/null differ diff --git a/assets/docs/images/upgrade-details1.png b/assets/docs/images/upgrade-details1.png deleted file mode 100644 index 78d0856451..0000000000 Binary files a/assets/docs/images/upgrade-details1.png and /dev/null differ diff --git a/assets/docs/images/upgrade-details2.png b/assets/docs/images/upgrade-details2.png deleted file mode 100644 index f539b77867..0000000000 Binary files a/assets/docs/images/upgrade-details2.png and /dev/null differ diff --git a/assets/docs/images/user-experience.png b/assets/docs/images/user-experience.png deleted file mode 100644 index 29757b7481..0000000000 Binary files a/assets/docs/images/user-experience.png and /dev/null differ diff --git a/assets/docs/images/user-management_palette-rback_palette-rbac-model.png b/assets/docs/images/user-management_palette-rback_palette-rbac-model.png deleted file mode 100644 index 8f4233e686..0000000000 Binary files a/assets/docs/images/user-management_palette-rback_palette-rbac-model.png and /dev/null differ diff --git a/assets/docs/images/virtualized-edge.png b/assets/docs/images/virtualized-edge.png deleted file mode 100644 index 7037bfbe5b..0000000000 Binary files a/assets/docs/images/virtualized-edge.png and /dev/null differ diff --git a/assets/docs/images/vmware_arch_oct_2020.png b/assets/docs/images/vmware_arch_oct_2020.png deleted file mode 100644 index 85f6326cb9..0000000000 Binary files a/assets/docs/images/vmware_arch_oct_2020.png and /dev/null differ diff --git a/assets/docs/images/vmware_cluster_architecture.png b/assets/docs/images/vmware_cluster_architecture.png deleted file mode 100644 index 66c66714fd..0000000000 Binary files a/assets/docs/images/vmware_cluster_architecture.png and /dev/null differ diff --git a/assets/hero-background.png b/assets/hero-background.png deleted file mode 100644 index 1e2fe79762..0000000000 Binary files a/assets/hero-background.png and /dev/null differ diff --git a/assets/hero.png b/assets/hero.png deleted file mode 100644 index 4eff941e58..0000000000 Binary files a/assets/hero.png and /dev/null differ diff --git a/assets/icons/about.svg b/assets/icons/about.svg deleted file mode 100644 index a723e28956..0000000000 --- a/assets/icons/about.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/assets/logo_landscape_for_white.png b/assets/logo_landscape_for_white.png deleted file mode 100644 index 7cd92ca33d..0000000000 Binary files a/assets/logo_landscape_for_white.png and /dev/null differ diff --git a/assets/man_space_lost.png b/assets/man_space_lost.png deleted file mode 100644 index 7d3a412aad..0000000000 Binary files a/assets/man_space_lost.png and /dev/null differ diff --git a/assets/menu-background.png b/assets/menu-background.png deleted file mode 100644 index 317b14b706..0000000000 Binary files a/assets/menu-background.png and /dev/null differ diff --git a/assets/spectrocloud-logo.png b/assets/spectrocloud-logo.png deleted file mode 100644 index 0cc679a9cd..0000000000 Binary files a/assets/spectrocloud-logo.png and /dev/null differ diff --git a/babel.config.js b/babel.config.js new file mode 100644 index 0000000000..9e91354f12 --- /dev/null +++ 
b/babel.config.js @@ -0,0 +1,8 @@ +module.exports = { + plugins: ["macros"], + presets: [ + require.resolve("@docusaurus/core/lib/babel/preset"), + ["@babel/preset-env"], + "@babel/preset-typescript", + ], +}; diff --git a/commitlint.config.js b/commitlint.config.js new file mode 100644 index 0000000000..5073c20db1 --- /dev/null +++ b/commitlint.config.js @@ -0,0 +1 @@ +module.exports = { extends: ["@commitlint/config-conventional"] }; diff --git a/config.js b/config.js deleted file mode 100644 index 01e1aee228..0000000000 --- a/config.js +++ /dev/null @@ -1,46 +0,0 @@ -const config = { - gatsby: { - pathPrefix: "/", - siteUrl: "https://docs.spectrocloud.com", - gaTrackingId: null, - trailingSlash: true, - }, - header: { - search: { - enabled: true, - indexName: process.env.NODE_ENV, - algoliaAppId: process.env.GATSBY_ALGOLIA_APP_ID, - algoliaSearchKey: process.env.GATSBY_ALGOLIA_SEARCH_KEY, - algoliaAdminKey: process.env.ALGOLIA_ADMIN_KEY, - }, - }, - siteMetadata: { - title: "Spectro cloud documentation", - description: "Spectro cloud documentation, guides, API documentation, integrations and more", - ogImage: null, - docsLocation: "https://github.com/spectrocloud/librarium/edit/master/content/docs", - apiLocation: "https://github.com/spectrocloud/librarium/edit/master/content/api", - favicon: "assets/icons/favicon.png", - }, - pwa: { - enabled: false, // disabling this will also remove the existing service worker. - manifest: { - name: "Spectro cloud documentation", - short_name: "Spectro cloud docs", - start_url: "/", - background_color: "#206cd1", - theme_color: "#206cd1", - display: "standalone", - crossOrigin: "use-credentials", - icons: [ - { - src: "src/pwa-512.png", - sizes: `512x512`, - type: `image/png`, - }, - ], - }, - }, -}; - -module.exports = config; diff --git a/content/api/0-index.md b/content/api/0-index.md deleted file mode 100644 index 7d2d8ae490..0000000000 --- a/content/api/0-index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: "Home" -metaTitle: "Spectro Cloud API" -metaDescription: "Spectro Cloud API Documentation" -icon: "graph" -hideToC: false -fullWidth: false -hiddenFromNav: true -hideToCSidebar: true ---- diff --git a/content/api/v1/01-auth.md b/content/api/v1/01-auth.md deleted file mode 100644 index b3973f8d95..0000000000 --- a/content/api/v1/01-auth.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Authentication' -metaTitle: 'Authentication' -metaDescription: 'List of API endpoints that describes the authentication layer' -api: true -paths: ['/v1/auth'] ---- - -# Authentication diff --git a/content/api/v1/02-apiKeys.md b/content/api/v1/02-apiKeys.md deleted file mode 100644 index cfc876fbc6..0000000000 --- a/content/api/v1/02-apiKeys.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'API Key' -metaTitle: 'API Key' -metaDescription: 'List of API endpoints that describes the API key as authentication entity' -api: true -paths: ['/v1/apiKeys'] ---- - -# API Keys diff --git a/content/api/v1/02-appDeployments.md b/content/api/v1/02-appDeployments.md deleted file mode 100644 index 5cb44801a9..0000000000 --- a/content/api/v1/02-appDeployments.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'App Deployments' -metaTitle: 'App Deployments' -metaDescription: 'List of API endpoints that can be to interact with Application Deployment resources' -api: true -paths: ['/v1/appDeployments'] ---- - -# App Deployments diff --git a/content/api/v1/03-audits.md b/content/api/v1/03-audits.md deleted file mode 100644 index 31abbdc60b..0000000000 --- a/content/api/v1/03-audits.md +++ 
/dev/null @@ -1,9 +0,0 @@ ---- -title: 'Audit Logs' -metaTitle: 'Audit Logs' -metaDescription: 'List of API endpoints that can be used to generate audit logs' -api: true -paths: ['/v1/audits'] ---- - -# Audit Logs diff --git a/content/api/v1/05-cloudaccounts.md b/content/api/v1/05-cloudaccounts.md deleted file mode 100644 index a57c05cb8b..0000000000 --- a/content/api/v1/05-cloudaccounts.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Cloud Accounts' -metaTitle: 'Cloud Accounts' -metaDescription: 'List of API endpoints that can be used to list, create or edit cloud accounts' -api: true -paths: ['/v1/cloudaccounts'] ---- - -# Cloud Accounts diff --git a/content/api/v1/07-cloudconfig.md b/content/api/v1/07-cloudconfig.md deleted file mode 100644 index 059e810c2a..0000000000 --- a/content/api/v1/07-cloudconfig.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Cloud Config' -metaTitle: 'Cloud Config' -metaDescription: 'List of API endpoints that can be used to get or update the cloud configuration of a cluster' -api: true -paths: ['/v1/cloudconfigs'] ---- - -# Cloud Config diff --git a/content/api/v1/09-clouds.md b/content/api/v1/09-clouds.md deleted file mode 100644 index aabd62c205..0000000000 --- a/content/api/v1/09-clouds.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Clouds' -metaTitle: 'Clouds' -metaDescription: 'List of API endpoints that can be used to get cloud properties to be used in cloud configurations' -api: true -paths: ['/v1/clouds'] ---- - -# Clouds diff --git a/content/api/v1/10-clusters.md b/content/api/v1/10-clusters.md deleted file mode 100644 index 411f7c93b1..0000000000 --- a/content/api/v1/10-clusters.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Clusters' -metaTitle: 'Clusters' -metaDescription: 'List of API endpoints that can be used to list, create or update clusters' -api: true -paths: ['/v1/spectroclusters'] ---- - -# Clusters diff --git a/content/api/v1/11-clusterprofiles.md b/content/api/v1/11-clusterprofiles.md deleted file mode 100644 index 72cd98b907..0000000000 --- a/content/api/v1/11-clusterprofiles.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Cluster Profiles' -metaTitle: 'Cluster Profiles' -metaDescription: 'List of API endpoints that can be used to list, create or update cluster profiles' -api: true -paths: ['/v1/clusterprofiles'] ---- - -# Cluster Profiles diff --git a/content/api/v1/12-edgehosts.md b/content/api/v1/12-edgehosts.md deleted file mode 100644 index a87d0b9734..0000000000 --- a/content/api/v1/12-edgehosts.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Edge Hosts' -metaTitle: 'Edge Hosts' -metaDescription: 'List of API endpoints that can be used to list, create or update Edge Hosts.' 
-api: true -paths: ['/v1/edgehosts'] ---- - -# Edge Hosts diff --git a/content/api/v1/15-dashboard.md b/content/api/v1/15-dashboard.md deleted file mode 100644 index 92134057ae..0000000000 --- a/content/api/v1/15-dashboard.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Dashboard' -metaTitle: 'Dashboard' -metaDescription: 'List of API endpoints that can be used to get statistics for projects, clusters or cluster profiles' -api: true -paths: ['/v1/dashboard'] ---- - -# Dashboard diff --git a/content/api/v1/18-pcg.md b/content/api/v1/18-pcg.md deleted file mode 100644 index 6c746f0eb9..0000000000 --- a/content/api/v1/18-pcg.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Pcg' -metaTitle: 'Pcg' -metaDescription: 'List of API endpoints that can be used to get clustergroups data' -api: true -paths: ['/v1/pcg'] ---- - -# Pcg diff --git a/content/api/v1/19-events.md b/content/api/v1/19-events.md deleted file mode 100644 index 966757afe7..0000000000 --- a/content/api/v1/19-events.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Events' -metaTitle: 'Events' -metaDescription: 'List of API endpoints that can be used to get generic events for different components like clusters' -api: true -paths: ['/v1/events'] ---- - -# Events diff --git a/content/api/v1/20-appProfiles.md b/content/api/v1/20-appProfiles.md deleted file mode 100644 index c17868fbfe..0000000000 --- a/content/api/v1/20-appProfiles.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'App Profiles' -metaTitle: 'App Profiles' -metaDescription: 'List of API endpoints that can be used to get appProfiles data' -api: true -paths: ['/v1/appProfiles'] ---- - -# App Profiles diff --git a/content/api/v1/22-clustergroups.md b/content/api/v1/22-clustergroups.md deleted file mode 100644 index e4f8cab698..0000000000 --- a/content/api/v1/22-clustergroups.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Cluster Groups' -metaTitle: 'Cluster Groups' -metaDescription: 'List of API endpoints that can be used to get clustergroups data' -api: true -paths: ['/v1/clustergroups'] ---- - -# Cluster Groups diff --git a/content/api/v1/24-filters.md b/content/api/v1/24-filters.md deleted file mode 100644 index b140d0650f..0000000000 --- a/content/api/v1/24-filters.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Filters' -metaTitle: 'Filters' -metaDescription: 'List of API endpoints that can be used to manage resource filters' -api: true -paths: ['/v1/filters'] ---- - -# Filters diff --git a/content/api/v1/27-metrics.md b/content/api/v1/27-metrics.md deleted file mode 100644 index 3a7f1cbd05..0000000000 --- a/content/api/v1/27-metrics.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Metrics' -metaTitle: 'Metrics' -metaDescription: 'List of API endpoints that is used to get detailed metrics for different components' -api: true -paths: ['/v1/metrics'] ---- - -# Metrics diff --git a/content/api/v1/31-notifications.md b/content/api/v1/31-notifications.md deleted file mode 100644 index 3f50aed9f2..0000000000 --- a/content/api/v1/31-notifications.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Notifications' -metaTitle: 'Notifications' -metaDescription: 'List of API endpoints that is used to list or acknowledge notifications for different components' -api: true -paths: ['/v1/notifications'] ---- - -# Notifications diff --git a/content/api/v1/33-overlords.md b/content/api/v1/33-overlords.md deleted file mode 100644 index 2e69c916e7..0000000000 --- a/content/api/v1/33-overlords.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Private Cloud Gateway' -metaTitle: 'Private Cloud Gateway' 
-metaDescription: 'List of API endpoints that is used to list, create or configure private cloud gateways' -api: true -paths: ['/v1/overlords'] ---- - -# Private Cloud Gateway diff --git a/content/api/v1/35-packs.md b/content/api/v1/35-packs.md deleted file mode 100644 index 3706733c68..0000000000 --- a/content/api/v1/35-packs.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Packs' -metaTitle: 'Packs' -metaDescription: 'List of API endpoints that is used to list the packs of a repository' -api: true -paths: ['/v1/packs'] ---- - -# Packs diff --git a/content/api/v1/39-projects.md b/content/api/v1/39-projects.md deleted file mode 100644 index 090718efbb..0000000000 --- a/content/api/v1/39-projects.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Projects' -metaTitle: 'Projects' -metaDescription: 'List of API endpoints that is used to list, create or update projects' -api: true -paths: ['/v1/projects'] ---- - -# Projects diff --git a/content/api/v1/41-registries.md b/content/api/v1/41-registries.md deleted file mode 100644 index 1a136dace0..0000000000 --- a/content/api/v1/41-registries.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Registries' -metaTitle: 'Registries' -metaDescription: 'List of API endpoints that is used to list, create or update repositories' -api: true -paths: ['/v1/registries'] ---- - -# Registries diff --git a/content/api/v1/43-roles.md b/content/api/v1/43-roles.md deleted file mode 100644 index b4df80fdfc..0000000000 --- a/content/api/v1/43-roles.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Roles' -metaTitle: 'Roles' -metaDescription: 'List of API endpoints that is used to list, create or update roles' -api: true -paths: ['/v1/roles'] ---- - -# Roles diff --git a/content/api/v1/49-teams.md b/content/api/v1/49-teams.md deleted file mode 100644 index 3539f019d9..0000000000 --- a/content/api/v1/49-teams.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Teams' -metaTitle: 'Teams' -metaDescription: 'List of API endpoints that is used to list, create or update teams' -api: true -paths: ['/v1/teams'] ---- - -# Teams diff --git a/content/api/v1/50-tenants.md b/content/api/v1/50-tenants.md deleted file mode 100644 index 11015ff4a9..0000000000 --- a/content/api/v1/50-tenants.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Tenants' -metaTitle: 'Tenants' -metaDescription: 'List of API endpoints that is used to list, create or update tenants' -api: true -paths: ['/v1/tenants'] ---- - -# Tenants diff --git a/content/api/v1/53-users.md b/content/api/v1/53-users.md deleted file mode 100644 index 0480a72a0c..0000000000 --- a/content/api/v1/53-users.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Users' -metaTitle: 'Users' -metaDescription: 'List of API endpoints that is used to list, create or update users' -api: true -paths: ['/v1/users'] ---- - -# Users diff --git a/content/api/v1/54-workspaces.md b/content/api/v1/54-workspaces.md deleted file mode 100644 index 588d1932cd..0000000000 --- a/content/api/v1/54-workspaces.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: 'Workspaces' -metaTitle: 'Workspaces' -metaDescription: 'List of API endpoints that is used to list, create or update workspaces' -api: true -paths: ['/v1/workspaces'] ---- - -# Workspaces diff --git a/content/deprecated/04-clusters/03-edge/04-edgeforge-workflow/01-build-kairos-os.md b/content/deprecated/04-clusters/03-edge/04-edgeforge-workflow/01-build-kairos-os.md deleted file mode 100644 index 617243e85d..0000000000 --- a/content/deprecated/04-clusters/03-edge/04-edgeforge-workflow/01-build-kairos-os.md +++ /dev/null @@ -1,106 
+0,0 @@ ---- -title: "Bring Your Own OS" -metaTitle: "Bring Your Own OS - Create Kairos Image" -metaDescription: "Learn about building your own Kairos Image" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -Edge supports the ability for you to specify a custom Operating System (OS) for your Edge host runtime. Building a system using your choice of OS requires creating a [Kairos-base](https://kairos.io/) image with your custom OS. The Palette feature, [Bring Your Own OS (BYOOS)](/integrations/byoos) allows you to use a custom OS in a cluster profile. - - -As an example, the following steps will guide you on how to build a Kairos-based Red Hat Enterprise Linux (RHEL) image. Use the same steps for any other operating system. - -
- - - - -BYOOS gives you the flexibility to tailor and manage the OS layer in your cluster profiles, ensuring that clusters perform optimally to meet your environment needs. -To learn how to use your own OS images with an Edge cluster profile, refer to the [Model Edge Native Cluster Profile](https://docs.spectrocloud.com/clusters/edge/site-deployment/model-profile) guide. - - - - -# Prerequisites - -- Linux Machine (Physical or VM) with an AMD64 architecture. - - -- Access to a container registry with permission to push container images. Review the registry login instructions for your respective registry for guidance on logging in. - - - - -Some operating systems require credentials to download the source image, such as RHEL. An RHEL subscription is required in this example to download the RHEL Universal Base Images (UBI) needed to build the Edge provider image. Ensure you have the necessary credentials to download the OS source image. - - - - - -# Build Image - -1. Issue the following commands to prepare your server. You can also add more packages to the `apt install` command if needed. -
- - ```shell - mkdir -p /etc/apt/keyrings - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg - echo \ - "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - sudo apt update -y - sudo apt install docker-ce docker-ce-cli containerd.io docker-compose-plugin git-all -y - ``` - -2. Create a workspace and download the builder code. - -
- - - ```shell - mkdir -p ~/workspace/ - cd ~/workspace/ - git clone https://github.com/spectrocloud/pxke-samples - ``` - -3. Build the Kairos image. In this step, you will create a Kairos-based core image from an RHEL 8 base OS. Core images form the basis for the Kubernetes provider images used for host cluster provisioning. Review the contents of the Dockerfile to understand the steps involved in this process. You must supply your RHEL subscription credentials to build the image successfully. - 
- - ```shell - cd pxke-samples/core-images - docker build \ - --tag [your image repository]/rhel8-kairos:1.0 \ - --build-arg USERNAME=[rhel subscription username]\ - --build-arg PASSWORD=[rhel subscription password] \ - --file Dockerfile.rhel8 . - ``` - -4. Upload the image to your container registry. - -
- - ```shell - docker push [your image repository]/rhel8-kairos:1.0 - ``` ---- - -
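As an optional sanity check, not part of the original steps, you can confirm the pushed image is retrievable from the registry before referencing it elsewhere. The repository path below is the same placeholder used above.

```shell
# Pull the image back from the registry to verify the push succeeded,
# then print its image ID. Replace the placeholder with your repository path.
docker pull [your image repository]/rhel8-kairos:1.0
docker image inspect [your image repository]/rhel8-kairos:1.0 --format '{{.Id}}'
```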
- - -Your image will be used in the [Build Images](/clusters/edge/edgeforge-workflow/palette-canvos) step and will become part of your Edge artifact. The custom OS you created will also be used in the OS layer of the cluster profile through the [Bring Your Own OS (BYOOS)](/integrations/byoos) pack. -
- - -# Next Steps - - -Your next step is to evaluate if you need to create a content bundle. To create a content bundle, check out the [Build Content Bundle](/clusters/edge/edgeforge-workflow/palette-canvos) guide. - -
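For reference, the image pushed above can later be supplied to the Palette Edge CLI through the `--base-image-uri` flag covered in the Build Images guide that follows. A minimal sketch, assuming the `k3s` flavor and placeholder registry paths:

```shell
# Hypothetical invocation: substitute your own repository paths and output directory.
./palette-edge generate \
    --base-image-uri [your image repository]/rhel8-kairos:1.0 \
    --k8s-flavor k3s \
    --output rhel8-k3s \
    --push-image-repository [your registry path]
```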
diff --git a/content/deprecated/04-clusters/03-edge/04-edgeforge-workflow/04-build-images.md b/content/deprecated/04-clusters/03-edge/04-edgeforge-workflow/04-build-images.md deleted file mode 100644 index 2d26051c68..0000000000 --- a/content/deprecated/04-clusters/03-edge/04-edgeforge-workflow/04-build-images.md +++ /dev/null @@ -1,231 +0,0 @@ ---- -title: "Build Images" -metaTitle: "Build Images" -metaDescription: "Learn about building your enterprise Edge artifacts" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -You can use the Palette Edge CLI to create an Edge artifact. The Edge artifact will include everything you have created up to this point: - -
-
-* Bring Your Own Operating System (BYOOS)
-
-
-* Content Bundle
-
-
-* User Data
-
-
-![A diagram that illustrates the mentioned pieces making up an Edge artifact created by the Palette Edge CLI](/clusters_edge-forge-workflow_build-images_edge-artifact-result.png)
-
-Use the following steps to create an Edge artifact for your Edge host.
-# Prerequisites
-
-- Linux Machine (Physical or VM) with an AMD64 architecture.
-
-
-- 8 CPUs
-
-
-- 16 GB Memory
-
-
-- 150 GB Storage
-
-  If you experience disk space constraints on the machine where images are built, you can remove unnecessary Docker images and volumes, or start the process on a machine with more storage allocated.
-
-
-- Access to a container registry with permission to push container images. For guidance on logging in, review the registry login instructions for your respective registry. With Docker, use the `docker login` command to log in to the registry.
-
-# Create Artifact
-
-Choose the workflow that fits your needs.
-
-
- - -1. Download the Palette Edge CLI and assign the executable bit. - -
- - ```shell - VERSION=3.4.3 - wget https://software.spectrocloud.com/stylus/v$VERSION/cli/linux/palette-edge - chmod +x palette-edge - ``` - -2. Issue the `show` command to review the list of options for Operating System (OS) distribution and versions, Kubernetes distributions and versions, the Spectro Agent Version, and Kairos version. - -
- - ```shell - ./palette-edge show - ``` - - ![CLI example output from the show command](/clusters_edge-forge-workflow_build-images_edge-cli-show.png) - -
- - The Kubernetes distribution and versions you choose must be available in the list displayed. We will continuously release newer versions of Kubernetes as part of our release cycle. If you decide to use your custom OS, you must build a Kairos image from the OS you used in the [Bring Your Own OS](/clusters/edge/edgeforge-workflow/build-kairos-os) guide. Typically, you will keep the Spectro Agent and Kairos versions the same. - - -3. Use the `generate` command to create an image scaffolding by providing your choice of OS and Kubernetes distribution. There are several CLI flags you can specify to the `generate` command. The following flags are the most common. - - - - | Parameter | Description | -|--------------------------|-----------------------------------------------------------------------------------------------------| -| `--os-flavor` | OS flavor. | -| `--k8s-flavor` | Kubernetes flavor. | -| `--output` | Directory for generating build files. | -| `--push-image-repository` | Repository for generated container images. | -| `--content-path` | Optional location of the content bundle if you preload content. | -| `--cache-provider-images` | Additional flag to preload generated provider images into the installer ISO. | -| `--cloud-init-file` | Specify the Edge Installer configuration user data file to include in the Edge artifact. | - -
- - - - When using the `generate` command, the specified registry is where Edge artifacts will be uploaded. - - - - - ```shell - ./palette-edge generate --os-flavor [pick-os] \ - --k8s-flavor [pick-k8s] \ - --output [output directory] \ - --push-image-repository [your registry path] \ - --content-path [path to content bundle, if applicable] \ - --cache-provider-images - ``` - - In this example, an `OpenSuse` + `k3s` image using the upstream Kairos `opensuse-leap` images is selected. The scaffolding image will also get published to a target repo `gcr.io/my-registry` and will include a content bundle. The `generate` command would look similar to the following. - - Example: - ```shell - ./palette-edge generate --os-flavor opensuse-leap \ - --k8s-flavor k3s \ - --output opensuse-k3s \ - --push-image-repository gcr.io/my-registry \ - --content-path /temp/bundles/content-c59a5a88/spectro-content-c59a5a88.zst \ - --cache-provider-images - ``` - Output: - ```shell - INFO Creating directory opensuse-k3s - INFO Created scaffolding directory structure under directory opensuse-k3s with the following parameters - ┌───────────────────────────────────────────────────────────────────────────┐ - | Spectro Agent Version | v0.0.0-d155796 | - | Base Image | quay.io/kairos/core-opensuse-leap:v1.5.0 | - | K8S Flavor | k3s | - | K8S Versions | 1.25.2-k3s1,1.24.6-k3s1,1.23.12-k3s1,1.22.15-k3s1 | - | Push Image Repository | gcr.io/spectro-testing | - | Kairos Version | v1.5.0 | - └───────────────────────────────────────────────────────────────────────────┘ - To build an installer iso and target docker images for - various versions supported, run the 'build.sh' in the - 'opensuse-k3s' directory. For any customizations to made to - all the generated images e.g adding packages, edit the - 'images/Dockerfile' as needed before running - 'build.sh'.Files to be copied to the target images can be - places under 'overlay/files' and files for the iso only can - be placed under 'overlay/files-iso - ``` - - For custom images use the `--base-image-uri` flag and specify the path to the custom image. - - Example: - - ```shell - ./palette-edge generate \ - --base-image-uri quay.io/kairos/core-rockylinux:v1.5.0 \ - --k8s-flavor k3s \ - --output rockylinux-k3s \ - --push-image-repository gcr.io/my-registry - ``` - - -4. Review the content of the output directory you specified using `--output` flag. The output directory structure contains the following files. - -
-
-   ![The output directory content in a tree diagram](/clusters_edge-forge-workflow_build-images_edge-cli-output.png)
-
-
-5. Navigate to the output directory and review the file **.VERSIONS.env**. Set the variable `PUSH_BUILD` to `true` so that the Edge provider images and the Edge Installer image get pushed to your image registry. The alternative is to manually push all the images after the image creation process completes.
-
-
-6. Before you start the build script, you can make changes to customize your build. Review the use cases below to learn more about customization options.
-
-   | Use case | Description |
-   | --- | --- |
-   | Modifying/controlling Kubernetes versions and flavors | You can update the **.VERSIONS.env** file or specify the versions and flavors as arguments to the build command. |
-   | Adding custom packages to target OS images | Edit the **Dockerfile** of the respective OS images to add the install commands using `apt-get` or `zypper`. |
-   | Adding custom files or directories into Kubernetes provider container images | Add the custom files or directories in the **overlay/files/** folder. The files directory is copied directly under the */* folder in the target image. |
-   | Adding custom content to the Edge Installer ISO | Place the custom content in the **overlay/files-iso** directory. To embed a content bundle, place it under the **overlay/files-iso/opt/spectrocloud/content** folder. This limits the scope to only the Edge Installer ISO. |
-
-
-7. Navigate to your output directory and issue the following command to build your Edge images. This command will take a few minutes to complete.
-
-
- - ```shell - ./build.sh - ``` - - The following images are generated by the build script. - -
- - - Edge Installer ISO image. -
- - - Edge Host container image containing the Palette Edge Host Agent. -
- - - Kubernetes Provider container images. - -
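-
-If you set `PUSH_BUILD` to `true`, the provider images are also pushed to your registry. As a quick local check that the build produced them, you can filter the local image list. A minimal sketch, assuming the `gcr.io/my-registry` repository path used in the earlier example:
-
-   ```shell
-   # Lists locally built images whose repository matches your registry path.
-   docker images --filter "reference=gcr.io/my-registry/*"
-   ```
-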
-
-8. Locate your ISO file in the output directory. The ISO file's default name is **spectro-edge-installer.iso**, but it may be different if you used the `--iso-name` CLI flag.
-
-
-Using a bootable USB drive, PXE server, or other means, boot the Edge host from the ISO. The installer flashes the OS to the Edge host's hard disk, and the host shuts down. The Edge host is now ready to be shipped to the edge location.
-
-
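-
-If you go the bootable USB route from a Linux workstation, one way to write the ISO is with `dd`. This is a sketch only — **/dev/sdX** is a placeholder for your USB device, and writing to the wrong device is destructive, so confirm the device path first:
-
-   ```shell
-   # Replace /dev/sdX with your USB device and adjust the ISO name if you used --iso-name.
-   sudo dd if=spectro-edge-installer.iso of=/dev/sdX bs=4M status=progress conv=fsync
-   ```
-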
- - - -You can use several software tools to create a bootable USB drive, such as [balenaEtcher](https://www.balena.io/etcher). For a PXE server, there are open-source projects such as [Fog](https://fogproject.org/download) or [Windows Deployment Services](https://learn.microsoft.com/en-us/windows/deployment/wds-boot-support) for Windows. - - - - -# Validate - -1. In the build server, validate the output directory containing the ISO file. - - -2. You can validate that the ISO image is not corrupted and is valid by attempting to flash a bootable device. Most software that creates a bootable device will validate the ISO image before the flash process. - - -3. You can validate that the Edge host is ready for the site installation by simulating a site deployment on one of the Edge hosts. The simulation process will require you to complete the installation process and reset the device after the validation. - - -# Next Steps - -You now have an Edge artifact that you can use to create an Edge host. You can start the site deployment process. Check out the [Site Deployment](/clusters/edge/site-deployment) resource to learn more about the Site Deployment process. - diff --git a/content/deprecated/04-clusters/04-palette-virtual-clusters/00-add-virtual-cluster-to-host-cluster.md b/content/deprecated/04-clusters/04-palette-virtual-clusters/00-add-virtual-cluster-to-host-cluster.md deleted file mode 100644 index 128c6f1b84..0000000000 --- a/content/deprecated/04-clusters/04-palette-virtual-clusters/00-add-virtual-cluster-to-host-cluster.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: "Add Virtual Clusters to a Host Cluster" -metaTitle: "Add Virtual Clusters to a Host Cluster" -metaDescription: "How to add Palette Virtual Clusters to a Host Cluster" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Add Virtual Clusters to a Host Cluster - - - -As of Palette 3.2, this feature is deprecated. Use the [Deploy a Virtual Cluster to a Cluster Group](/clusters/palette-virtual-clusters/deploy-virtual-cluster) guide to learn how to deploy Palette Virtual clusters. - - - - -You can deploy Palette Virtual Clusters in a [Host Cluster](/glossary-all#hostcluster). To do this, Palette provides the **Enable Virtual Clusters** option for new or existing clusters. Clusters with the virtual clusters feature enabled are called Host Clusters. - -The advantages of a virtual cluster environment are: -- You can operate with admin-level privileges while ensuring strong isolation. -- Virtual clusters reduce operational overhead and improve resource utilization. - -Follow steps below to enable and deploy a virtual cluster. - -# Prerequisites - -- A Spectro Cloud account. - -- A configured [Cluster](/clusters). - -- Attach any required policies in your cloud account that must be added to your virtual cluster deployment. - - For AWS, refer to the [Required IAM Policies](/clusters/public-cloud/aws/required-iam-policies#globalroleadditionalpolicies) documentation. - - For Azure, no additional policies are required. - - - -Palette doesn't support _Usage_ and _Cost_ metrics for Virtual Clusters running on Google Kubernetes Engine (GKE). 
- - - -## Add Node-Level Policies in your Cloud Account - -In some situations additional node-level policies must be added to your deployment. - -To add node-level policies: - -1. In **Cluster Mode**, switch to the **Tenant Admin** project. -2. Select **Tenant Settings** in the **Main Menu**. -3. Click **Cloud Accounts** and ensure **Add IAM policies** is enabled for your cloud account. If an account does not already exist, you must add one. -4. You can specify any additional policies to include in virtual clusters deployed with this cloud account. - - For AWS, add the **AmazonEBSCSIDriver** policy so that the virtual clusters can access the underlying host cluster's storage. Check out the [Palette required IAM policies](/clusters/public-cloud/aws/required-iam-policies#globalroleadditionalpolicies) documentation to learn more about additional IAM policies. -5. Confirm your changes. - -# Enable Virtual Clusters on a Host Cluster - -Follow these steps to enable virtual clusters on a new or existing Host Cluster: - -1. In **Cluster Mode**, select **Clusters** in the **Main Menu**. -2. Select a **Host Cluster** from the list and click **Settings > Cluster Settings > Virtual Clusters**. -3. Toggle the **Enable Virtual Clusters** option to _on_. -4. Select an endpoint in the **Cluster Endpoint Type** drop-down menu: _Load Balancer_ or _Ingress_. -5. Configure the load balancer or ingress endpoint. - - - - -### Configure Load Balancer Endpoint -
-These requirements apply to a Load Balancer endpoint: -
-
-
-* The Host Cluster must support dynamic provisioning of load balancers.
-* If the Host Cluster is in the public cloud, the AKS/EKS/GCP Cloud Controller Manager supports load balancers by default.
-* If the Host Cluster is in a private data center, a bare metal load balancer provider such as MetalLB must be installed and configured.
-
-
-
-
-
-### Configure Ingress Endpoint
-These requirements apply to an Ingress endpoint: -
-
- -* The Host Cluster must specify a Host domain name service (DNS) Pattern, for example: `*.starship.te.spectrocloud.com` -
-To create a valid Host DNS Pattern, you must deploy the NGINX Ingress Controller on the Host Cluster with SSL passthrough enabled. This allows transport layer security (TLS) termination to occur at the virtual cluster's Kubernetes API server. -
-* A wildcard DNS record must be configured, which maps the Host DNS Pattern to the load balancer associated with the NGINX Ingress Controller. - -To map the Host DNS Pattern to the load balancer with the NGINX Ingress Controller: -
-1. Deploy the NGINX Ingress Controller on the Host Cluster and ensure that SSL passthrough is enabled in the `values.yaml` file for the NGINX Ingress Controller pack. Set `charts.ingress-nginx.controller.extraArgs.enable-ssl-passthrough` to _true_ as shown in the example:
- -
- - ```yml - charts: - ingress-nginx: - ... - controller: - ... - extraArgs: - enable-ssl-passthrough: true - ``` -2. Identify the public DNS name of the load balancer associated with the LoadBalancer Service associated with your NGINX Ingress Controller deployment. - -3. Create a wildcard DNS record that maps the Host Pattern to the NGINX Ingress Controller load balancer. The example shows an AWS Route53 record for the `*.starship.te.spectrocloud.com` Host DNS Pattern. - - |Example Record with Host DNS Pattern|| - |-|-| - |![AWS Route 53](/record-details.png) |Here is an example of an
AWS Route53 record for the
`*.starship.te.spectrocloud.com`
Host DNS Pattern.| - -
-
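-
-As an illustration of step 2 above, one way to look up the public DNS name of the load balancer is to query the LoadBalancer Service directly. This is a sketch only — the namespace and Service name are assumptions and depend on how the NGINX Ingress Controller pack was deployed in your cluster:
-
-   ```shell
-   # The namespace and Service name below are examples; substitute the ones from your deployment.
-   kubectl get service nginx-ingress-controller --namespace nginx --output jsonpath='{.status.loadBalancer.ingress[0].hostname}'
-   ```
-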
- - -# Deploy a Virtual Cluster in the Host Cluster - -To deploy a new virtual cluster in an existing Host Cluster: - -1. In **Cluster Mode** select a project from the drop-down menu, and click **Clusters** in the **Main** menu. - -2. Click the **Virtual Clusters** tab to list available virtual clusters, and select **Add New Virtual Cluster**. - -3. Provide **Deploy New Virtual Cluster** configuration information:
- - - Select the Host Cluster in which you'll enable virtual clusters. - - - Add a cluster name. - - - Optionally, provide a Description and Tags. - - - Click the **Attach Profile** button to assign a profile. - - You can attach one or more Add-on layers to this cluster. If you do not have a Cluster Profile, refer to [Creating Cluster Profile](/cluster-profiles/task-define-profile) for details. - -
- -4. (Optional) If the Host Cluster's **Cluster Endpoint Type** is a _Load Balancer_, you can provide the following advanced configuration options here: - - - [External Traffic Policy](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip): _Cluster_ or _Local_.
- - - Load Balancer Source Ranges: Limits which client IPs can access the load balancer. Inputs must be a comma-separated list of CIDR ranges in `a.b.c.d/x` format. [Network Load Balancer support on AWS](https://kubernetes.io/docs/concepts/services-networking/service/#aws-nlb-support) provides additional details. - -# Validate -To validate your virtual cluster is available and ready for use, navigate to **Clusters > Virtual Clusters**, which lists all your virtual clusters. - - -# Resources - -- [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) - -- [CPU resource units](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu) - -- [Memory resource units](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory) - -- [Amazon EBS CSI driver - Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html) - -- [Creating the Amazon EBS CSI driver IAM role for service accounts - Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/csi-iam-role.html) diff --git a/content/deprecated/06-integrations/00-EKS-D.md b/content/deprecated/06-integrations/00-EKS-D.md deleted file mode 100644 index 3236b2aaf2..0000000000 --- a/content/deprecated/06-integrations/00-EKS-D.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -title: 'Amazon EKS Distro' -metaTitle: 'Kubernetes with Spectro Cloud' -metaDescription: 'EKS-D packs in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['kubernetes'] -logoUrl: 'https://registry.spectrocloud.com/v1/kubernetes-eksd/blobs/sha256:5790ca7040999e2f9371163a319cda652ed1e32139bcb9c6fb32a0152d9f48fb?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Amazon EKS Distro - -Amazon EKS-D allows for the manual deployment of secure and reliable workload clusters, free from constant testing and tracking for dependencies, security patches, and updates of Kubernetes. EKS-D provisions clusters with consistent versions of Kubernetes and dependencies of Amazon EKS. The deployment of EKS-D is standardized enough to build your Kubernetes cluster on any public, private or on-prem platform. Once the community support for Kubernetes versions expires, Amazon takes care of the version control including the latest security patches. With EKS-D, users enjoy benefits such as secure Docker images, back-ported security fixes, and a single upstream vendor. -## Provision and Manage Amazon EKS Distro (EKS-D) with Spectro Cloud -Spectro Cloud leverages EKS-D services to customers as a platform of their choice. We support easy provisioning and management of EKS-D services for on-premises as well as for public cloud platforms such as: - -* vSphere Cloud Provider (vSphere) -* Amazon Web Services (AWS) -* Microsoft Azure (Azure) -* Google Cloud Platform (GCP) -* Metal as a Service (MaaS) -* OpenStack Cloud - -We have made the usage of EKS-D easy by incorporating it as integration within the Spectro Cloud pack. At the click of a button, EKS-D is brought to use while creating a Spectro Cloud-specific cluster profile. -Once the cluster profile is created, users can deploy EKS-D based Kubernetes clusters through the Spectro Cloud console. 
- -![eksd-cluster-profile](/eksd-cluster-profile.png) - -![eksd-cluster](/eksd-cluster.png) - -## Why EKS-D with Spectro Cloud - -Spectro Cloud fosters the tenant EKS-D clusters with add-on features such as authorization, monitoring, logging, load balancing, and more. -The extensive platform support that Spectro Cloud provides to its customers makes EKS-D with Spectro Cloud highly flexible. -We provide isolation to the EKS-D tenant clusters by virtue of projects and RBAC. - -||Spectro Add-On Packs| -|-|------| -|Deployment specifics|Logging| -||Monitoring | -||Security | -||Authentication| -||Service Mesh | -||Load Balancer | -||Ingress | -||Service Mesh | -||Helm Charts | -||Registries| - -||Spectro EKS-D Platform Support| -|-|----| -|Public Cloud|Amazon Web Services (AWS) -||Microsoft Azure (Azure) -||Google Cloud Platform (GCP)| -|On-Premises|vSphere Cloud Provider (vSphere)| -||OpenStack Cloud| -||Metal-as-a-Service Cloud (MaaS)| - -||Resource Isolation| -|-|---| -| Project |Users and teams with specific roles can be associated with the project.| -| |The project helps to organize the cluster resources in a logical grouping | -| RBAC|Role-Based Access Control.| -| |This is a method that allows the same user to have a different type of access control based on the resource being accessed.| - -## Supported EKS-Distro Versions - - - - -* **v1-21-eks-4 ** - - - - -* **v1-20-eks-6 ** - - - - - -* **1.18.9-eks-1-18-1** - - - - - -## Reference - -https://aws.amazon.com/eks/eks-distro diff --git a/content/deprecated/06-integrations/00-oidc-eks.md b/content/deprecated/06-integrations/00-oidc-eks.md deleted file mode 100644 index 37dc21e736..0000000000 --- a/content/deprecated/06-integrations/00-oidc-eks.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: 'aws-eks-oidc' -metaTitle: 'aws-eks-oidc' -metaDescription: 'aws-eks-oidc Authentication pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['authentication'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/aws-eks-oidc/blobs/sha256:f86813591b3b63b3afcf0a604a7c8c715660448585e89174908f3c6a421ad8d8?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# OIDC identity provider authentication for Amazon EKS - -OpenID Connect (OIDC) Identity Provider (IDP) authentication for Amazon EKS clusters. This feature allows customers to integrate an OIDC identity provider with a new or existing Amazon EKS cluster running Kubernetes version 1.16 or later. OpenID Connect is an interoperable authentication protocol based on the OAuth 2.0 family of specifications. It adds a thin layer that sits on top of OAuth 2.0 that adds login and profile information about the identity of who is logged in. 
- - -## Versions Supported - - - - - -**1.0.0** - - - - -## References -https://aws.amazon.com/blogs/containers/introducing-oidc-identity-provider-authentication-amazon-eks/ diff --git a/content/deprecated/06-integrations/00-ubuntu-k3s.md b/content/deprecated/06-integrations/00-ubuntu-k3s.md deleted file mode 100644 index fa32bc2cbb..0000000000 --- a/content/deprecated/06-integrations/00-ubuntu-k3s.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: 'Ubuntu-K3s' -metaTitle: 'Ubuntu Lightweight Kubernetes K3s' -metaDescription: 'Choosing K3s with Ubuntu within the Palette console' -hiddenFromNav: true -type: "integration" -category: ['system app'] -logoUrl: 'https://registry.spectrocloud.com/v1/ubuntu-k3s/blobs/sha256:10c291a69f428cc6f42458e86cf07fd3a3202c3625cc48121509c56bdf080f38?type=image/png' ---- - -import WarningBox from 'shared/components/WarningBox'; -import Tabs from 'shared/components/ui/Tabs'; - - -# Lightweight Kubernetes on Ubuntu -K3s is a purpose-built container orchestrator for running Kubernetes on bare-metal servers. With the bloat stripped out, the CNCF (Cloud Native Computing Foundation) accredited Kubernetes distribution orchestrator makes installation and application deployment faster. Palette supports this Lightweight Kubernetes and Ubuntu pack versions to run at scale. - -
- -## Version Supported - -
- -## Ubuntu K3s -
- - - -
-
- -Name: **Ubuntu-K3s** -Version: **Ubuntu-K3s-1.22.9-k3s0** - - -
-
- - -
- - -
-
- -Name: **Ubuntu-K3s** -Version: **Ubuntu-K3s-1.21.12-k3s0** - -
-
- -
-
- - -## Manifest Parameters - - -```yaml -pack: - spectrocloud.com/install-priority: "0" -#k3sconfig: -# disable: -# - metrics-server -# service-cidr: "10.40.0.0/16" -# cluster-cidr: "10.45.0.0/16" -``` - - - -# References - -[Rancher](https://rancher.com/docs/k3s/latest/en/) diff --git a/content/docs/00-index.mdx b/content/docs/00-index.mdx deleted file mode 100644 index 7d4419d742..0000000000 --- a/content/docs/00-index.mdx +++ /dev/null @@ -1,165 +0,0 @@ ---- -title: "Home" -metaTitle: "Spectro Cloud" -metaDescription: "Spectro Cloud provides scalable, policy-based cluster management of Kubernetes for enterprises that need a high degree of control over their infrastructure, whether it is in public cloud, private cloud, bare metal or any combination. Enterprises can define the enterprise Kubernetes stack they need while still gaining the efficiencies of automation at scale." -icon: "folder" -hideToC: true -fullWidth: true -hiddenFromNav: true -isIntegration: false -category: [] -hideToCSidebar: true -hideMenuSidebar: true ---- - -import MainHeader from "shared/components/common/MainHeader"; -import QuickSetup from "shared/components/common/QuickSetup"; -import CloudsSection from "shared/components/common/CloudsSection"; -import DocUpdatesSection from "shared/components/common/DocUpdatesSection"; - - -

Step-by-step guides, definitions and help

-

A deeper dive into the Palette platform with all the technical details you need

-
- - - - - - - diff --git a/content/docs/00-release-notes.md b/content/docs/00-release-notes.md deleted file mode 100644 index 005560891c..0000000000 --- a/content/docs/00-release-notes.md +++ /dev/null @@ -1,1455 +0,0 @@ ---- -title: "Release Notes" -metaTitle: "Release Notes" -metaDescription: "Spectro Cloud release notes for Palette and its sub-components." -icon: "audits" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -

August 27, 2023 - Release 4.0.0

- -Palette 4.0.0 introduces new features and improvements, including [Palette VerteX](/vertex) - a FIPS-compliant edition - and the [Virtual Machine Orchestrator](/vm-management) (VMO) which enables unified management of containerized applications and virtual machines in Kubernetes. Additionally, Palette 4.0.0 introduces a new Pack User Interface (UI) that improves the user experience for finding and installing packs. Check out the [Upgrade Notes](/enterprise-version/upgrade) and release notes below to learn more about the new features and improvements in Palette 4.0.0. - - -

Palette

- - - -

Breaking Changes

- -- Deploying Virtual Clusters directly into host clusters is no longer supported. Use Cluster Groups to deploy Virtual Clusters in host clusters. For guidance on deploying Virtual Clusters into a Cluster Group, check out the [Add Virtual Clusters to a Cluster Group](/clusters/palette-virtual-clusters/deploy-virtual-cluster) documentation. - - -

Features

- - -- The Virtual Machine Orchestrator (VMO) is now available in Palette. You can natively manage virtual machines from Palette. Palette uses kubevirt under the hood to facilitate the management of virtual machines. Review the [VMO](/vm-management) documentation to learn more. - - -- Custom Pack registries now support the ability for you to upload your own SSL Certificate Authority (CA). You can use HTTPS to connect to your private registries by providing your SSL certificate. Refer to the [Configure a Custom Pack Registry in Palette](/registries-and-packs/adding-a-custom-registry) documentation to learn more. - - -- A new Pack User Interface (UI) is available in Palette. This new UI allows you to search for packs across registries while providing you with important metadata. The new search experience improves the user experience for finding and installing packs. - - -- Pack registries now support the Open Container Initiative (OCI) image format. This allows you to use OCI images in your custom pack registries instead of the previous Palette-specific format. - - -- Palette now supports VMware vSphere 8.0. You can now deploy host clusters with VMware vSphere 8.0. - - -- Host clusters deployed to VMware now support [VMware NSX](https://www.vmware.com/products/nsx.html) overlay networking. - - -- Palette's internal message communication between components now uses the gRPC protocol. The previous usage of [NATS](https://nats.io/) has been deprecated and will be removed in a future release. You can review a network diagram of Palette's communication architecture on the [Network Ports](/architecture/networking-ports) page. If you are using network proxies, we encourage you to review the [gRPC and Proxies](/architecture/grps-proxy) documentation for potential issues. - - -- Pack deprecated status is now available in the Palette UI. This lets you identify which packs are deprecated and will be removed in future releases. Review the [Maintenance Policy](/integrations/maintenance-policy) documentation to learn more. - - -- Self-hosted Palette now provides a new installation method using the [Palette CLI](/palette-cli). You can now install a self-hosted Palette through the Palette CLI. The CLI provides an interactive installation experience allowing you to configure Palette's installation parameters. Check out the [Install Enterprise Cluster](/enterprise-version/deploying-an-enterprise-cluster) documentation to learn more. The previous installation method using the Palette OVA Installer is deprecated and unavailable in this release. - - -- Private Cloud Gateway (PCG) deployments are now available through the Palette CLI. You can now install a PCG through the Palette CLI. The CLI provides an interactive installation experience allowing you to configure the PCG installation parameters. Check out the Palette CLI [PCG install command](/palette-cli/commands/#pcg) documentation to learn more. The previous installation method using the PCG Docker image is deprecated and unavailable in this release. - - -- You can now specify namespace labels and annotations in a Container Network Interface (CNI), Container Storage Interface (CSI), and Add-on pack's YAML configuration. This allows you to specify labels and annotations that are applied to specific namespaces in the cluster. To learn more about configuring labels and annotations, refer to the [Add-on Profile](/cluster-profiles/create-add-on-profile#packlabelsandannotations) documentation. - - - -

Improvements

- -- You can now download different kubeconfig files for your host clusters in Palette. You can download an admin kubeconfig file or a user kubeconfig file. The admin kubeconfig file allows you to perform all actions on the cluster. In contrast, the user kubeconfig file is only accessible to those with the proper Palette permissions to access the cluster. To learn more, check out the Palette [kubeconfig](/clusters/cluster-management/kubeconfig) documentation. - - -- You can now install a self-hosted Palette through the Palette CLI. The CLI provides an interactive installation experience allowing you to configure Palette's installation parameters. Learn more about the Palette [EC command](/palette-cli/commands/#ec) documentation. - - -- The login banner message in Palette is now also exposed in the Palette CLI. Users logging in to Palette through the CLI will receive the same message as those logging in through the UI. Refer to the [Login Banner](/tenant-settings/login-banner) documentation to learn more. - - -- You can now configure the logout timer for users in Palette. This allows you to set the time a user can be inactive before they are automatically logged out of Palette. The default value is 240 minutes. - - -- Private Cloud Gateway (PCG) deployments and self-hosted Palette Enterprise Clusters (EC) are now deployed with Kubernetes version 1.25. - - -- Palette now supports Kubernetes 1.27.x. You can deploy host clusters with Kubernetes 1.27.x. - - -- The Cox Edge provider is upgraded to version 0.5.0. - - -- You can now access Palette documentation directly from the Palette UI. This allows you to quickly access the documentation for the page you are currently on. You can find the documentation link in the top right corner of the Palette UI. - - -- Palette now supports configuring the time interval for node repavement. The time interval is the amount of time that Palette waits before it starts the node replacement process on nodes in the cluster. The default time interval is 15 minutes. Refer to the [Node Pool](/clusters/cluster-management/node-pool/) documentation to learn more. - - - -

Deprecations and Removals

- -- The Palette OVA Installer is deprecated and no longer provided as of this release. Self-hosted Palette now provides a new installation method using the Palette CLI. The CLI provides an interactive installation experience allowing you to configure Palette's installation parameters. Check out the [Install Enterprise Cluster](/enterprise-version/deploying-an-enterprise-cluster) documentation to learn more. - - - -- The Palette PCG Docker installation method is deprecated and not available in this release. You can now install a PCG through the Palette CLI. The CLI provides an interactive installation experience allowing you to configure Palette's installation parameters. Check out the Palette CLI [PCG install command](/palette-cli/commands/#pcg) documentation to learn more. - - -

Known Issues

-
-- With the deprecation of deploying Virtual Clusters directly into host clusters, the ability to specify an Add-on profile for a Palette Virtual Cluster is currently unavailable. This will be addressed in an upcoming release.
-
-

Edge

- - - -

Features

-
-- Palette Edge now supports the ARM64 architecture. This is a preview feature that is still in active development. You can deploy Palette Edge on ARM64 hardware, such as the Nvidia Jetson (Orin). Review the list of available [ARM64 packs](/integrations) in Palette before deploying Palette Edge on ARM64 architecture.
-
-
-- Palette Edge now supports configuring OIDC Identity Providers (IDP) at the Kubernetes layer of a Cluster Profile. Refer to the Kubernetes distributions [pack documentation](/integrations) to learn more.
-
-

Improvements

- -- You can now assign dynamic tags to your edge hosts by specifying files or invoking a script that returns a JSON payload containing the tag values. This allows you to dynamically assign tags to your Edge hosts based on the host's local environment. Refer to the [Edge Installer Configuration Tags](/clusters/edge/edge-configuration/installer-reference#tags) documentation to learn more. - - -- You can now skip the auto registration of Edge hosts in Palette. This allows you to manually register your Edge hosts in Palette by either using the QR code method or by providing the machine ID in Palette. Set the Edge Installer configuration parameter `disableAutoRegister` to `true` to turn off auto registration. Refer to the [Edge Installer Configuration](/clusters/edge/edge-configuration/installer-reference) documentation to learn more. - - -- You can configure the node drainage behavior for your Edge hosts. To learn more about configuring node drainage, refer to the [Bring Your Own OS (BYOOS) pack](/integrations/byoos#nodedrainage) documentation. - - -

Known Issues

- -- Palette eXtended Kubernetes - Edge (PXKE) and RKE2 cannot be upgraded from version 1.26.4 to 1.27.2 in an active cluster. Create a new cluster profile with the latest version of PXKE or RKE2 to upgrade to version 1.27.2. This will be addressed in an upcoming release. - - -

Palette Dev Engine (PDE)

- - - -

Features

- -- A Visual Studio Code (VS Code) extension is now available for Palette Dev Engine (PDE). This extension allows you to deploy and manage virtual clusters directly from VS Code. To learn more, you can review the [Palette PDE Plugin](https://marketplace.visualstudio.com/items?itemName=SpectroCloud.extension-palette) documentation. - - -- The Palette CLI now supports managing App Profiles and Apps in Palette Dev Engine (PDE). You can now create, update, and delete App Profiles and Apps directly from the CLI. Use the `palette pde app-profile` and `palette pde app` commands to manage App Profiles and Apps. Refer to the [Palette CLI](/palette-cli) documentation or use the `--help` flag to learn more. - - - -

Virtual Machine Orchestrator (VMO)

- - -

Features

- - -- Host clusters supporting Virtual Machine (VM) workloads can now be placed in host maintenance mode, with the ability to choose which Kubernetes node to place in maintenance mode. When a node is placed in maintenance mode, also known as “cordoned”, the VM workload is automatically migrated without any disruptions to another healthy node in the cluster. - - -- VMO supports the ability to import a VMware OVA template from VMware vSphere into Palette. This allows you to import a VM template from VMware vSphere into Palette and deploy it as a VM workload in a host cluster. - - -- You can now migrate a VM from VMware vSphere to a host cluster in Palette through the Palette CLI. The CLI provides an interactive migration experience allowing you to configure the VM migration parameters. - - - -

VerteX

- - -

Features

- -- [Palette VerteX](https://www.spectrocloud.com/news/spectro-cloud-announces-palette-vertex-for-government) is now available and brings FIPS 140-2 cryptographic modules to the Palette management platform and deployed clusters. Palette VerteX is available to all government and private sector organizations that value strong data protection, backed by the Spectro Cloud Government practice, a growing ecosystem of specialist channel partners, and continental US technical support. Refer to the [Palette VerteX](/vertex) documentation to learn more. - - -- You can install Palette VerteX in a VMware environment through the Palette CLI. The CLI provides an interactive installation experience allowing you to configure Palette VerteX's installation parameters. To learn more, refer to the Palette [VMware install instructions](/vertex/install-palette-vertex/install-on-vmware/install) documentation. You can also install Palette VerteX in a FIPS-certified Kubernetes cluster. Check out the [Kubernetes install instructions](/vertex/install-palette-vertex/install-on-kubernetes/install) for more details. - - - -

Terraform

- -- Version 0.15.0 of the [Spectro Cloud Terraform provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) is available. For more details, refer to the Terraform provider [release page](https://github.com/spectrocloud/terraform-provider-spectrocloud/releases). - - - -

Education

- -- A new Edge tutorial is available to learn how to deploy an Edge cluster using Palette with VMware. The [Deploy an Edge Cluster on VMware](/clusters/edge/site-deployment/deploy-cluster) provides an end-to-end tutorial that walks you through creating Edge artifacts, creating a Cluster Profile, and deploying an Edge cluster on VMware. - - -- The documentation site for Palette now provides a chatbot capable of answering your questions about Palette. The chatbot is available in the bottom right corner of the documentation site. You can ask the chatbot questions about Palette, and it will provide you with relevant answers and documentation links. - - - -

Packs

- - -

Pack Notes

-
-- A new community pack repository is available. The Palette Community Repository allows partners and customers to contribute and share their packs. For more details, refer to the Palette Community Repository [README](https://github.com/spectrocloud/pack-central).
-
-
-- The Spectro-VM-Dashboard pack is renamed to Virtual Machine Orchestrator.
-
-
-- This release introduces the start of a formal maintenance policy for packs. Several packs are now marked as deprecated, disabled, or deleted. A complete list of packs that are deprecated, disabled, or deleted is available in the [Deprecations and Removals](#release-4-0-packs-pack-deprecation-removals) section below. To learn more about the pack maintenance policy, refer to the [Maintenance Policy](/integrations/maintenance-policy) documentation.
-
-
-
-
-The following new packs are available in Palette 4.0.0.
-
- - -

Kubernetes

- -| **Pack** | **New Version** | -|--------------------|----------| -| K3s | 1.27.2 | -| Kubernetes AKS | 1.27.0 | -| Kubernetes Coxedge | 1.25.10 | -| Kubernetes Coxedge | 1.26.5 | -| Kubernetes Coxedge | 1.27.2 | -| Kubernetes EKS | 1.27.0 | -| Kubernetes GKE | 1.24.14 | -| Kubernetes GKE | 1.25.10 | -| Kubernetes GKE | 1.26.5 | -| Kubernetes GKE | 1.27.2 | -| MicroK8s | 1.27.0 | -| Palette eXtended Kubernetes| 1.24.14 | -| Palette eXtended Kubernetes| 1.25.10 | -| Palette eXtended Kubernetes| 1.26.5 | -| Palette eXtended Kubernetes| 1.27.1 | -| Palette eXtended Kubernetes - Edge | 1.27.2 | -| RKE2 | 1.24.6 | -| RKE2 | 1.25.10 | -| RKE2 | 1.26.3 | -| RKE2 | 1.26.5 | -| RKE2 | 1.27.2 | - - -

CNI

- -| **Pack** | **New Version** | -|--------------------|----------| -| AWS VPC CNI | 1.13.0 | -| AWS VPC CNI | 1.17.0 | -| Calico | 3.25.1 | -| Calico | 3.26.0 | -| Cilium OSS | 1.14.0 | -| Flannel | 0.22.0 | - - -

CSI

- -| **Pack** | **New Version** | -|--------------------|----------| -| AWS EBS CSI | 1.17.0 | -| AWS EBS CSI | 1.20.0 | -| AWS EFS CSI | 1.5.06 | -| Azure Disk CSI | 1.26.3 | -| Longhorn CSI | 1.4.1 | -| Portworx CSI | 3.0.0 | -| Rook Ceph | 1.11.9 | -| vSphere CSI | 3.0.0 | -| vSphere CSI | 3.0.2 | - - - -

Add-on Packs

- -| **Pack** | **New Version** | -|--------------------|----------| -| AWS ALB | 2.5.1 | -| AWS Cluster Autoscaler | 1.26.3 | -| External Secrets Operator | 0.8.1| -| Image Swap | 1.5.2 | -| MetalLB | 0.13.10 | -| Nvidia GPU Operator | 23.3.2 | -| Open Policy Agent | 3.12.0 | -| Prometheus Grafana | 46.4.0 | -| Vault | 0.25.0 | - - - -

Community Packs

-
-| **Pack** | **New Version** |
-|--------------------|----------|
-| Ngrok Ingress Controller | 0.9.0 |
-
-

FIPS Packs

- -| **Pack** | **New Version** | -|-------------------------------------------|---------------| -| AWS EBS CSI | 1.17.0 | -| AWS VPC CNI | 1.1.17 | -| Calico | 3.25.1 | -| Calico | 3.4.1 | -| Longhorn CSI | 1.4.1 | -| Palette eXtended Kubernetes | 1.24.10 | -| Palette eXtended Kubernetes | 1.24.13 | -| Palette eXtended Kubernetes | 1.24.14 | -| Palette eXtended Kubernetes | 1.25.6 | -| Palette eXtended Kubernetes | 1.25.9 | -| Palette eXtended Kubernetes | 1.25.10 | -| Palette eXtended Kubernetes | 1.26.3 | -| Palette eXtended Kubernetes | 1.26.4 | -| Palette eXtended Kubernetes | 1.26.5 | -| Palette eXtended Kubernetes | 1.27.1 | -| Palette eXtended Kubernetes | 1.27.2 | -| Palette eXtended Kubernetes - Edge (PXKE) | 1.24.13 | -| Palette eXtended Kubernetes - Edge (PXKE) | 1.25.9 | -| Palette eXtended Kubernetes - Edge (PXKE) | 1.26.4 | -| Palette eXtended Kubernetes - Edge (PXKE) | 1.27.2 | -| RKE2 | 1.24.6 | -| RKE2 | 1.25.0 | -| RKE2 | 1.25.2 | -| RKE2 | 1.25.10 | -| RKE2 | 1.26.4 | -| RKE2 | 1.26.5 | -| RKE2 | 1.27.2 | -| vSphere CSI | 3.0 | - - -

Deprecations and Removals

- -The following packs are marked as deprecated, disabled, or deleted. Refer to the [Maintenance Policy](/integrations/maintenance-policy) for more details on the deprecation and removal process. - -
- -#### Operating Systems - -| **Pack** | **Version** | **Status** | -|-------------------------------------------|-------------|--------------| -| OpenSuse Leap | 15.4 | Disabled | -| Ubuntu (For Edge) | 20.04 | Disabled | -| Ubuntu (For Edge) | 22.04 | Disabled | - -#### Kubernetes - -| **Pack** | **Version** | **Status** | -|-------------------------------------------|-------------|--------------| -| MicroK8s | 1.23 | Deprecated | -| Konvoy | 1.19.10 | Deleted | -| Konvoy | 1.19.15 | Deleted | -| Konvoy | 1.20.8 | Deleted | -| Konvoy | 1.20.11 | Deleted | -| Konvoy | 1.21.6 | Deleted | -| Kubernetes AKS | 1.22 | Deleted | -| Kubernetes AKS | 1.23 | Deleted | -| Kubernetes AKS | 1.24 | Deleted | -| Kubernetes Coxedge | 1.21.14 | Deprecated | -| Kubernetes Coxedge | 1.22.12 | Deprecated | -| Kubernetes Coxedge | 1.23.9 | Deprecated | -| Kubernetes EKS | 1.17 | Deprecated | -| Kubernetes EKS | 1.18 | Deprecated | -| Kubernetes EKS | 1.18 | Deprecated | -| Kubernetes EKS | 1.19 | Deprecated | -| Kubernetes EKS | 1.20 | Deprecated | -| Kubernetes EKS | 1.21 | Deprecated | -| Kubernetes EKS | 1.22 | Deprecated | -| Kubernetes EKS | 1.23 | Deprecated | -| Kubernetes EKSD | 1.18.9 | Disabled | -| Kubernetes EKSD | 1.19.6 | Disabled | -| Kubernetes EKSD | 1.20.8 | Disabled | -| Kubernetes EKSD | 1.21.6 | Disabled | -| Kubernetes GKE | 1.24.10 | Deleted | -| Kubernetes GKE | 1.25.7 | Deleted | -| Kubernetes GKE | 1.26.4 | Deleted | -| K3s | 1.22.13 | Deprecated | -| K3s | 1.22.15 | Deprecated | -| K3s | 1.23.10 | Deprecated | -| K3s | 1.23.12 | Deprecated | -| K3s | 1.24.6 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.19.0 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.19.4 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.19.5 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.19.6 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.19.7 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.19.8 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.19.9 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.19.10 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.19.11 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.19.12 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.19.13 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.19.14 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.19.15 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.19.16 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.20.0 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.20.1 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.20.2 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.20.4 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.20.5 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.20.6 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.20.7 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.20.8 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.20.9 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.20.10 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.20.11 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.20.12 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.20.14 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.21.0 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.21.1 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.21.2 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.21.3 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 
1.21.5 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.21.6 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.21.8 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.21.10 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.21.14 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.22.7 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.22.12 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.23.4 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.23.9 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.23.16 | Deprecated | -| Palette eXtended Kubernetes (PXK) | 1.23.17 | Deprecated | -| Palette eXtended Kubernetes - Edge (PXKE) | 1.22.15 | Deprecated | -| Palette eXtended Kubernetes - Edge (PXKE) | 1.23.12 | Deprecated | -| Palette eXtended Kubernetes - Edge (PXKE) | 1.24.6 | Deprecated | -| RKE2 | 1.22.12 | Disabled | -| RKE2 | 1.22.13 | Deprecated | -| RKE2 | 1.22.15 | Deprecated | -| RKE2 | 1.23.9 | Disabled | -| RKE2 | 1.23.10 | Deprecated | -| RKE2 | 1.23.12 | Deprecated | -| RKE2 | 1.24.6 | Deprecated | - - - -### CNI - -| **Pack** | **Version** | **Status** | -|---------------|---------------|--------------| -| Calico | 3.9 | Deprecated | -| Calico | 3.10 | Deprecated | -| Calico | 3.16 | Deprecated | -| Calico | 3.19 | Deprecated | -| Calico | 3.22 | Deprecated | -| Cilium OSS | 1.10.9 | Deprecated | -| Cilium OSS | 1.12.3 | Deprecated | -| Cilium OSS | 1.12.4 | Deprecated | -| Flannel CNI | 0.10.0 | Deprecated | - - - -#### CSI - -| **Pack** | **Version** | **Status** | -|---------------------------------------------|--------------|--------------| -| AWS EBS CSI | 1.0.0 | Deprecated | -| AWS EBS CSI | 1.5.1 | Deprecated | -| AWS EBS CSI | 1.8.0 | Deprecated | -| AWS EBS CSI | 1.10.0 | Deprecated | -| AWS EBS CSI | 1.12.0 | Deprecated | -| AWS EFS CSI | 1.3.6 | Deprecated | -| Azure CSI Driver | 1.20.0 | Deprecated | -| Azure Disk | 1.0.0 | Deprecated | -| GCE Persistent Disk CSI | 1.7.1 | Deprecated | -| GCE Persistent Disk | 1.0.0 | Deprecated | -| Openstack Cinder | 1.18 | Deprecated | -| Openstack Cinder | 1.19 | Deprecated | -| Openstack Cinder | 1.20 | Deprecated | -| Openstack Cinder | 1.21 | Deprecated | -| Openstack Cinder | 1.22 | Deprecated | -| Openstack Cinder | 1.23 | Deprecated | -| Portworx CSI AWS | 2.9.0 | Deprecated | -| Portworx CSI AWS | 2.10 | Deprecated | -| Portworx CSI GCP | 2.6.1 | Deprecated | -| Portworx CSI Generic | 2.11.2 | Deprecated | -| Portworx CSI Generic | 2.11.4 | Deprecated | -| Portworx CSI Vsphere | 2.8.0 | Deprecated | -| Portworx CSI Vsphere | 2.9.0 | Deprecated | -| Portworx CSI Vsphere | 2.10 | Deprecated | -| Rook-Ceph CSI | 1.5.9 | Deprecated | -| VSphere CSI | 1.0.0 | Deprecated | -| VSphere CSI | 2.3.0 | Deprecated | -| VSphere CSI | 2.5.2 | Deprecated | -| VSphere Volume | 1.0.0 | Deprecated | - - - - - -#### Add-on - -| **Pack** | **Version** | **Status** | -|-------------------------------|-----------------|--------------| -| AWS Cluster Autoscaler | 1.0.0 | Deprecated | -| AWS EFS Addon | 1.3.6 | Deprecated | -| Dex | 2.21.0 | Deprecated | -| Dex | 2.25.0 | Deprecated | -| Dex | 2.28.0 | Deprecated | -| External DNS | 0.7.2 | Deprecated | -| External Secrets | 8.5.0 | Deprecated | -| External Secrets Operator | 0.5.6 | Deprecated | -| External Secrets Operator | 0.6.0 | Deprecated | -| Hashicorp Vault | 0.3.1 | Deprecated | -| Hashicorp Vault | 0.6.0 | Deprecated | -| Hashicorp Vault | 0.9.0 | Deprecated | -| Hashicorp Vault | 0.11.0 | Deprecated | -| Hashicorp Vault | 0.17.1 | 
Deprecated | -| Hashicorp Vault | 0.20.1 | Deprecated | -| Image Swap | 1.4.2 | Deprecated | -| Istio | 1.6.2 | Deprecated | -| Kong | 1.4 | Deprecated | -| Kubernetes Dashboard | 2.0.1 | Deprecated | -| Kubernetes Dashboard | 2.1.0 | Deprecated | -| Kubernetes Dashboard | 2.4.0 | Deprecated | -| Kubernetes Dashboard | 2.5.1 | Deprecated | -| MetalLB | 0.8.3 | Deprecated | -| MetalLB | 0.9.5 | Deprecated | -| Nginx | 0.26.1 | Deprecated | -| Nginx | 0.43.0 | Deprecated | -| Nginx | 1.0.4 | Deprecated | -| Nginx | 1.2.1 | Deprecated | -| Nginx | 1.3.0 | Deprecated | -| Nvidia GPU Operator | 1.9.1 | Deprecated | -| Open Policy Agent | 3.5.1 | Deprecated | -| Open Policy Agent | 3.6.0 | Deprecated | -| Palette Upgrader | 3.0.51 | Deprecated | -| Palette Upgrader | 3.0.70 | Deprecated | -| Palette Upgrader | 3.0.95 | Deprecated | -| Palette Upgrader | 3.1.26 | Deprecated | -| Portworx Generic Addon | 2.11.2 | Deprecated | -| Portworx Generic Addon | 2.11.4 | Deprecated | -| Prometheus Operator | 12.3.0 | Deprecated | -| Prometheus Operator | 19.2.3 | Deprecated | -| Prometheus Operator | 30.0.3 | Deprecated | -| Prometheus Operator | 30.2.0 | Deprecated | -| Prometheus Operator | 35.5.1 | Deprecated | -| Prometheus Operator | 37.2.0 | Deprecated | -| Reloader | 0.0.104 | Deprecated | -| Spectro Proxy | 1.0.0 | Deprecated | -| Spectro Proxy | 1.1.0 | Deprecated | - - - - - -# May 22, 2023 - Release 3.4.0 - -Palette 3.4.0 has various security upgrades, better support for multiple Kubernetes environments, a new cluster deployment platform, and increased user customization options for Palette, Edge, and Palette Dev Engine. Additionally, it includes updates for several packs and stops supporting Kubernetes 1.23 in Azure Kubernetes Service (AKS). You can find upgrade notes for self-hosted Palette 3.4 in the [Upgrade Notes](/enterprise-version/upgrade#palette3.4) documentation. - - -## Palette - -### Breaking Changes - -- Installations of self-hosted Palette in a Kubernetes cluster now require [cert-manager](https://cert-manager.io/docs/installation/) to be available before installing Palette. Cert-manager is used to enable Mutual TLS (mTLS) between all of Palette's internal components. Refer to the prerequisites section of [Installing Palette using Helm Charts](https://docs.spectrocloud.com/enterprise-version/deploying-palette-with-helm/) guide for more details. - - -- Self-hosted Palette for Kubernetes now installs Palette Ingress resources in a namespace that Palette manages. Prior versions of Palette installed internal components ingress resources in the default namespace. Review the [Upgrade Notes](/enterprise-version/upgrade#palette3.4) to learn more about this change and how to upgrade. - - -### Features - -- Palette's tenant administrators now have the ability to set up a personalized login banner for both the system and tenant levels. Refer to the [Login Banner](/tenant-settings/login-banner) reference page to learn more. - - -- You can now access a customized Amazon Machine Image (AMI) in Palette for Amazon Elastic Kubernetes Service (Amazon EKS) with support for AWS Launch Template. This allows you to personalize your EKS nodes and EBS root volumes by creating your own custom AMI. - - -- Palette now supports using IAM Roles for Service Accounts (IRSA) for AWS clusters. Enable the Palette managed policy *Controllers EKS Policy* to enable this feature. Refer to the [AWS Required Policies](/clusters/public-cloud/aws/required-iam-policies) for more information about the managed policy. 
- - -- You can now deploy clusters in the Google Kubernetes Engine (GKE) environment with Palette. Use the [Create and Managed GCP GKE Cluster](/clusters/public-cloud/gcp/create-gcp-gke-cluster) guide to learn how to deploy clusters to GKE with Palette. - - - -- Palette now supports the ability for you to use image swap to override specific registries, images, or a combination of both. You can add an `imageSwap` configuration to the Kubernetes pack YAML to point to a different registry or image. Check out the [Image Swap](/clusters/cluster-management/image-swap) reference resource to learn more. - - - -- Deploying a host cluster to AWS with Red Hat Enterprise Linux (RHEL) as the Operating System (OS) is now possible. This can be done by utilizing the *Bring Your Own Operating System* (BYOOS) pack, which allows for the creation of a custom AMI based on RHEL. - - -### Improvements - -- OpenID Connect (OIDC) identity provider configuration has now moved to the Kubernetes layer. You can now select the desired OIDC setting when selecting a Kubernetes distribution and version during a cluster profile creation. - - -- New macros for gathering attributes about a cluster profile, such as name, uid, and version, are now available. Refer to the [Macros Supported Variables](/registries-and-packs/pack-constraints?System%20Macros=system_macros_syntax#supportedvariables) documentation to learn more. - - -- Cluster profiles can now be filtered by scope such as Tenant and project. - - -- The tenant administrator dashboard now displays the cluster usage and cost information at the tenant scope. - - -- The Cox Edge cluster deployment wizard now populates a Point of Presence (PoP) list to help you select the geographical deployment target. - - -- As a tenant administrator, you can now quickly review all Edge hosts that are deployed in your tenant and quickly identify which project they belong to. - - -- The Cox Edge provider has been updated to support worker nodes' load balancers and customizable volume mounts for virtual machines. - - -- The Metal as a Service (MAAS) provider has been updated to improve the node reconciliation behavior. In scenarios where the Machine Intelligent Platform Management Interface (IPMI) is powered off, the machine is powered on instead of provisioning a new node. - - - -### Bug Fixes - -- A bug that caused issues with the deletion of a cluster's profile manifest has been successfully fixed. Manifests are now correctly deleted when removed from a cluster profile. - - -- The problem with Palette not removing namespaces when removing a layer from a cluster profile has been resolved. - - - -- You can now configure the behavior of the Palette agent to disable sending workload reports to the Palette control plane. This addresses scenarios where large clusters with many nodes exceed the 1 MB payload threshold, resulting in agent failures. Refer to the [Nodes Troubleshooting](/troubleshooting/nodes#paletteagentsworkloadpayloadsizeissue) for guidance on disabling the workload report feature. - - -## Edge - -### Breaking Changes - -- To enhance the security of Edge deployments, a tenant [registration token](/clusters/edge/site-deployment/site-installation/create-registration-token) created by the Tenant administrator is now required for pairing an Edge host with Palette. However, you can continue to use the auto registration, QR code, and manual registration methods available today. 
Refer to the [Register Edge Host](/clusters/edge/site-deployment/site-installation/edge-host-registration) documentation to learn more about Edge registration methods. - - -- Prior Edge Installer versions are incompatible with Palette 3.4 and newer versions due to product enhancements and security fixes. New Edge clusters deployed with an earlier Edge Installer version will not operate in Palette. Active Edge clusters in Palette will continue to work as expected. Use the latest version of the [Edge Installer](/spectro-downloads#edgeinstallerimage) when creating Edge artifacts and deploying new Edge clusters. - -### Features - - - -- You can now assign a static IP address to an Edge host during deployment. Previously, you could only assign a static IP address through the user-data configuration file. You can now set a static IP address by using the user-data configuration file, the Palette API, Terraform, or the Palette dashboard during the Edge host cluster creation wizard. - - -- An Edge host ID can now be sourced directly from system files exposed by the BIOS or firmware via the [Desktop Management Interface](https://www.dmtf.org/standards/dmi) (DMI). Ensure that the system file is not empty and does not contain special characters, and the Edge installer will use the value in the file as the Edge host ID. This is an advanced feature and is not required for setting a device ID. - - - -- To deploy an Edge host device, use the Edge Forge workflow. The workflow allows you to customize the Edge Installer, include a user-data configuration file, preload content bundles, and perform other functions according to your preferences. Visit the [Edge Forge workflow](/clusters/edge/edgeforge-workflow) page to learn more. - - -### Improvements - - -- The events log stream for the Edge host cluster now includes audit messages and critical errors. Logs for an individual Edge host are now also accessible. The Edge host logs are helpful in debugging and monitoring the deployment process of an Edge host. - - -- The upgrade process for Edge clusters has been optimized to avoid extra reboots of the Edge host, whether the upgrade is for the OS or the Kubernetes version. - - - -- The latest Kairos release information is now appended to the **/etc/os-release** file. Unlike previous versions of Palette, the Kairos release information no longer replaces the entire content of the OS's release file. This change prevents any issues that may arise with tools like Nvidia's GPU Operator due to the previous overwrite behavior. - - -- The Palette dashboard now displays Edge host clusters undergoing an upgrade process. - - -## Palette Dev Engine (PDE) - - -### Features - -- Palette PDE is now available in self-hosted installations of Palette. - - -- PDE now has a Command Line Interface (CLI) that you can use for programmatic access to PDE resources. Users can perform actions such as create, list, delete, resize, pause, and resume virtual clusters. You can also download the kubeconfig file of a virtual cluster with the CLI. Refer to the [Palette CLI](/palette-cli/install-palette-cli) documentation page to learn more. - - -### Improvements - -- Container applications that expose a service now automatically receive ingress with HTTPS support out-of-the-box. This means exposed service URLs automatically receive dynamic SSL certificates used for HTTPS. - - -- You can now access a [new dashboard](/devx#manageresources) to better understand your virtual clusters, app profiles, deployed apps, and resource utilization.
The dashboard provides a comprehensive overview of critical metrics and more. - - -- You can now increase or decrease the number of replicated instances of a container service. Check out the [Container Deployment](/devx/app-profile/container-deployment) guide to learn more about containerized deployments. - - -## Terraform - -- Version 0.14.0 of the [Spectro Cloud Terraform provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) is available. Refer to the Terraform provider [release page](https://github.com/spectrocloud/terraform-provider-spectrocloud/releases) for more details. - -## Packs - -### Operating System Packs - -| Pack | New Version | -|--------------------|----------| -| COS GKE | 1.0.0 | -| Edge Native BYOI | 1.0.0 | -| SLES Libvirt | 15.4.1 | -| SLES vSphere | 15.4.1 | -| Ubuntu OpenStack | 22.04 | - - -### Kubernetes Packs - -| Pack | New Version | -|--------------------|----------| -| Edge k3s | 1.25.2 | -| Edge k8s | 1.25.2 | -| Edge microk8s | 1.25 | -| Edge RKE2 | 1.25.2 | -| Kubernetes | 1.26.3 | -| Kubernetes EKS | 1.26 | -| Kubernetes GKE | 1.25.8 | - - -### CNI Packs - -| Pack | New Version | -|------------------------|----------| -| CNI Calico | 3.25.1 | -| CNI Calico Azure | 3.25.1 | -| CNI Cilium | 1.13.2 | -| CNI VPC Native GKE | 1.0.0 | -| CNI Flannel | 0.21.4 | -| CNI Cilium Tetragon | 0.9.0 | - - -### CSI Packs - -| Pack | New Version | -|----------------------------|----------| -| CSI AWS EBS | 1.17.0 | -| CSI GCP Persistent Driver | 1.10.1 | -| CSI Longhorn | 1.4.1 | -| CSI OpenStack Cinder | 1.26 | -| CSI Portworx Generic | 2.12.0 | -| GKE CSI GCP Driver | 1.0.0 | -| CSI vSphere | 3.0.0 | - - - -### Add-on Packs - -| Pack | New Version | -|------------------------|----------| -| Nvidia GPU Operator | 22.9.2 | -| AVI Kubernetes Operator| 1.9.2 | -| Istio | 1.17.2 | -| Cloudanix | 1.0.0 | -| CSI Longhorn | 1.4.1 | -| CSI Topolvm | 11.1.1 | -| External DNS | 0.13.4 | -| Flux CD | 2.6.0 | -| Kong | 2.17.0 | -| Nginx | 1.7.0 | -| Palette Upgrader | 3.3.16 | -| Portworx | 2.13.0 | -| Prometheus Agent | 19.0.2 | -| Prometheus Operator | 45.25.0 | -| Reloader | 1.0.24 | -| Spectro k8s Dashboard | 2.7.1 | -| HashiCorp Vault | 0.24.1 | - - -### Pack Notes - -- The CNI Calico pack version 3.25.1 is now available and adds support for IPv6 CIDRs. - -- The Nvidia GPU Operator pack is available and can be used to install Nvidia GPU drivers on Nvidia hardware. - -- The AVI Kubernetes Operator (AKO) pack is now available. You can use this pack to provide L4-L7 load balancing for north-south network traffic to applications deployed in a Kubernetes cluster. This pack is only available for VMware vSphere. - -- The vSphere CSI pack version 3.0.0 is available. This version supports the use of custom images for vSphere pods. - - -- The CNCF Kubernetes pack is renamed to Palette eXtended Kubernetes. - - -- Kubernetes versions before 1.24 are no longer supported for host clusters targeting Azure Kubernetes Service (AKS). This deprecation is due to Azure's Kubernetes support policy. You can learn more about Azure-supported Kubernetes versions [here](https://learn.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli). - - -## Education - -- Learn how to create a custom pack and deploy it to a Palette registry server with the [Create and Deploy a Custom Add-On Pack](/registries-and-packs/deploy-pack) tutorial.
- - -- An introductory tutorial on deploying a Palette-managed cluster to public cloud providers is now available. Learn to deploy a host cluster with Palette using the Palette user interface or Terraform. Check out the [Deploy a Cluster](/clusters/public-cloud/deploy-k8s-cluster) tutorial to get started - - -# March 19, 2023 - Release 3.3.0 - -This release contains several security fixes and new features for Edge. The Edge installation process has been improved to allow users greater flexibility and more control over the installation process. - -## Palette - -### Enhancements: - -* Users can now download all the clusters listed when applying a filter to the clusters list. - -## Edge - -### Features: - -* Edge now supports the ability to load images from an external OCI registry. -* The Edge Installer can now include preloaded content bundles containing packages and artifacts. This is useful for scenarios where you work with limited internet bandwidth or want to optimize the installation process. -* Users can now [create custom Edge Installer images](/clusters/edge/edgeforge-workflow/palette-canvos) to support advanced scenarios such as Bring Your Own Operating System (BYOOS), installing additional OS packages, preloading content into the installer, and more. -* Support for creating Virtual Machine Disks (VMDK) from the Edge installer ISO is now available. Use this to simplify deployments into VMware-based environments. -* Support for generating random UUID values for the Edge host is now available. This addresses the issue of some devices having the same Universal Unique Identifier (UUID) due to identical device identifiers. - -## Packs - -* CNI Packs: - * Calico CNI 3.25.0 -* CSI Packs: - * EBS CSI 1.16.0 - * vSphere CSI 2.7.0 -* Add-on Packs: - * Flux v2 2.6.0 - * Prometheus Operator 45.4.0 - * MetalLB 0.13.9 - * Spectro Proxy 1.3.0 - -# February 28, 2023 - Release 3.2.0 - -Release 3.2 introduces support for a new public cloud provider, Cox Edge. Other highlights include a streamlined experience for installing the Kubernetes Dashboard in a cluster, a new security scan, auto registration capabilities for edge devices, new [out-of-the-box services](/devx/app-profile/services/service-listings), and many other product enhancements. - -## Palette - -### Features - -* Support for the [Cox Edge](/clusters/public-cloud/cox-edge/) cloud provider is now available in Palette. -* Palette introduces a new user sign-in flow for users who previously created an account through SSO and who are a member of different organizations. Palette prompts you to select the organization to log in to. If you need help remembering, you can retrieve it using “Forgot your organization name?”. -* Palette now provides a streamlined experience for users when installing [Kubernetes dashboard](/integrations/spectro-k8s-dashboard). When adding Kubernetes dashboard as a pack, Palette displays relevant configuration items directly in the pack UI. -* Palette now auto-cleans deleted clusters, deployments, cluster profiles, cloud accounts, edge hosts, and other resources. Users can expect auto cleanup to take approximately 15 minutes. -* Additional filtering options are available to apply to clusters. Users can filter by region and country with pre-populated values based on cluster information and by ‘Unknown’ state. -* Palette now provides a way to search and filter private cloud gateways (PCGs) by resource tag. -* Palette provides the ability to schedule OS patching for enterprise clusters and PCGs. 
OS patching applies to clusters that have a master pool with multiple nodes. -* Palette provides a **tag.update** permission that can be assigned to user roles that allows modifying resource tags. -* Palette introduces a Software Bill of Materials [(SBOM) scan](/clusters/cluster-management/compliance-scan/#sbom:dependencies&vulnerabilities) capability that can be invoked manually or scheduled to run on tenant clusters. Multiple output formats are available. -* Palette offers two new app services: CockroachDB and HashiCorp Vault. -* Palette provides access to configuration and status [logs for each application](/devx/apps/logs/). -* Palette now allows you to revise the order of layers as you create an app profile. -* Virtual clusters now support the ability to [back up all disk volumes](/clusters/cluster-groups/cluster-group-backups) within the cluster. -* A system cluster profile named **nginx-ingress** is now available to help users [set up ingress endpoints](/clusters/cluster-groups/ingress-cluster-group) for cluster groups. - -### Enhancements - -* [Cluster groups](/clusters/cluster-groups) that were previously supported only at the tenant scope are now supported at the project scope. -* Palette has improved the launch time for virtual clusters. -* [Virtual clusters can be resized](/devx/palette-virtual-clusters/resize-virtual-clusters) from the default to a size that does not exceed the system-level quota for a cluster group like Beehive or the user quota for tenant-level cluster groups. -* Virtual clusters now display a progress status during the creation phase. -* The App profile container service layer contains additional [output variables](/devx/app-profile/app-profile-macros#containerserviceoutputvariables) to help services connect. Refer to the [service connectivity](/devx/app-profile/services/connectivity) document for additional guidance. -* We optimized the Spectro Cloud Postman [collection](/api/postman-collection) to circumvent a nested levels [bug](https://github.com/postmanlabs/postman-app-support/issues/10928) in Postman. - -### Deprecations - -* Enabling virtual clusters on host clusters is deprecated. Use [cluster groups](/clusters/cluster-groups) to enable virtual clusters moving forward. Cluster groups are also now supported at the [project](/projects) scope. - -## Edge - -### Features - -* Palette provides the ability to automatically register edge hosts for a specific project when a host authentication token is specified in **Tenant Settings > Registration Tokens**. - -* Bring Your Own OS (BYOS) support. - -## Packs -* OS packs: - * Ubuntu 22.04 on AWS, Azure, GCP -* K8s packs: - * Support for K8s 1.26.1 - * Support for K8s 1.25.6 - * Support for K8s 1.24.10 - * Support for K8s 1.23.16 - * Support for Tencent TKE 1.0.0 on VMware -* CNI Packs: - * Calico CNI 3.24.5 - * Cilium CNI 1.12.6 - * Antrea CNI for VMware 1.9.0 -* CSI Packs: - * EFS CSI 1.4.9 - * Azure Disk CSI 1.25.0 - * GCE Persistent Disk CSI 1.8.2 - * Rook-Ceph CSI 1.10.0 -* Add-on Packs: - * Kong Ingress 2.13.1 - * K8S Dashboard 2.7.0 - * External DNS 0.13.1 - * Open Policy Agent 3.11.0 - * Reloader 0.0.129 - * External 0.7.1 - * Vault 0.23.0 - * Nginx Ingress 1.5.1 - * AWS Application Load Balancer 2.4.6 - * Prometheus Operator 44.3.0 - * Bring Your Own OS (BYOS) pack 1.1.0 - * Spectro Proxy 1.2.0 - -
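For the **nginx-ingress** system cluster profile introduced in this release, the workload side of an ingress endpoint is a standard Kubernetes Ingress object. The snippet below is a minimal, hypothetical sketch only: the application name, host, and service port are placeholders, and the ingress class name and DNS setup depend on how the cluster group's ingress endpoint is configured (see the linked ingress guide above).

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: hello-app                     # placeholder application name
  annotations:
    # Redirect plain HTTP to HTTPS; handled by the NGINX ingress controller.
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
  ingressClassName: nginx             # assumes the controller registers the "nginx" ingress class
  rules:
    - host: hello.example.com         # placeholder host; actual DNS depends on the cluster group endpoint
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: hello-app       # placeholder Service exposed by your application
                port:
                  number: 80
```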
- - - - -# December 28, 2022 - Release 3.1.0 - -Palette 3.1 is released with support for AWS GovCloud, FIPS compliant PXK, and PXK-E Kubernetes versions. This release also features Autoscalers for IaaS clusters, FIPS enablement at the scope level, cluster tagging, and the ability to use tags for resource filtering and access control. The Palette Developer Experience (PDE) product also contains several enhancements that improve the user experience, such as the ability to pause and resume clusters, new services for app profiles, and more. - -## Palette - -### Upgrade Notes: - -* MaaS cluster's initialization configuration has been updated to disable memory swap. This will result in MaaS cluster nodes becoming repaved when applying the new configuration. - -### Features: - -* Palette supports integration with [AWS GovCloud services](/clusters/public-cloud/aws/add-aws-accounts#prerequisites) to meet the compliance mandates for safeguarding sensitive data by strengthening identity management, improving cloud visibility, and protecting accounts and workloads to support mission-critical workloads for government customers. -* [Autoscaling](/clusters/cluster-management/node-pool#workerpool) capabilities for Palette IaaS clusters to ensure better availability and cost management for dynamic workloads. -* Palette is now compliant with FIPS compliance and provides a [FIPS-compliant](/compliance#fips140-2) version of Kubernetes (PXK and PXK-E). Palette FIPS support is extended at the platform Level with the tenant and project Scope and cluster level with FIPS compliant infrastructure layer cluster profiles. -* Palette supports tagging and the ability to filter user [access](/clusters/cluster-management/cluster-tag-filter) and [visibility](/clusters/cluster-management/noc-ui#monitoryourclusterlocation) to clusters using tags. You can filter geographically dispersed clusters in the Palette map view and list view using [flexible filters](/clusters/cluster-management/noc-ui#mapfilters) to have a granular view of cluster information. -* Palette supports app profile versioning. Versioning enables users to create multiple [versions of an App Profile](/devx/app-profile/versioning-app-profile#appprofileversioning) within the scope of a single profile name. -* Palette supports the [cloning](/devx/app-profile/app-profile-cloning#cloneappprofiles) of App Profiles across multiple projects. For example, you can clone an app profile created under a specific project to another project within the same tenant. -* Palette Dev Engine supports the manual and system update of an [App Profile](/devx/app-profile/versioning-app-profile#appprofileversioning). You can verify the update notification and apply the changes to the Apps. -* Palette app mode now supports the use of [containers](/devx/app-profile#services). You can specify containers when creating an app profile. -* Palette leverages [Helm and OCI registries](/devx/manage-dev-engine/registries) for custom pack management. -* Palette provides [out-of-the-box](/devx/app-profile#messagingsystemservices) support for application services such as Kafka, MySQL, NATS, and more for Palette Dev Engine. These services can be specified when creating an App Profile. -* Palette allows you to [pause and resume](/devx/palette-virtual-clusters/pause-restore-virtual-clusters#overview) virtual clusters that are not in use. This adds significant flexibility in managing the operating costs and optimizing resource management for virtual clusters. 
- -### Enhancements: - -* [OS patch reboot](/clusters/cluster-management/os-patching#rebootifrequired) allows clusters to reboot to apply system updates if required. - -* Palette Tencent clusters now support using [security groups](/clusters/public-cloud/tke#deployatencentcluster) for network isolation and improved security. - -* Reduced launch time when creating Palette Virtual Clusters. - -* Palette Virtual Clusters now support ephemeral storage. - -### Deprecations: - -* Deprecated API: `GET /v1/dashboard/projects`, new API: `POST /v1/dashboard/projects` - -* Deprecated API: `POST /v1/dashboard/spectroclusters`, new API: `POST /v1/dashboard/spectroclusters/search` - -### Known Issues: - -* Palette does not allow scaling of control plane nodes for the MicroK8s pack. The workaround is to remove the scaling limit of the control plane. - -* Currently, MicroK8s does not support an out-of-the-box service load balancer. - * Workaround: To avoid this, you can install the [AWS Application Load Balancer](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/guide/service/nlb/) pack. Packs that deploy services of type load balancer will require annotation and `loadBalancerClass` changes. - -## Edge - -### Features: - -* Palette supports the provisioning of [MicroK8s clusters](/integrations/microk8s#microk8soverview). MicroK8s deployments are quick and ideal for creating disposable Kubernetes clusters. The MicroK8s pack supports automatic updates, security configuration, and the ability to self-update Kubernetes dependencies. - -## [Spectro Image Updates](/spectro-downloads#on-premartifacts): - -* Private Cloud Gateway Installer updated to version 1.4.0. -* On-Prem Installer updated to version 2.4.0. -* Air Gap Repo Appliance updated to version 2.1.0. -* EDGE Installer version 2.2.23. - -## [Packs and Integrations](/integrations): - -* csi-longhorn version 1.3.1 -* csi-longhorn-addon version 1.3.1 -* kafka-operator version 0.32.0 -* mysql-operator version 0.6.2 -* nats-operator version 0.18.2 -* palette-upgrader version 3.0.70 -* palette-upgrader version 3.0.51 -* spectro-k8s-dashboard version 2.6.0 - - -# October 24, 2022 - Release 3.0.0 - -Spectro Cloud Palette 3.0.0 is released with [Native Edge](/clusters/edge), [Palette Dev Engine](/devx), [NOC-UI](/clusters/cluster-management/noc-ui), and many more exciting capabilities. - -**Features** - -* A new set of capabilities that improve the [developer experience](/devx) are introduced in this release: - * Rapid application deployment with a smooth onboarding experience. - * RBAC with a developer-centric view. - * System scope resource quota. - * System scope cluster groups to host [Palette Virtual Clusters](/clusters/palette-virtual-clusters). - * Out-of-the-box application profiles and application deployment with Palette Virtual Clusters. - * Application profiles can consist of Helm charts, manifests, and database services such as MongoDB, Redis, and PostgreSQL. - -* The Palette [Native Edge](/clusters/edge#edgenative) architecture is an instance of the Palette Edge Distribution. The Palette Edge instance is based on the desired operating system and Kubernetes version installed natively onto the edge devices. All Day 1 and Day 2 operations, from installation to scaling, upgrades, and reconfiguration, are managed by the Palette console. - -* Palette provides an intuitive, location-based UI that monitors clusters with [NOC-UI](/clusters/cluster-management/noc-ui).
- -* Palette enterprise mode production clusters can be backed up to [Azure Blob storage](/clusters/cluster-management/backup-restore#forazureblobbackup) for convenient restoration. - -* Palette provisions cluster monitoring with [Kubernetes Dashboard](/integrations/spectro-k8s-dashboard) exposed to external traffic using [Spectro Proxy](/integrations/frp) pack with RBAC authentication. - -**Enhancements** - -* Palette enables the provisioning of private Azure Kubernetes Clusters (AKS) clusters within Azure Virtual networks (VNet) for enhanced security by offloading the orchestration to a [Private Cloud Gateway](/clusters/public-cloud/azure/gateways) deployed within the same account as the private AKS clusters. - -* Operators can now customize the [pod limit](https://learn.microsoft.com/en-us/azure/aks/) for AKS clusters. Customize the pod limit value from the Kubernetes configuration [file](/clusters/cluster-management/node-pool) at any time by editing the `maxPodPerNode` value. - -* The Kubernetes Packs for [Edge Native](/clusters/edge/architecture#kubernetesdefaults) deployments disable a few items by default to allow users to install those items independently or to avoid duplication. - -* The latest Palette Terraform releases, [Module 0.4.1 and Module 0.10.1](/terraform), support: - * Native Edge clusters - * Palette Virtual Clusters - * Fixes towards [Enhancements](/terraform#changes) - -**Packs and Integrations** - -* Dex version 2.35.1 -* Harbor version 1.9.3 -* Istio version 1.14.3 -* Image Swap version 1.5.1 -* Generic-VM Libvirt version 1.0.1 -* Generic VM vSphere version 1.0.3 -* Tekton-chains version 0.12.0 -* Tekton-operator version 0.61.0 -* K3s version 1.24.4 -* Spectro Proxy version 1.1.0 -* External DNS version 0.12.2 -* MetalLB-version version 0.13.5 -* Reloader version version 0.0.118 -* AWS Cluster Autoscaler version 1.22.2 -* Fluentbit version 1.9.6 -* Kubernetes dashboard version 2.6.1 -* Calico version 3.24 -* Cert-Manager version 1.9.1 -* Open Policy Agent version 3.9.0 -* AWS EBS CSI version 1.10.0 - -**Known Issues** - -* While deploying multiple apps in a Palette Virtual Cluster, if the deployment of one of the apps is blocked due to errors, then subsequent apps deployed to the same virtual cluster might also be stuck in deploying state. Apply the following workarounds if you encounter the issue. - - * Delete the stuck App. - * Fix the App with the error. - * Redeploy the App again. -# September 10, 2022 - Release 2.8.0 -Spectro Cloud Palette 2.8.0 is now available with the support of Palette Virtual Clusters, Web-Based Kubectl, Import and Export of Profiles, Terraform Releases, and many exciting enhancements. - -**Features** -* Palette now supports lightweight, cost-effective, secure, and resource-efficient [Palette Virtual Clusters](/clusters/palette-virtual-clusters) to rapidly create securely-isolated environments for applications without the infrastructure and operational overhead of additional Kubernetes clusters. -* Palette leverages web-based [Kubectl](/clusters/cluster-management/palette-webctl#overview) for the users to deploy applications, inspect and manage cluster resources, and view logs via the Palette terminal without an external terminal. -* Palette enables the reuse and sharing of large profiles with many add-ons and integrations to be [exported and imported](/cluster-profiles/cluster-profile-import-export#overview) across multiple environments, projects, and tenants. 
-* Palette customers can now provision the fully conformant Kubernetes distribution [RKE2](/integrations/rke2#rke2overview), which focuses on security and compliance. -* The latest Palette Terraform releases, [Module 0.2.3 and Module 0.3.0](/terraform#moduletoprovidercompatibilitymatrix), focus on: - * Cluster resource tagging - * Static placement of AKS clusters - * VMware cloud-type support of Terraform modules - * Image template support - -**Enhancements** -* Palette upgrades the vSphere Private Cloud Gateways and On-Prem cluster specifications to newer versions: - - * The Kubernetes version has been upgraded from 1.21 to 1.22.12, the latest version in 1.22. - - * The storage layer has been upgraded from 2.3 to 2.5.2 to fix volume attachment issues. - - * Ubuntu OS has been upgraded from LTS 18.04 to LTS 20.04. - - * The PCG and On-Premise images now have all the latest OS patches applied. - -* Palette enables [Cluster Lock](/clusters/cluster-management/palette-lock-cluster#overview) to restrict clusters under a tenant, a project, or an individual cluster from receiving cluster management services upgrades when Palette itself is upgraded. -* Palette surfaces [OS patching details](/clusters/cluster-management/os-patching#monitoring) such as `Last Applied Patch Time`, the date and time of the last OS patch. -* The cluster profile details page now maps cluster profiles to clusters, listing the clusters created using a specific cluster profile. -* Palette supports VNet resource group filtering for AKS clusters, allowing the VNet to be a part of a different resource group than the AKS resource group. -* Palette enables users to override the [custom folder](/clusters/data-center/vmware#deployingavmwarecluster) for vSphere templates, in addition to the default image template folder, `spectro-templates`, for the vSphere environment. -* [Regular expressions](/workspace#regexfornamespaces) for mass selection of workspace names for role binding. -* Palette also supports single sign-on using SAML/OIDC integration with [Google Identity](/user-management/saml-sso#oidcbasedsso). -* Palette enables customers to optionally disable the [OIDC associate provider](/clusters/public-cloud/aws/eks) for EKS clusters if the service provider restricts cluster deployment in an OIDC-enabled state. -* Tenant administrators can now set [Palette resource limits](/user-management/palette-resource-limits#setresourcelimit) through the Palette console. -* Palette provides [infrastructure privacy](/clusters/public-cloud/azure#deployinganazurecluster) for Azure cloud accounts. - -**Deprecations** - -* **API Deprecations** - - * Deprecated API: `GET /v1/clusterprofiles`
- New API: `POST /v1/dashboard/clusterprofiles` with better filter support - * Deprecated API: `GET /v1/projects`
- New API: `POST /v1/dashboard/projects` with better filter support - * Deprecated API: `GET /v1/spectroclusters`
- New API: `POST /v1/dashboard/spectroclusters` with better filter support - * Deprecated API: `GET /v1/spectroclusters/{uid}/packs/{packName}/config`.
- New API: `GET /v1/spectroclusters/{uid}/profiles/{profileUid}/packs/{packName}/config`. With support for multiple cluster profiles within a cluster, the `profileUid` is required to locate a pack uniquely within the cluster. - - -* **Pack Deprecations:** - - * Azure Kubernetes Services (AKS) 1.21 - -**Packs and Integrations** - -* Nginx 1.3.0 -* Thanos - 10.5.3 -* EFK - 7.17.3 -* Kubernetes Dashboard - 2.6.0 -* Vault - 0.20.1 -* Calico - 3.23 -* Calico for Azure - 3.23 -* AWS EBS CSI - 1.8.0 -* AWS EFS - 1.4.0 -* AWS EFS -addon - 1.4.0 -* gce-pd-csi-driver-v1.7.1 -* Portworx-generic-addon-v2.11.2 -* Portworx-generic-v2.11.2 -* vSphere_csi_2.5.2 - -**Known Issues** - -* AKS clusters in the v1beta1 environment give an empty report for Kubernetes Conformance Testing (Sonobuoy scan). -* OS patch information is not displayed for clusters with OS patching scheduled on boot. - - - -# July 17, 2022 - Release 2.7.0 -Spectro Cloud Palette 2.7 is released with advanced features supporting Windows Worker Node Pools, Canonical Ubuntu Advantage, Cluster Migration from Private Cloud Gateway, enhanced Workspace, and more. - -**Features:** -* Spectro Cloud Palette has enhanced the import cluster functionality with a ["Read-Only Mode"](/clusters/imported-clusters/cluster-import) and a "Full Permission Mode". Users can start exploring Palette by importing a cluster in a minimal mode without granting the full administrative set of permissions. Over time, users can grant additional permissions to manage Day 2 operations. -* Palette now supports [Windows worker nodes](/clusters/public-cloud/azure) in addition to Linux worker nodes for Azure Kubernetes Service (AKS) clusters. -* Palette provides security and OS patching benefits through [Canonical's Ubuntu Advantage](/integrations/ubuntu#ubuntuadvantage) for Infrastructure subscription, with Ubuntu as an OS layer for multiple operating environments. -* Automatically scale the workload resources of your Azure Kubernetes Service (AKS) clusters with the [AKS Autoscaler](/clusters/public-cloud/azure) to meet dynamic user workloads. -* Palette leverages the Container Storage Interface (CSI) and Container Network Interface (CNI) layers using Helm charts in addition to manifest-based deployment. -* Palette introduces a well-defined [color scheme to monitor](/clusters/cluster-management/pack-monitoring#packmonitoring) the different stages of pack deployment during cluster creation. -* Palette [Edge Clusters](/clusters/edge) can be deployed on remote bare metal or virtual machine appliances, with end-to-end support for deployment, scaling, upgrades, and reconfiguration. - -**Enhancements:** - -* The Palette [Azure CNI Pack](/integrations/azure-cni#azurecni) ensures advanced traffic flow control using Calico policies for AKS clusters. -* Palette supports the [migration of Private Cloud Gateway (PCG)](/enterprise-version/enterprise-cluster-management#palettepcgmigration) traffic from an unhealthy to a healthy PCG without compromising service availability. -* Palette Workspaces are upgraded with: - * [Resource Quota](/workspace/workload-features#workspacequota) allocation for Workspaces, Namespaces, and Clusters. - * The [Restricted Container Images](/workspace/workload-features#restrictedcontainerimages) feature to restrict the accidental deployment of a delisted or unwanted container to a specific namespace. - * The ability to collectively role-bind namespaces using [regular expressions for namespaces](/workspace#regexfornamespaces) selection.
- * Selective [Resource Restore](/workspace/workload-features#restoreyourbackup) from Workspace Backup across Cluster resources, Node Ports, and Persistent Volumes. -* Palette provides visibility into [Role Binding and Cluster Role Binding](/clusters/cluster-management/workloads#overview) resources running inside our workload clusters. - -# May 30, 2022 - Release 2.6.0 - -Spectro Cloud Palette 2.6 is released to support Cluster Profile Version, EKS Secret Encryption, CSI Storageclass, and added Parameters capabilities. - -**Features:** - -* Palette supports multiple [versions](/cluster-profiles/task-define-profile#clusterprofileversioning) of a single-cluster profile under a unique name to allow backward compatibility. - -* Palette leverages AWS Key Management Service (KMS) to provide envelope [encryption](/clusters/public-cloud/aws/eks#eksclustersecretsencryption) of Kubernetes Secrets stored in Amazon Elastic Kubernetes Service (EKS) clusters. - -* Palette covers a long list of [parameters](https://github.com/kubernetes-sigs/aws-ebs-csi-driver#createvolume-parameters) and customization capabilities for the [csi-aws-1.0.0](/integrations/aws-ebs#parametersupportcsi-aws-1.0.0packmanifest) pack manifest. - -**Enhancement:** - -* Palette allows reconciliation of the CSI layer Storageclass for managed clusters of Amazon Elastic Kubernetes Service (EKS). - -**Bug Fixes** - -* We request our users to add the `ec2:ReplaceRoute` permission to the [AWS](/clusters/public-cloud/aws/required-iam-policies) and [EKS-AWS](/clusters/public-cloud/aws/required-iam-policies) cloud account Controller Policy to replace an existing route, within a route table in a Virtual Private Cloud, to facilitate the cluster deletion process. - - -# April 26, 2022 - Release 2.5.0 - -Spectro Cloud Palette 2.5.0 was released with support for Tencent Kubernetes Engine (TKE), Palette free service offerings, many enhancements, and bug fixes. - -**Features:** - -- Palette now supports [Tencent Kubernetes Engine (TKE)](/clusters/public-cloud/tke#overview)—a fully-managed Kubernetes service from Tencent Cloud. Deploy and manage the end-to-end life cycle of TKS clusters, effortlessly. -- Palette introduces **Placeholder Variables** as [Macros](/clusters/cluster-management/macros#overview) in our Cluster Profile layers for advanced regression and easier update of variables, across multiple running clusters. -- Palette displays a well-organized [Product Onboarding](/getting-started/onboarding-workflow#paletteonboardingworkflow) process to streamline user-product adoption, with an assured unfailing user experience, to jump-start our product journey. -- Palette helps out new users in their purchase decision by offering free tier services. - - [Palette Freemium](/getting-started/palette-freemium#trypaletteforfree) to explore Palette's capabilities with free and fixed kilo-Core-hour usage for finite cluster deployments. - - [Free Cloud Credit](/getting-started/palette-freemium) offers access to a free cloud account, with sufficient permissions and credentials, to have a first impression on our product journey. - -**Enhancements:** - -- Palette users can now manually [Force Delete a Cluster](/clusters/public-cloud/aws#forcedeleteacluster), stuck in the **Deletion** state for more than **15 minutes**, through the User Interface. -- Palette production clusters can be backed up to object storage of [GCP Buckets](/clusters/cluster-management/backup-restore#configureyourbackupingcpbucket) for convenient restoration. 
- -**Bug Fixes:** - -- We request our users to please add the `ec2:DeleteNetworkInterface` permission to their AWS cloud account Controller Policy Permissions to detach and delete the network interface for [AWS](/clusters/public-cloud/aws#awscloudaccountpermissions) and [EKS](/clusters/public-cloud/aws/eks) clusters. - -**Packs and Integrations:** - -- [Ubuntu 20.04](/integrations/ubuntu)- A long term support release of highly popular Ubuntu Linux Operating System. -- Imageswap-webhook—a mutating webhook integration that intercepts pod requests and changes to the container image location. - -# February 26, 2022 - Release 2.3.0 - -Palette 2.3.0 includes the following enhancements: - -- Added support for cluster-centric detailed [**Namespace Management** and granular **RBAC**](/clusters/cluster-management/cluster-rbac). Previously this capability was only available via workspaces. -- Enabled secure and straightforward user authentication with [**API Keys**](/user-management/user-authentication/#apikey) to access the APIs without referring to user credentials. -- Tenant administrators can now get an [**aggregated view**](/clusters/#scope) of clusters across all the projects under their tenant. -- Added support for [**Taints**](/clusters/cluster-management/taints/#overviewontaints) that can be applied to a node pool to restrict a set of intolerant pods getting scheduled to an inadequate node. -- Added support for [**Labels**](/clusters/cluster-management/taints/#overviewonlabels) to constrain pods so that they can run on a particular set of nodes. -- Enable multi-cluster [**backup and restore from workspaces**](/clusters/cluster-management/backup-restore/#workspacebackupandrestore). -- New [**workspace user roles**](/clusters/cluster-management/backup-restore#workspaceoperator) that provide granular control to specific actions within a workspace: - - _Workspace Operator_ - Allows only backup and restore capabilities within a workspace - - _Workspace Admin_ - Administrative privileges within a workspace -- Palette will now perform a [**rolling upgrade**](/clusters/#rollingupgrade) on the nodes for any fundamental changes to the cluster config. Palette will keep track of the reason that triggered the rolling upgrade on the nodes in the cluster and is made accessible under **Cluster Overview** > **Upgrade details**. -- Enable deployment of the single pack across [**multiple layers**](https://docs-latest.spectrocloud.com/cluster-profiles/task-define-profile/#creatingclusterprofiles) cluster profile layers. -- Palette introduces a VM operator to allow Virtual Machine based applications to be modeled as Cluster Profile layers. - -# January 20, 2022 - Hotfix 2.2.26 - -- Palette Hotfix 2.2.26 supports custom Helm chart registry in Private Networks. -- [Helm registries](/registries-and-packs/helm-charts) can now be set up in **Protected** mode also. In protected mode, charts are configured in cluster profiles without being synchronized into the management console. -- For the tenant clusters deployed in a private network, these charts from the protected Helm registries are downloaded and deployed by the Palette orchestrator. - -# December 24, 2021 - Release 2.2.0 - -Palette 2.2.0 is released with the beta version of Edge Clusters along with upgraded Cluster API support. - -The 2.2.0 Palette enhancements are: - -- Palette users can now provision and manage their [Kubernetes clusters using edge appliances](/clusters/edge/) in addition to usual data centers or cloud environments. 
-- Palette has been upgraded to use a newer version of the CNCF Cluster API for better automation, integration, and efficiency. -- The upgraded Cluster API version used by Palette mandates the following pack updates: - - Kubernetes 1.18.x and below are no longer supported. Please use Kubernetes version 1.19.x or above in the Cluster Profile. - - vSphere CSI storage driver 1.0.x version is no longer supported for new Cluster Provisioning. Please upgrade your CSI Pack to 2.3.x for enhanced performance. -- As part of Palette upgrade to 2.2.0, control plane node(s) of any existing vSphere cluster will be replaced. - -# November 20, 2021 - Release 2.1.0 - -Palette 2.1.0 is released with the following key improvements: - -- Added support for replicated, cross-region Amazon Elastic Container Registries (ECR) whereby a single OCI registry within Spectro Cloud Palette can serve multiple deployment regions. -- Spectro Cloud users can now join more than one tenant. Users belonging to multiple organizations must choose the desired tenant to log in to. This feature is also supported for SSO-enabled tenants. -- Improved the UI of the Cluster Overview page. Visibility into basic cluster properties as well as cluster management actions such as configuration overrides, machine management, scan and backup policies, cluster deletion are now arranged under the **Settings** menu on the top right-hand side. - -# November 1, 2021 - Release 2.0.0 - -We are excited to announce the Spectro Cloud platform's new name - "PALETTE". In addition, version 2.0 of our platform brings additional cost visibility, optimization features, enhanced governance, and control with **Workspaces**. - -Our latest list of features includes: - -- **Workspaces** enable the association of relevant namespaces across clusters to manage access, obtain cost visibility, and get workload visibility by applications or teams. -- Cluster health alert can be integrated with IT service management (ITSM) and collaboration tools such as Slack, ServiceNow, Microsoft Teams, etc. -- Our built-in Spectro Proxy can be leveraged to establish seamless and secured access to the Kubernetes clusters in public and private data center environments. -- Cluster cloud cost calculation for public and private clouds. -- Granular usage cost break down by namespaces, workspaces, and projects based on actual resource utilization by pods, jobs, stateful sets, PVCs, etc. -- Detailed visibility of resource utilization by cluster, namespaces, projects, and workspaces. - -# September 14, 2021 - Release 1.14.0 - -Spectro Cloud 1.14 is released with additional health alert conveyances, secured log storage, transparent cost features, and scalable enterprise cluster backup. - -- Spectro Cloud users can now push their audit logs to the AWS Cloudtrail to enhance continuous monitoring and troubleshooting of the workload clusters. -- Spectro Cloud layouts instantaneous and effortless monitoring of the cluster cloud cost. -- Now Spectro Cloud users can receive real-time alerts on cluster health at hooked external applications. -- Spectro Cloud enterprise mode production clusters can be backed up to object storage of S3 buckets for convenient restoration. -- Spectro Proxy authentication pack to provision reverse proxy aided communication for clusters deployed in a private network belonging to local data centers. -- Spectro Cloud has stepped up to an upgraded and stable API version for better automation, integration, and efficiency. 
- -# August 14, 2021 - Release 1.13.0 - -Spectro Cloud users can now convert their bare-metal servers into flexible, cohesive, and distributed instances of virtual machines with the slightest efforts utilizing Metal as a Service (MAAS). - -# July 23, 2021 - Release 1.12.0 - -Spectro Cloud 1.12 is released with generic cluster import, OpenID Connect (OIDC) support to handle identify management securely and seamlessly, and support for AKS—a managed Kubernetes Service offering from Azure cloud. - -- Now import existing non-Spectro clusters from any cloud platform using our Generic cluster import feature. We support broad operations like scans, backups, etc. on these imported clusters as well as provisioning and lifecycle management of add-ons. -- Spectro Cloud now supports AKS, a fully-managed Kubernetes service from Azure. Deploy and manage end-to-end lifecycle of AKS clusters. -- Spectro Cloud extends its SSO support by providing integration with OpenID Connect (OIDC). OIDC is the de facto standard to handling application authentication in the modern world. Through this integration, Spectro Cloud enables users to integrate single sign on, using various identify providers such as Amazon Cognito, Keycloak etc. -- Kubernetes upgraded to version 1.19 for enterprise clusters. - -# June 28, 2021 - Release 1.11.0 - -Spectro Cloud 1.11 is released with the support of OpenStack cloud and support for OIDC based authentication into Kubernetes clusters. - -- Spectro now supports deployment and management of Kubernetes clusters in OpenStack based private data centers. -- Support for OIDC based authentication into Kubernetes clusters and preconfigured kubeconfig file to easily authenticate when using kubectl. - -# June 1, 2021 - Release 1.10.0 - -Spectro Cloud 1.10 released with support for Amazon Elastic Kubernetes Service (EKS), cluster management policies to measure cluster compliance and perform backups and restores. - -- Provision and manage Kubernetes clusters using Amazon EKS service including support for advanced configurations like Fargate profiles, OIDC Authentication etc. -- Scan your Kubernetes clusters to ensure they are conformant and compliant. -- Consensus-driven security scan for the Kubernetes deployment with CIS Kubernetes Benchmarks. -- Perform penetration tests to check for configuration issues that can leave the tenant clusters exposed to attackers. -- Backup your Kubernetes clusters including any persistent volumes. Restore these backups as required on any cluster. - -**Note**: - -The following permissions are additionally required to be granted to the cloud accounts used to launch clusters on AWS. Please update your account to ensure that you have these new permissions included. - -Add the following permissions to the IAM policy called NodePolicy if it was created as documented in Spectro Cloud documentation. - -```json -{ - "Effect": "Allow", - "Action": ["secretsmanager:DeleteSecret", "secretsmanager:GetSecretValue"], - "Resource": ["arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/*"] -}, -{ - "Effect": "Allow", - "Action": [ - "ssm:UpdateInstanceInformation", - "ssmmessages:CreateControlChannel", - "ssmmessages:CreateDataChannel", - "ssmmessages:OpenControlChannel", - "ssmmessages:OpenDataChannel", - "s3:GetEncryptionConfiguration" - ], - "Resource": ["*"] -} -``` - -Add the following permissions to the IAM policy called ControllerPolicy if it was created as documented in Spectro Cloud documentation. 
- -```json -{ - "Effect": "Allow", - "Action": ["eks:AssociateIdentityProviderConfig", "eks:ListIdentityProviderConfigs"], - "Resource": ["arn:aws:eks:*:*:cluster/*"] -}, -{ - "Effect": "Allow", - "Action": ["eks:DisassociateIdentityProviderConfig", "eks:DescribeIdentityProviderConfig"], - "Resource": ["*"] -} -``` - -# May 4, 2021 - Release 1.9.0 - -Spectro Cloud 1.9.0 released with advanced support of security, availability and updates. - -- Spectro Cloud ensures users to start, run and scale highly-available and secure clusters with automated key tasks such as patching, node provisioning and updates with EKS support. -- Now create and gain permission to your AWS cloud account by just using role ARN, without sharing long-term credentials. - -# March 29, 2021 - Release 1.8.0 - -Spectro Cloud 1.8.0 released with advanced support for deploying & discovering Helm Charts and several usability enhancements! - -Featuring - -- Set up public and private helm chart registries to leverage the vast database of integrations and add-ons. -- Deploy reliable and secure Kubernetes clusters, without worrying about Kubernetes updates, dependencies and security patches using the EKS Distro (EKS-D). -- Accumulate container logs across all cluster nodes to create a support bundle to enable faster troubleshooting. -- Attach multiple supporting manifests to your cluster profile layers in order to deploy integrations end to end without having to use command line client. -- Add additional BYOM (Bring Your Own Manifest) layers to your cluster profiles to perform ad-hoc customized deployments on the cluster. -- You can now import and manage existing clusters running in your private VMware environment behind a proxy. -- Discover charts deployed on your existing clusters and convert them into a cluster profile to use it as a template for future cluster deployments. -- Enhanced cluster profile builder experience with several usability enhancements. - -# February 07, 2021 - Release 1.7.0 - -The following features and enhancements were released as part of 1.7.0 - -- Support for existing Kubernetes clusters that were not deployed by Spectro Cloud to be imported into the Spectro Cloud platform for visibility, management and additional capabilities such as application lifecycle management -- Automated as well as on-demand OS updates to keep cluster nodes up-to-date with the latest security fixes and enhancements. -- Modularize cluster profiles as Core Infra, Add-on, and Full profiles; Apply multiple add-on profiles to a cluster. -- Optimize AWS cloud cost utilizing spot instance pricing for cluster worker node pools. -- Selectively upgrade on-premises Spectro Cloud instance to a desired version, as opposed to always having to upgrade to the latest version. - -# December 23, 2020 - Hotfix 1.6.4 - -This release adds a fix for the permissions of vSphere GET folders. - -# December 13, 2020 - Release 1.6.0 - -Our on-premises version gets attention to finer details with this release: - -- The Spectro Cloud database can now be backed up and restored. -- Whereas previous on-premises versions allowed upgrading only to major versions, this release allows upgrading}> Upgrades to the Spectro Cloud platform are published to the Spectro Cloud repository and a notification is displayed on the console when new versions are available. to minor versions of the Spectro Cloud platform. 
-- Monitoring the installation using the dedicated UI (the platform installer contains a web application called the Supervisor that provides detailed progress of the installation) now provides more details when [migrating](/enterprise-version/deploying-an-enterprise-cluster/#migratequickstartmodeclustertoenterprise) from the quick start version to the enterprise version. -- AWS and GCP clusters can now be provisioned from an on-premises Spectro Cloud system. - -On the VMware front, we have: - -- removed the dependency on the HA Proxy load balancer for creating clusters via DHCP. -- introduced dynamic folder creation in vCenter. This applies to every cluster and all of the cluster's virtual machines. -- enabled support for DNS mapping in search domains on vSphere. - -Other new features: - -- New customers can now sign up for free trials of Spectro Cloud. When ready, it is easy to upgrade plans and set up automatic payments using credit/debit cards. -- Pack constraints (a set of rules defined at the pack level to validate the packs for a profile or a cluster before it gets created or updated; packs must be validated before the cluster is submitted to ensure a successful deployment) have been enabled to reduce the chances of cluster deployment failures that might occur due to incorrect values being set. -- Compatibility for Portworx version 2.6.1, Calico version 3.16, and newer versions of [Kubernetes](/integrations/kubernetes/). - -# December 03, 2020 - Hotfix 1.5.7 - -In this hotfix, we added: - -- Compatibility for [Calico 3.16](https://www.projectcalico.org/whats-new-in-calico-3-16/). -- The on-premises version now allows specifying [CIDR for pods](/enterprise-version/deploying-the-platform-installer/#deployplatforminstaller) to allocate them an exclusive IP range. -- It also allows allocating an IP range in the CIDR format exclusive to the service clusters. - -The IP ranges for the pods, service clusters, and your IP network must not overlap with one another. This hotfix provides options to prevent node creation errors due to IP conflicts. - -# November 05, 2020 - Hotfixes 1.5.1 through 1.5.6 - -A host of hotfixes were applied for smoother on-premises operation: - -| Version | Feature | -| ------- | --------------------------------------------------------------------------------------------- | -| 1.5.6 | Added improvements for faster kCh usage calculation. | -| 1.5.5 | Patched the `govc vm.info` command to allow spaces in datacenter names. | -| 1.5.4 | Changes to use client updates instead of patches for _vendorcrd_ installations. | -| 1.5.3 | Improved resource utilization by deleting a machine when a node is not available. | -| 1.5.2 | Updates to keep sessions alive for SOAP and REST clients using the `keepalive` command. | -| 1.5.1 | Fixed a bug that caused a trailing line to be added in the `vsphere.conf` file. | - -# October 23, 2020 - Release 1.5.0 - -The 1.5.0 release of the Spectro Cloud platform consists of the following features and enhancements: - -- On-premises version of the Spectro Cloud platform for deployment into private VMware environments. -- Cloud accounts can now be created at the tenant scope, to allow accounts to be shared across all projects in the tenant. -- Cross-compute cluster deployment of Private Cloud Gateway clusters for high-availability purposes. -- SSH public key management to easily select the desired keys and share them across Kubernetes clusters within a project.
-- Improvements to cloud settings interface to simplify the creation of multiple failure domains during cluster provisioning. - -# September 10, 2020 - Release 1.2.0 - -With release 1.2.0, users get more control and added support: - -- Users can now access Kubernetes cluster certificates and renew them. -- For VMware, multi-domain support for private gateways is now available. -- Also for VMware, layout changes have been made to improve usability. - -# August 21, 2020 - Release 1.1.0 - -Release 1.1.0 is all about enhancing the user experience, providing tighter controls on clusters, and important bug fixes. - -- On the UI side, the login has been made faster. Additionally, users can now set up alerts to monitor cluster health. A `Revert to default values` button for cluster profiles is added. -- Clusters are easier to launch with the `Copy from Master` button; bad deployments are now prevented for certain instances; scaling is easier with the `Scale Strategy`. -- Private gateways can now be provisioned on static IPs with greater control on IP allocation using [IP pools](/clusters?clusterType=vmware_cluster#ipaddressmanagement). -- Updates to the CLI tool include more [flags](/registries-and-packs/spectro-cli-reference?cliCommands=cli_push#flags) to the `PUSH` command for forcibly overwriting registry packs. -- Bug Fixes: BET-806 related to SSO login and BET-403 related to validation of dependencies for availability zones have been resolved. - -# July 3, 2020 - Release 1.0.2 - -- Minor bug fixes for release 1.0.1. -- Updates to the [orchestration engine](https://www.spectrocloud.com/webinars/cluster-api-and-the-spectro-cloud-orchestration-engine/) for the new regions. -- Minor updates to the Istio integration. - -# July 3, 2020 - Release 1.0.1 - -- New Regions for AWS > Spectro Cloud is now available for deploying AWS clusters in the European regions. -- Changes to the pricing structures > more usage = lesser price per kCh. - -# June 23, 2020 - Release 1.0 - -The following features are included as part of Spectro Cloud 1.0: - -- Multi cluster deployment and lifecycle management of Kubernetes clusters across multiple cloud environments—AWS, Azure, and VMWare. -- Security-hardened, compliant, and conformant Kubernetes clusters out of the box. -- Cluster construction templates called Cluster Profiles. -- Platform extensibility through custom integration packs. -- Grouping of clusters logically into Projects for governance and control. -- Rich set of enterprise features such as granular RBAC, Single Sign-on, detailed Audit logs, etc. - - - -Spectro Cloud adopts relevant security best practices for operating systems, Kubernetes components, and cloud environments. All Spectro Cloud container images are scanned for CVEs before a release. While Spectro Cloud takes ownership of securing the cluster infrastructure, there may be additional 3rd party integrations installed on the Kubernetes clusters provisioned. Security of such 3rd party integrations, including their container images and associated configurations, is the responsibility of the provider. 
- diff --git a/content/docs/00-search.mdx b/content/docs/00-search.mdx deleted file mode 100644 index df2f0545d6..0000000000 --- a/content/docs/00-search.mdx +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: "Search Results" -metaTitle: "Search Results" -metaDescription: "Search Results" -icon: "folder" -hideToC: true -fullWidth: true -hiddenFromNav: true -isIntegration: false -category: [] -hideToCSidebar: true -hideMenuSidebar: true ---- - -import SearchResults from "shared/components/SearchResults"; - - diff --git a/content/docs/01-introduction.md b/content/docs/01-introduction.md deleted file mode 100644 index 86d7b37a19..0000000000 --- a/content/docs/01-introduction.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: "What is Palette?" -metaTitle: "What is Palette?" -metaDescription: "Learn what Spectro Cloud's Palette platform is, and how it reduce the complexities your encounter today with Kubernetes." -icon: "" -hideToC: true -fullWidth: false ---- - -# What is Palette? - -Palette is a complete and integrated platform that enables organizations to effectively manage the entire lifecycle of any combination of new or existing, simple or complex, small or large Kubernetes environments, whether in a data center or the cloud. - -With a unique approach to managing multiple clusters, Palette gives IT teams complete control, visibility, and production-scale efficiencies to provide developers with highly curated Kubernetes stacks and tools based on their specific needs, with granular governance and enterprise-grade security. - -Palette VerteX edition is also available to meet the stringent requirements of regulated industries such as government and public sector organizations. Palette VerteX integrates Spectro Cloud’s Federal Information Processing Standards (FIPS) 140-2 cryptographic modules. To learn more about FIPS-enabled Palette, check out [Palette VerteX](/vertex). - - -![Palette product high level overview](/docs_introduction_product-overview.png) - -## What Makes Palette Different? - -### Full-Stack Management - -Unlike rigid and prepackaged Kubernetes solutions, Palette allows users to construct flexible stacks from OS, Kubernetes, container network interfaces (CNI), and container storage interfaces (CSI) to additional add-on application services. As a result, the entire stack - not just the infrastructure - of Kubernetes is deployed, updated, and managed as one unit, without split responsibility from virtual machines, base OS, Kubernetes infra, and add-ons. - -### End-to-End Declarative Lifecycle Management - -Palette offers the most comprehensive profile-based management for Kubernetes. It enables teams to drive consistency, repeatability, and operational efficiency across multiple clusters in multiple environments with comprehensive day 0 - day 2 management. - -### Any Environment - -Palette has the richest coverage in supported environments that includes: -- Public Clouds: AWS, Azure, and Google Cloud (both IaaS and managed Kubernetes services EKS/AKS/GKE) -- Data Centers: VMware, OpenStack -- Bare Metal: Canonical MaaS -- Edge - - - -## What is Under the Hood? - -Palette uniquely extends and integrates the Cloud Native Computing Foundation (CNCF) open-source Cluster API project. Palette does this by providing comprehensive full-stack modeling and orchestration, governance, security, and day 0 - day 2 management capabilities. - -With Palette’s Cluster Profiles, teams can define full-stack clusters that include both the Kubernetes infrastructure and any add-on application services. 
Cluster Profiles enable a repeatable way to deploy and reuse clusters across any environment. Palette also enables importing existing Kubernetes environments and creating equivalent Cluster Profiles.
-
-
-![2-what-is-sc](/docs_introduction_palette-components.png)
-
-## Who Can Benefit From Palette?
-
-### Developers
-
-Development teams get the flexibility and freedom they are looking for to increase the speed of innovation, whether that is a cluster template with add-on application services or a choice of Kubernetes version with integrations such as logging, monitoring, and service mesh for their application development. They do not need to worry about Kubernetes configuration and can focus on what matters.
-
-### IT Operations and SREs
-
-Declarative management makes life easier for IT teams, with consistency, repeatability, and all the enterprise-grade controls and governance they need, especially when moving to production. [Cluster Profiles](/glossary-all#clusterprofile) enable them to define and reuse full-stack clusters and support them across the entire lifecycle without having to write scripts, as well as integrate with existing tools and methodologies.
-
-
-### IT Executives
-
-With an open and enterprise-grade platform, IT leaders can get peace of mind without being locked into proprietary orchestration technologies or one-size-fits-all solutions. This helps lower the total cost of ownership (TCO) and reduce operational risk.
-
-
-# Next Steps
-Learn more about Palette and how it can improve the Kubernetes experience for you and your organization. Try [Palette](https://console.spectrocloud.com/) for free today and experience a better way of working with Kubernetes.
-
- -- [Try Palette for Free](/getting-started/palette-freemium) -- [App Mode and Cluster Mode](/introduction/palette-modes) -- [Palette Architecture](/architecture/architecture-overview/) diff --git a/content/docs/01-introduction/05-palette-modes.md b/content/docs/01-introduction/05-palette-modes.md deleted file mode 100644 index 3f1df9fa78..0000000000 --- a/content/docs/01-introduction/05-palette-modes.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: "App Mode and Cluster Mode" -metaTitle: "App Mode and Cluster Mode" -metaDescription: "Learn about the two modes available in Palette and how they benefit your Kubernetes experience." -icon: "" -hideToC: true -fullWidth: false ---- - -# Palette Modes -Palette supports two consumption modes - each aimed at different use cases and, potentially, different personas. The two modes are _App Mode_ and _Cluster Mode_. These modes can be used separately but often work together by sharing resources or relying on resources that each provides. - -![App Mode and Cluster Mode](/docs_introduction_palette-modes.png) - -# What is Cluster Mode? - -Cluster mode gives you the ability to provision Kubernetes clusters to various platforms and cloud providers through Palette. Palette manages and maintains the lifecycle of these Kubernetes clusters. We call a Kubernetes cluster that Palette manages and deploys a [_Host Cluster_](/glossary-all#hostcluster). - -Cluster mode is frequently leveraged by personas such as platform engineers, infrastructure engineers, system administrators, and others who are in a role that requires them to support infrastructure. These personas frequently leverage cluster mode to specify attributes that should make up the Kubernetes cluster, and where and how the cluster should be deployed. These operators leverage a concept we call [_Cluster Profiles_](/cluster-profiles). Other users such as developers, can also leverage cluster mode and cluster profiles to deploy a Kubernetes cluster for ad-hoc purposes, such as research efforts. - -When you operate in cluster mode, you have the ability to specify projects to control the scope of the Kubernetes cluster. The ability to specify projects is beneficial when segmenting resources for different teams. For example, a project titled “ml-modeling” could belong to a team focused on machine learning. In the project “modeling,” you could deploy various Kubernetes clusters for the machine learning team to conduct their work. These Kubernetes clusters could also be grouped together (Cluster Group) if grouping of similar resources is needed. - -Other teams could be prevented from accessing the resources that belong to the project “modeling” by not being a member of the project. Palette offers role-based access control (RBAC) that enables more fine-grained control of resources. Lastly, you can also view the utilization of resources from the project level, which is helpful when understanding utilization and reviewing costs. - -Another important feature of cluster mode is the ability to allow specific host clusters to support Palette Virtual Clusters. Virtual clusters are Kubernetes clusters that run as nested clusters within an existing host cluster. A virtual cluster looks and feels like a normal Kubernetes cluster, except that it resides inside a larger Kubernetes cluster or host cluster often deployed in a cloud provider or on-prem. You can control the resources a virtual cluster is allocated, such as CPU, memory, and storage. 
- -Virtual clusters are powerful and beneficial to teams due to their characteristics: -- Look and feel exactly like a host cluster -- Are up and running within minutes -- Removes the infrastructure overhead for downstream consumers -- Reduces the need to set up or manage complicated Kubernetes namespaces and roles. Teams can instead receive their own virtual cluster without worrying about permissions or affecting other teams’ resources. - -Virtual clusters help reduce development time by allowing downstream consumers to focus more on application development versus addressing infrastructure overhead. You can also [pause and resume](/devx/palette-virtual-clusters/pause-restore-virtual-clusters) virtual clusters, which helps significantly in reducing costs. App mode heavily leverages virtual clusters. - - -# What is App Mode? - -App Mode is a unique experience that Palette provides in that it removes Kubernetes infrastructure overhead as much as possible. In App mode, you can focus on creating and managing [_App Profiles_](/devx/app-profile). App profiles are declarative templates that you use to define all the required services, containers, and databases that make up an application. Once you define an app profile, you can deploy your application to any Palette Virtual Cluster by specifying the respective app profile. - -App mode comes with an out-of-the-box cluster group managed by us here at Spectro Cloud called _beehive_. This cluster group, which under the cover is a collection of Kubernetes clusters, is configured to support Palette Virtual Clusters. As a consumer, you can deploy a new virtual cluster to the beehive cluster group and get started with a Kubernetes cluster in minutes. - -App mode's ability to get you started with a Kubernetes cluster in minutes makes it a powerful development tool. You can use the virtual clusters temporarily, such as for testing, ad-hoc development, or any other scenario where you want a short-lived Kubernetes environment up and running quickly. - -Alternatively, you could use app mode to offer your own Palette-managed host clusters as a PaaS experience to downstream consumers. This concept is easier explained through an example. Assume you are a system administrator, and you want to expose Kubernetes to various in-house development teams. You could deploy several Kubernetes clusters to various platforms and create a " development " cluster group. You also ensured every cluster is enabled for Palette Virtual Cluster by selecting the option before deployment. You can now direct your organization members to use app mode and create Palette Virtual Clusters as needed, or you can create virtual clusters ahead of time for them. The organization members or downstream consumers can now focus on creating app profiles and deploying their applications. You have essentially enabled a Kubernetes PaaS experience for your organization. - -As the consumer of app mode, you simply focus on deploying your application to a Kubernetes cluster by specifying the app profile. The overhead of managing infrastructure has essentially been removed for you, thus freeing up your time to focus on what matters the most, developing an application that solves business problems. - - -# How to Access Each Mode? - -You can quickly toggle between **App Mode** and **Cluster Mode** by navigating to the **User Menu** at top right and selecting the mode you want. - - -# App Mode or Cluster Mode? 
-
-You might ask yourself, "How do I know which mode I should use?" The answer comes down to your objective.
-
-- Choose cluster mode if you want to enable Kubernetes capabilities for others or configure Palette. Cluster mode provides all the configuration options a power user wants.
-- Choose app mode if you simply want to deploy an application using Kubernetes without the infrastructure overhead. If you just want to try out Palette, app mode is a good starting point.
-
-
-App Mode may not meet your needs if your application requires a lot of resources. The Palette-managed cluster group, called Beehive, imposes a resource limitation that could prevent a resource-heavy application from launching successfully. Review the [Resource Quota](/devx/manage-dev-engine/resource-quota) documentation to understand App Mode limits. If you already have Palette-managed Kubernetes host clusters deployed, or available to you as a cluster group with Palette Virtual Clusters enabled, then App Mode is a great fit because you can focus on the developer experience.
-
-Below are some of the characteristics of each mode to help you better understand the differences between the two.
-
-
-- App Mode
-  - Optimized for the developer experience
-  - You're a builder who is not part of an organization and needs quick access to a Kubernetes cluster
-  - Expose a PaaS-like experience to organizational members by leveraging Palette-managed Kubernetes clusters
-  - Deploy applications without worrying about Kubernetes infrastructure
-  - Scope of concerns limited to app profiles
-
-
-- Cluster Mode
-  - Optimized for power users and those who are comfortable with deploying Kubernetes clusters
-  - Used to deploy host clusters to different platforms (VMware, AWS, GCP, Azure, etc.)
-  - Deploy Edge clusters
-  - Create cluster groups
-  - Create cluster profiles
-  - Create projects, workspaces, and teams
-  - Leverage specialized hardware for Kubernetes workloads
-  - Audit logging
-  - Enable cloud providers and other platforms
-  - Configure registries
-
-
-# Next Steps
-
-Get started with [Palette](https://console.spectrocloud.com/) today and deploy an application through [app mode](/devx). Or create a Kubernetes cluster on your favorite platform and let Palette handle the challenges of maintaining Kubernetes clusters by leveraging cluster mode and [cluster profiles](/cluster-profiles).
\ No newline at end of file
diff --git a/content/docs/02-getting-started.md b/content/docs/02-getting-started.md
deleted file mode 100644
index e8494cb879..0000000000
--- a/content/docs/02-getting-started.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: "Getting Started"
-metaTitle: "Getting Started"
-metaDescription: "Spectro Cloud Getting Started"
-icon: "overview"
-hideToC: false
-fullWidth: false
----
-
-import Tabs from 'shared/components/ui/Tabs';
-import WarningBox from 'shared/components/WarningBox';
-import InfoBox from 'shared/components/InfoBox';
-import PointsOfInterest from 'shared/components/common/PointOfInterest';
-import Tooltip from "shared/components/ui/Tooltip";
-
-
-
-## Quick Start with Spectro Cloud Palette
-
-This page gives a quick overview of getting started with Spectro Cloud Palette. We introduce Palette's complimentary subscription plans, features, workflow, and user experience session.
-
-
-The first step towards adopting Palette in your organization is to create a login. We want users to form a first impression of the product before making a purchase decision, so we provide the following options:
-
-
-[Palette Freemium](/getting-started/palette-freemium#trypaletteforfree)
-
-[Free Cloud Credit](/getting-started/palette-freemium#freecloudcreditwithpalette)
-
-
-After successful account creation, Palette presents a well-organized Product Onboarding Workflow to streamline product adoption. The onboarding process explains our product features and is followed by a Palette experience session, which provides an easy-to-follow deployment pipeline for launching your first cluster successfully. Explore more about the feature:
-
-[Product Onboarding Workflow](/getting-started/onboarding-workflow#paletteonboardingworkflow)
diff --git a/content/docs/02-getting-started/01-palette-freemium.md b/content/docs/02-getting-started/01-palette-freemium.md
deleted file mode 100644
index cdb43f9601..0000000000
--- a/content/docs/02-getting-started/01-palette-freemium.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-title: "Try Palette for Free"
-metaTitle: "About Free Tier"
-metaDescription: "Palette Free Tier, Freemium, Free Tier"
-icon: ""
-hideToC: true
-fullWidth: false
----
-
-# Try Palette for Free
-
-**Palette Free Tier** encourages new users to explore Palette without having to make a purchase decision before they are ready. With unlimited kilo-Core-hour usage for the first month, you get full access to the Spectro Cloud Palette platform to create, deploy, and manage Palette resources.
-
-The second month and every month thereafter, the customer is granted a complimentary 25 kilo-Core-hours (kCh) to use freely to manage up to five Kubernetes clusters with Palette.
-
-You can continue in the Free Tier as long as you stay under 25 kCh of consumption. If you go over the 25 kCh usage limit, your clusters remain visible but convert to read-only. Palette sends alert messages when the 25 kCh limit is crossed, and if more than five active clusters are launched, further deployments are restricted. Enter a payment method, and all usage and capabilities are restored.
-
-
-## Free Cloud Credit with Palette
-
-Palette provides a cloud account, with the required credentials, for user deployments. This free cloud credit is offered to customers who do not have access to a cloud account and want to try Palette before committing to the Palette platform. Perhaps a customer finds themselves without sufficient permissions to input their existing cloud credentials, or is exploring new ways to manage their services. The Free Cloud Credit, granted through the Palette account, is a great way to begin exploring the Palette platform.
-
-
-## Request a Free Cloud Account
-
-To request authorization for the Spectro Cloud Free Cloud Credit program, connect via the [Slack Spectro Cloud Community](https://join.slack.com/t/spectrocloudcommunity/shared_invite/zt-g8gfzrhf-cKavsGD_myOh30K24pImLA) or [email](mailto:developer@spectrocloud.com) and ask about the Free Cloud Credit program. $100 of free cloud credit will be granted for use with Spectro Cloud's Palette platform. Users are encouraged to monitor usage and the expenditure percentage through Palette; as the credit nears exhaustion, a message will remind them of the usage status.
-
-
-## Managing a Free Cloud Account
-
-The Free Cloud Credit ends when the $100 of free cloud credit granted by Spectro Cloud is consumed. At that time, all resources created during the trial period will stop running, and clusters will be marked for deletion and lost, so make sure to back up anything you need. Reminders sharing the state of the free cloud usage will be sent at the 50%, 90%, and 100% consumption stages.
-
-
-### Upgrade to a Paid Cloud Account
-
-Upgrade to a paid cloud account at any time after trying the Free Cloud Credit service. Create your own cloud account and replace the Free Cloud Credit with your own cloud account information.
-
-
-# Kilo-Core-hour Calculations
-
-Usage is calculated in kilo-Core-hours (kCh), a measurement of the number of CPU cores multiplied by time. For instance, for a 4-node cluster with 16 CPU cores per node, one day of management equates to 4 x 16 CPU cores x 24 hours = 1,536 Core-hours, or about 1.54 kCh. A short calculation sketch is included at the end of this page.
-
-# Next Steps
-
-To get started with Palette Free Tier, visit the [signup link](https://www.spectrocloud.com/free-trial).
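To make the kilo-Core-hour arithmetic above concrete, here is a minimal Python sketch of the calculation. The function name and the monthly comparison against the 25 kCh Free Tier allowance are illustrative, not part of Palette.

```python
def kilo_core_hours(nodes: int, cores_per_node: int, hours: float) -> float:
    """Usage in kilo-Core-hours (kCh): total CPU cores multiplied by time, divided by 1,000."""
    return nodes * cores_per_node * hours / 1000


# The example from above: a 4-node cluster with 16 CPU cores per node, managed for one day.
daily = kilo_core_hours(nodes=4, cores_per_node=16, hours=24)
print(f"{daily:.2f} kCh per day")  # ~1.54 kCh

# The same cluster over a 30-day month, compared with the 25 kCh Free Tier allowance.
monthly = kilo_core_hours(nodes=4, cores_per_node=16, hours=24 * 30)
print(f"{monthly:.1f} kCh per month (free allowance: 25 kCh)")
```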
-
-
diff --git a/content/docs/02-getting-started/03-dashboard.md b/content/docs/02-getting-started/03-dashboard.md
deleted file mode 100644
index 04d3f7d21f..0000000000
--- a/content/docs/02-getting-started/03-dashboard.md
+++ /dev/null
@@ -1,75 +0,0 @@
----
-title: "Palette Dashboard"
-metaTitle: "Palette Dashboard"
-metaDescription: "Spectro Cloud Palette Dashboard"
-icon: ""
-hideToC: false
-fullWidth: false
----
-
-import Tabs from 'shared/components/ui/Tabs';
-import WarningBox from 'shared/components/WarningBox';
-import InfoBox from 'shared/components/InfoBox';
-import PointsOfInterest from 'shared/components/common/PointOfInterest';
-import Tooltip from "shared/components/ui/Tooltip";
-
-# Palette Dashboards
-
-This section is a tour of the two main dashboards of the Tenant console: the **Project Dashboard** and the **Admin Dashboard**. The Project Dashboard is used to perform operations related to setting up your Kubernetes clusters, such as setting up cluster profiles, creating cloud accounts, and deploying clusters. The Admin Dashboard is used for administrative tasks such as setting up Single Sign-On (SSO), creating users and teams, setting up Role-Based Access Control (RBAC), and adding package registries. The Admin Dashboard is only available to users who have the Tenant Admin role. Admin users can toggle between the Project Dashboard and the Tenant Admin Dashboard. Users without the Tenant Admin role can only see the Project Dashboard.
-
-## Project Dashboard
-
-#### Non-admin User View
-
-Upon login, the dashboard shows the views available for a non-admin user.
-
-1. The **Projects** button helps organize cluster resources into logical groupings. From the dropdown, you can switch between projects.
-
-
-2. The left panel contains the **Project Overview**, which gives an overview of the resource and cost consumption of the selected project.
-
-
-3. **Cluster Profiles** of the Default Project are shown. Cluster profiles are templates created with the preconfigured layers/components needed for cluster deployments. The left pane in this dashboard also contains options for **Clusters**, the Kubernetes clusters in Palette that are instantiated from cluster profiles.
-
-
-4. **Workspaces** enable the coupling of relevant namespaces across multiple clusters to manage access and obtain cost and workload visibility by application or team.
-
-
-5. **Audit Logs** show the log of activities with a timeline.
-
-
-6. The **Settings** section of the Default dashboard relates to the cloud account settings, backup location settings, and alerts. This is an important distinction from the settings under the Admin Dashboard. It also allows the user to upload SSH keys for safekeeping. These keys can be recalled when deploying a cluster.
-
-
-
- ![project-dashboard](/project-dashboard.png)
-
-
-
-## Tenant Admin Dashboard
-
-
-The menu within the Tenant Admin Dashboard contains the **Projects** button. This is different from the Projects menu in the Default Dashboard. Within the Tenant Admin Dashboard, the Projects button provides access to modifying a project itself (edit/configure/delete and the overall status), whereas the button in the Default Dashboard provides access to the cluster profiles inside the project.
-
-1. The **Cluster Profiles** button in the Tenant Admin Dashboard provides the ability to create and manage global cluster profiles that can be used for cluster creation across all projects within a tenant.
-
-
-2. **Roles** (collections of permissions) and **Permissions** (associated with specific actions within the platform), as well as **Users** (members of a tenant who are assigned roles that control their access within the platform) and **Teams** (groups of users), allow the admin to set or restrict these attributes for one or more team members. See the RBAC section for more details; Palette's RBAC design allows granting granular access to resources and their operations.
-
-
-3. The **Audit Logs** section in the Admin Dashboard allows the admin to track user interaction with application resources, along with the timeline, for all projects and users. For admin users, the **Audit Logs** button is also visible for each project, where the admin can view the logs of resources specific to that project.
-
-
-4. Finally, the **Tenant Admin** settings under the Admin Dashboard provide access to pack registries (a pack is a collection of files such as manifests, Helm charts, Ansible roles, and configuration files), Private Cloud Gateways (a Palette component that enables communication between Palette's management console and a VMware-based private data center), and [SAML SSO](/user-management/saml-sso) configurations.
- - - -![admin-dashboard](/admin-dashboard.png) diff --git a/content/docs/02-getting-started/04-onboarding-workflow.md b/content/docs/02-getting-started/04-onboarding-workflow.md deleted file mode 100644 index e81cf824f2..0000000000 --- a/content/docs/02-getting-started/04-onboarding-workflow.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: "Palette Onboarding Workflow" -metaTitle: "Palette Onboarding Workflow" -metaDescription: "Palette Onboarding Workflow" -icon: "" -hideToC: true -fullWidth: false -hideToCSidebar: true ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Palette Onboarding Workflow - -Palette offers a product tour to help you get familiar with the console and many of its key components. - -## Product Tour -Upon a successful sign-in to our platform, we start the onboarding process with a product tour—an introduction to the platform, to familiarize the users with our Palette features. - - -## Start your Palette Experience - -![user-experience.png](/user-experience.png) - - -The product tour is followed by a Palette experience session. Here we make sure that our users are guided through a successful deployment pipeline in their first use, instead of them just figuring things out along the way towards cluster creation. The major components of this session are as follows: - -* [Create New Cluster](/clusters) - - * Create a new cluster from scratch using any cloud environment or bare metal. - - * A system-level cluster profile is included for the users to explore the Palette functionalities easier and faster. - -* [Import Cluster](/clusters/imported-clusters) - * Bring your own cluster into Palette in two easy steps. - -* Out-of-the-box (OOTB) Configurations: - * Try one of our out-of-the-box cluster profile configurations applicable on your own cluster or in our Palette Virtual Cluster environment. - - -Once the user experience session is finished, the user will be familiar with Palette's workflow and deployment pipeline. This section of the document is a quick start to the deployment process with simple instructions to jump start the Palette journey. The different Palette features and Day-2 operations are detailed in the remainder of this documentation site. - - - -### Connect with us -* [Slack](https://spectrocloudcommunity.slack.com/join/shared_invite/zt-g8gfzrhf-cKavsGD_myOh30K24pImLA#/shared-invite/email) - -* support@spectrocloud.com - - -# Palette Workflow - -Palette requires the creation of a cluster profile before a workload cluster can be created. This is because [cluster profiles](/cluster-profiles) are -templates created with preconfigured layers that define the required dependencies, such as the Operating System (OS) and Kubernetes version for your cluster. The cluster profile is a core component of Palette. You can learn more about cluster profiles by reviewing the [cluster profile](/cluster-profiles) reference page. 
-
-# Resources
-
-* [Create your Cluster Profile](/cluster-profiles/task-define-profile/#creatingclusterprofiles)
-
-
-* [Create your Cluster](/clusters)
-
-
-* [Imported Clusters](/clusters/imported-clusters)
-
-
-* [Cluster Management](/clusters/cluster-management/#managecl)
-
diff --git a/content/docs/02.5-architecture.md b/content/docs/02.5-architecture.md
deleted file mode 100644
index e15464a272..0000000000
--- a/content/docs/02.5-architecture.md
+++ /dev/null
@@ -1,36 +0,0 @@
----
-title: "Architecture"
-metaTitle: "Palette Architecture"
-metaDescription: "A deep dive into Palette's architecture and technical concepts"
-icon: "cubes"
-hideToC: true
-fullWidth: false
-hideToCSidebar: true
----
-
-import {Intro} from "shared/components"
-import WarningBox from 'shared/components/WarningBox';
-
-
-
-# Architecture
-
-Palette supports three different architecture models: multi-tenant SaaS, dedicated SaaS, and self-hosted. To learn more about Palette's architecture and the various components that make up each model, visit the resources listed below.
-
- -- [Architecture Overview](/architecture/architecture-overview/) - - -- [Provisioning Order of Operations](/architecture/orchestration-spectrocloud) - - -- [Namespaces and Pods](/architecture/palette-namespaces-podes) - - -- [Network Ports](/architecture/networking-ports) - - -- [IP Addresses](/architecture/palette-public-ips) - -![Architecture image with on-prem, sass](/docs_architecture-overview_components-overview.png) \ No newline at end of file diff --git a/content/docs/02.5-architecture/01-architecture-overview.md b/content/docs/02.5-architecture/01-architecture-overview.md deleted file mode 100644 index ec84da4926..0000000000 --- a/content/docs/02.5-architecture/01-architecture-overview.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: "Overview" -metaTitle: "Spectro Cloud Architecture" -metaDescription: "Spectro Cloud Architecture Overview" -icon: "" -hideToC: true -fullWidth: false ---- - -import WarningBox from 'shared/components/WarningBox'; - -# Deployment Architecture Overview - -Palette is available in three flexible deployment models: - -* **Multi-tenant SaaS**: The management plane is hosted in AWS across three regions (us-east-1, us-west-1, us-west-2) and managed by Spectro Cloud. Each customer will occupy a tenant. The Spectro Cloud operation team controls when to upgrade the management plane. - -* **Dedicated SaaS**: The management plane is hosted in a cloud/region specified by the customer in Spectro Cloud’s cloud account with a dedicated instance managed by Spectro Cloud. The customer can decide when to upgrade the management plane. - -* **Self-hosted**: The management plane is hosted in the customer’s environment. It can be the customer’s on-prem VMware vSphere, OpenStack, bare metal, or in a public cloud using the customer’s cloud account. - -
- -![A diagram of Palette deployment models](/architecture_architecture-overview-deployment-models.png) - -
- - -## Product Security - -At Spectro Cloud, we recognize the importance of robust security measures in today's rapidly evolving digital landscape. As the provider of our cutting-edge SaaS and self-hosted Palette product, our commitment to safeguarding your data and ensuring the integrity of our services is paramount. Learn more about Palette security by reviewing the [Security](/security) section. - -
-
-## SaaS Architecture and Data Flow
-
-The Palette SaaS platform can manage public clouds (AWS, Azure, Google Cloud) and on-premises data centers (VMware, OpenStack, bare metal). The architecture and data flow differ slightly based on whether the target environment is a public cloud or an on-premises data center.
-
-### SaaS to Public Clouds
-
-The following diagram illustrates the data flow for the Palette SaaS platform to manage an EKS cluster using the user's cloud account in AWS:
-
-
-![spectro_cloud](/architecture_architecture-overview_saas.png)
-
-There are two main data flows represented: the provisioning flow (red) and the monitoring flow (green).
-
-* **Provisioning data flow**: A tenant user, from the browser or an API client (e.g., the Terraform provider), configures the Cluster Profile, the Cloud Configuration (e.g., which cloud account to use and cloud-specific placement settings such as VPC and subnet), and the cluster specifications (e.g., cluster size, node instance type, etc.). This information is sent to Palette. In turn, Palette will invoke the cloud API to talk to the cloud endpoint using the specified cloud credentials to provision the Kubernetes cluster. Once the cluster is provisioned, a Palette management agent will be pushed and installed in the cluster. This agent will receive the Cluster Profile and Cluster Specifications as the desired state from SaaS. The agent will further inspect the desired state and pull additional add-on integrations from Palette's public package registry, or optionally a private package registry hosted by the tenant user. Once all required add-on integrations are installed, the agent will send a message to SaaS to indicate the full-stack K8s provisioning is completed.
-
-
-* **Monitoring data flow**: The agent will periodically report the cluster health status back to the Palette SaaS platform. The agent will also stay in a watch loop to check whether the cluster's state matches the declared desired state. If there is any deviation (e.g., a worker node is accidentally shut down by a user directly from the cloud console), the agent can either send an alert message or, based on the policy, perform auto-reconciliation/self-healing to bring the cluster back in line with the desired state. If there is an updated Cluster Profile or Cluster Spec, the agent will receive the updated desired state from SaaS. It will then enforce the desired state by making cluster configuration changes accordingly.
-
-### SaaS to Private Clouds / Data Center / Bare Metal
-For private clouds like VMware, the Palette SaaS platform does not have direct access to the private cloud endpoint (e.g., vCenter), so one extra component, the Palette Private Cloud Gateway, is deployed in the private cloud environment to act as the local orchestrator and the proxy between Palette's SaaS platform and the cloud endpoint. The following diagram illustrates the data flow for the Palette SaaS platform to manage an on-prem VMware private data center:
-
-
-![spectro_cloud](/architecture_architecture-overview_on-prem.png)
-
-
-## Self-Hosted Architecture and Data Flow
-Although the Palette SaaS platform fully supports both public clouds and data centers, some customers, especially those in regulated industries or with air-gapped environments, may prefer to install Palette in their own environment behind the firewall, so that they can control the platform upgrade cycles and ensure no sensitive data is exposed. For these use cases, Palette supports a self-hosted on-premises installation.
The platform updates and add-on integration contents can optionally be downloaded from an on-prem private repository instead of being pulled from Palette's hosted public repository.
-
-![spectro_cloud](/architecture_architecture-on-prem-detailed.png)
diff --git a/content/docs/02.5-architecture/03-orchestration-spectrocloud.md b/content/docs/02.5-architecture/03-orchestration-spectrocloud.md
deleted file mode 100644
index 0551e839e9..0000000000
--- a/content/docs/02.5-architecture/03-orchestration-spectrocloud.md
+++ /dev/null
@@ -1,84 +0,0 @@
----
-title: "Order of Operations"
-metaTitle: "Provision and Order of Operations in Palette"
-metaDescription: "The methods of workload cluster provisioning for K8S clusters with Palette"
-icon: ""
-hideToC: false
-fullWidth: false
----
-
-import Tabs from 'shared/components/ui/Tabs';
-import WarningBox from 'shared/components/WarningBox';
-import InfoBox from 'shared/components/InfoBox';
-import PointsOfInterest from 'shared/components/common/PointOfInterest';
-import Tooltip from "shared/components/ui/Tooltip";
-
-# Order of Operations
-
-Spectro Cloud Palette provisions standard, upstream Kubernetes clusters using [Cluster API](https://cluster-api.sigs.k8s.io/).
-
-Cluster API is a Kubernetes sub-project focused on providing declarative APIs and tooling to simplify provisioning, upgrading, and operating multiple Kubernetes clusters.
-
-Cluster API declaratively manages the lifecycle of a cluster (creation, scaling, upgrade, and deletion) and helps automate the process of cluster lifecycle management for platform operations. Cluster API also helps ensure consistent and repeatable cluster deployments across multiple infrastructure environments.
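The declarative model described above can be illustrated with a small sketch. This is not Cluster API or Palette agent code; it only shows the pattern both follow: compare the observed state of a cluster against the declared desired state and act on the difference. The state keys and values are made up for the example.

```python
def reconcile(desired: dict, observed: dict) -> list:
    """Return the actions needed to converge the observed state to the desired state."""
    actions = []
    for key, want in desired.items():
        have = observed.get(key, 0)
        if have != want:
            actions.append(f"adjust {key}: {have} -> {want}")
    return actions


desired = {"control_plane_nodes": 3, "worker_nodes": 3}
observed = {"control_plane_nodes": 3, "worker_nodes": 2}  # e.g., a node was deleted out of band

# An agent would run this comparison in a loop and apply the resulting actions.
print(reconcile(desired, observed))  # ['adjust worker_nodes: 2 -> 3']
```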
- -## Workload Cluster Provisioning - -![workload_cluster_provisioning.png](/architecture_orchestartion-spectrocloud_provision-flow.png) - - -
- -
-
-1. A new K8s cluster request from the user is submitted to the cluster management system.
-
-
-2. Palette creates the Cluster API (CAPI) custom-resource specifications for the target cloud, e.g., in VMware this would translate to: Cluster, vSphereCluster, KubeadmControlPlane (with replica 1), and VSphereMachineTemplate. These resources are created in the management cluster.
-
-
-3. Cluster API and the corresponding Cluster API provider, e.g., cluster-api-provider-vsphere, provision the first control-plane node CP-A on the target cloud.
-
-
-4. When CP-A is operational, the management platform will install a Palette agent into the workload cluster and then perform a pivot of the Cluster API resources.
-
-
-5. The CP-A agent will retrieve the latest specifications for the cluster, e.g., 3 control-plane nodes and 3 worker nodes. CP-A will generate and update the remaining CAPI resources, e.g., update replicas to 3 for the KubeadmControlPlane and create the workers' MachineDeployment or VSphereMachineTemplate. Cluster API running in CP-A will provision the remaining control-plane and worker nodes.
-
-
-6. The Palette agent will install all the additional add-ons as specified by the cluster's cluster profile (e.g., logging, monitoring, security).
-
-
- We do not hard-code credentials. Palette uses the cloud-init process to inject the user-defined SSH keys into the clusters.
- Log in using the command:
- `ssh -i sshKeyHere spectro@host`
-
-
-
-
-## Why Palette Pivots
-
- -Palette's decentralized model is based on a "decentralized management - local policy enforcement" scalable architecture. - -![distributed_orchestration.png](/architecture_orchestartion-spectrocloud_distributed-flow.png) - - -
-
-As part of the workload K8s cluster provisioning, only the first control-plane node is launched by Cluster API, running in the Palette management cluster. Once the control-plane node is operational, Cluster API resources are _pivoted_ from the management platform into the target workload cluster.
-
-The target workload cluster is then responsible for provisioning and maintaining the remaining control-plane and worker nodes. All Day-2 operations that result in node changes, including OS/K8s upgrades, scaling, and K8s certificate rotation, are triggered by changes to the Cluster API resources in the target workload cluster.
-
-Palette pivots these clusters for several reasons related to scalability and availability:
-
-* **Scalability** - The management platform scales to meet the demand of all your workload clusters as the number of tenant clusters and nodes increases.
-
-* **Resiliency** - Even if the management platform were to experience an outage, the workload clusters would retain their resiliency capabilities: auto-recovery, launching of new nodes on failures, auto-scaling, and other policies still work.
-
-* **Intermittent network resiliency** - The design supports use cases where the workload clusters can continue to operate under intermittent or disconnected network conditions.
-
-
\ No newline at end of file diff --git a/content/docs/02.5-architecture/04-networking-ports.md b/content/docs/02.5-architecture/04-networking-ports.md deleted file mode 100644 index f3db067a38..0000000000 --- a/content/docs/02.5-architecture/04-networking-ports.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: "Network Ports" -metaTitle: "Network Communication and Ports Management Platform on Prem" -metaDescription: "Port-Direction-Purpose Management Platform and Workload Clusters" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Network Communication and Ports - - -Depending on what version of Palette you are using, the internal architecture and network communication will be different. Before Palette 4.0.0 the management platform communicated with the workload cluster via NATS. Starting with Palette 4.0.0, the management platform communicates with the workload cluster via gRPC. Use the tabs below to view the network communication and ports for each architecture. - - -
- - - - -## SaaS Network Communications and Ports - - - - -The following ports must be reachable from a network perspective for Palette SaaS to function correctly. - -![SaaS Network Diagram with ports](/architecture_networking-ports_saas-network-diagram.png "title=SaaS Network Diagram with ports") - -
- -#### SaaS Managed - - -![SaaS network diagram displaying the network paths for edge](/architecture_networking-ports_saas-network-diagram-edge.png) - - -
- - - - -NATS is deprecated and will be removed in a future release. Starting with Palette 4.0.0, gRPC is used for all communication between the management platform and the workload cluster. - - - - -The following ports must be reachable from a network perspective for Palette to operate properly. - -## Management Platform - -|Port |Direction|Purpose | -|:---------------|:---------|:-----------------------| -|HTTPS (tcp/443) |INBOUND |Browser/API access to management platform .| -|HTTPS (tcp/443) |INBOUND |gRPC communication between Palette and the workload cluster.| -|NATS (tcp/4222) |INBOUND |Agent running inside connecting to management platform [Deprecated]| - - -## Workload Cluster - - -|Port |Direction | Purpose| -|:---------------|:---------|:--------------| -|HTTPS (tcp/443) |OUTBOUND | API access to management platform and gRPC| -|HTTPS (tcp/443) |OUTBOUND | gRPC, Registry (packs, integrations), Pack containers, Application Updates| -|NATS (tcp/4222) |OUTBOUND |Registry (packs, integrations), Pack containers, Application Updates [Deprecated]| - - - -You can expose inbound port 22 for SSH if you would like to access your cluster nodes for troubleshooting remotely. This is entirely optional and not required for Palette to operate appropriately. - - - - -## Self-Hosted Network Communications and Ports - -The following ports must be reachable from a network perspective for Palette self-hosted to function correctly. - - -![On-prem network diagram](/architecture_networking-ports_network-diagram.png "#title="network diagram") - -
- - - - -NATS is deprecated and will be removed in a future release. Starting with Palette 4.0.0, gRPC is used for all communication between the management platform and the workload cluster. - - - -## Management Platform - -|Port |Direction|Purpose | -|:---------------|:---------|:-----------------------| -|HTTPS (tcp/443) |INBOUND |Browser/API access to management platform, gRPC| -|NATS (tcp/4222) |INBOUND |Message Bus for workload clusters [Deprecated]| -|HTTPS (tcp/443) |OUTBOUND |vSphere vCenter API, Registry (packs, integrations), Pack containers, app updates, gRPC| -|HTTPS (tcp/6443)|OUTBOUND |Workload K8s cluster API Server| - - -## Workload Cluster - - -|Port |Direction | Purpose| -|:---------------|:---------|:--------------| -|HTTPS (tcp/443) |OUTBOUND | API access to management platform| -|NATS (tcp/4222) |OUTBOUND |Agent communication via message bus. [Deprecated] | -|HTTPS (tcp/443) |OUTBOUND |vSphere vCenter API, gRPC, Registry (packs, integrations), Pack containers, Application updates| - - - -You can expose inbound port 22 for SSH if you would like to access your cluster nodes for troubleshooting remotely. This is entirely optional and not required for Palette to operate appropriately. - - - - - -
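As a quick sanity check of the outbound requirements in the tables above, a small script like the following can verify that the required ports are reachable from a workload cluster network. The hostnames are examples only (they match the SaaS domains listed later in this documentation); substitute your own management platform address, and drop the NATS entry on Palette 4.0.0 and later.

```python
import socket

# Example endpoints and ports taken from the tables above; adjust to your environment.
CHECKS = [
    ("api.spectrocloud.com", 443),       # HTTPS: API access and gRPC to the management platform
    ("message.spectrocloud.com", 4222),  # NATS: only needed for releases before Palette 4.0.0
]

for host, port in CHECKS:
    try:
        with socket.create_connection((host, port), timeout=5):
            print(f"OK    {host}:{port} is reachable")
    except OSError as err:
        print(f"FAIL  {host}:{port} is not reachable ({err})")
```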
- - - -## SaaS Network Communications and Ports - -The following ports must be reachable from a network perspective for Palette SaaS to function correctly. - -![SaaS Network Diagram with ports](/architecture_networking-ports_network-diagram_nats.png "title=SaaS Network Diagram with ports") - -
- -#### SaaS Managed - - -![SaaS network diagram displaying the network paths for edge](/architecture_networking-ports_saas-network-diagram-edge_nats.png) - - -The following ports must be reachable from a network perspective for Palette to operate properly. - -## Management Platform - -|Port |Direction|Purpose | -|:---------------|:---------|:-----------------------| -|HTTPS (tcp/443) |INBOUND |Browser/API access to management platform| -|NATS (tcp/4222) |INBOUND |Agent running inside connecting to management platform| - - -## Workload Cluster - - -|Port |Direction | Purpose| -|:---------------|:---------|:--------------| -|HTTPS (tcp/443) |OUTBOUND | API access to management platform| -|NATS (tcp/4222) |OUTBOUND |Registry (packs, integrations), Pack containers, Application Updates| -|NATS (tcp/4222) |OUTBOUND |Registry (packs, integrations), Pack containers, Application Updates| - - - -You can expose inbound port 22 for SSH if you would like to access your cluster nodes for troubleshooting remotely. This is entirely optional and not required for Palette to operate appropriately. - - - - -## Self-Hosted Network Communications and Ports - -The following ports must be reachable from a network perspective for Palette self-hosted to function correctly. - - -![On-prem network diagram](/architecture_networking-ports_on_prem_network-diagram.png "#title="network diagram") - -## Management Platform - -|Port |Direction|Purpose | -|:---------------|:---------|:-----------------------| -|HTTPS (tcp/443) |INBOUND |Browser/API access to management platform| -|NATS (tcp/4222) |INBOUND |Message Bus for workload clusters| -|HTTPS (tcp/443) |OUTBOUND |vSphere vCenter API, Registry (packs, integrations), Pack containers, app updates.| -|HTTPS (tcp/6443)|OUTBOUND |Workload K8s cluster API Server| - - -## Workload Cluster - - -|Port |Direction | Purpose| -|:---------------|:---------|:--------------| -|HTTPS (tcp/443) |OUTBOUND | API access to management platform| -|NATS (tcp/4222) |OUTBOUND |Agent communication via message bus | -|HTTPS (tcp/443) |OUTBOUND |vSphere vCenter API, Registry (packs, integrations), Pack containers, Application updates. - - - -You can expose inbound port 22 for SSH if you would like to access your cluster nodes for troubleshooting remotely. This is entirely optional and not required for Palette to operate appropriately. - - - -
-
- - diff --git a/content/docs/02.5-architecture/04.1-grps-proxy.md b/content/docs/02.5-architecture/04.1-grps-proxy.md deleted file mode 100644 index 3f78ba19b3..0000000000 --- a/content/docs/02.5-architecture/04.1-grps-proxy.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: "gRPC and Proxies" -metaTitle: "gRPC and Proxies" -metaDescription: "Learn about gRPC and how a proxy is used to communicate between the management platform and the workload cluster." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# gRPC and Proxies - -Palette uses [gRPC](https://grpc.io) to communicate between the management platform and the workload cluster. gRPC is a high-performance, open-source universal Remote Procedure Call (RPC) framework. It is used to build distributed applications and services. gRPC is based on HTTP/2 and uses protocol buffers ([protobuf](https://protobuf.dev/)) as the underlying data serialization framework. - -
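As a rough illustration of what this transport looks like from a client's point of view, the snippet below opens a TLS-secured gRPC channel with the `grpcio` Python package and waits for it to become ready. The target address is only an example taken from the Palette SaaS domain list; the actual agent endpoints and service definitions are internal to the product.

```python
import grpc

TARGET = "message.spectrocloud.com:443"  # example endpoint, not an advertised public gRPC service

# gRPC runs on HTTP/2; ssl_channel_credentials() provides the TLS layer it is carried over.
channel = grpc.secure_channel(TARGET, grpc.ssl_channel_credentials())
try:
    grpc.channel_ready_future(channel).result(timeout=10)
    print(f"gRPC channel to {TARGET} is ready")
except grpc.FutureTimeoutError:
    print(f"could not establish a gRPC channel to {TARGET}")
finally:
    channel.close()
```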
- - - -Refer to the [Network Ports](/architecture/networking-ports) documentation for a detailed network architecture diagram with gRPC and to learn more about the ports used for communication. - - - -
-
-When gRPC is used with network proxies, the proxy server may or may not support gRPC, or it may require additional configuration to allow gRPC traffic to pass through. The following table summarizes the different scenarios and whether or not the proxy server supports gRPC.
-
-
-| **Scenario** | **Description** | **Proxy Supported** |
-|:-------------|:----------------|:--------------------|
-| gRPC with HTTP/HTTPS - No SSL bump | gRPC traffic is sent over HTTP/HTTPS, and the proxy does not perform a Secure Sockets Layer (SSL) bump. This is universally supported. | ✅ |
-| gRPC with HTTP/HTTPS - SSL bump | gRPC traffic is sent over HTTP/HTTPS, and the proxy performs an SSL bump. Support varies by vendor. | ⚠️ |
-| gRPC with [Squid](https://wiki.squid-cache.org) Open Source Proxy | gRPC traffic is sent over HTTP/HTTPS, and the proxy performs an SSL bump. Supported in some scenarios but requires additional configuration. | ❌ or ⚠️ |
-
-
-The following sections provide more information about gRPC and proxies.
-
- - ## Proxy Without SSL Bump - -Because gRPC is based on HTTP/2, any proxy server that supports the [HTTP CONNECT](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/CONNECT) method can be used to forward gRPC traffic. No configuration is required for this scenario. The exception is when the proxy server performs an SSL bump, discussed in the [Proxy With SSL Bump](/architecture/grps-proxy#proxywithsslbump) section. - -
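The sketch below shows what the HTTP CONNECT handshake mentioned above looks like at the socket level: the client asks the proxy to open a tunnel, then negotiates TLS end to end with the target, which is why gRPC (HTTP/2 over TLS) passes through without any special proxy configuration. The proxy address is a made-up placeholder; the target host is taken from the Palette SaaS domain list.

```python
import socket
import ssl

PROXY_HOST, PROXY_PORT = "proxy.example.internal", 3128  # placeholder proxy address
TARGET_HOST, TARGET_PORT = "message.spectrocloud.com", 443

# Ask the proxy to open a raw tunnel to the target.
sock = socket.create_connection((PROXY_HOST, PROXY_PORT), timeout=10)
request = (
    f"CONNECT {TARGET_HOST}:{TARGET_PORT} HTTP/1.1\r\n"
    f"Host: {TARGET_HOST}:{TARGET_PORT}\r\n\r\n"
)
sock.sendall(request.encode("ascii"))

reply = sock.recv(4096).decode("latin-1")
status_line = reply.splitlines()[0] if reply else ""
if " 200" not in status_line:
    raise RuntimeError(f"proxy refused the tunnel: {status_line or 'no response'}")

# From here on the proxy only relays bytes, so TLS (and the HTTP/2 traffic inside it)
# is negotiated directly with the target.
context = ssl.create_default_context()
with context.wrap_socket(sock, server_hostname=TARGET_HOST) as tls:
    print("TLS version negotiated through the proxy tunnel:", tls.version())
```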
- - - -SSL bump is a technique used to decrypt and inspect HTTPS traffic. When SSL bump is enabled, the proxy server terminates the Transport Layer Security (TLS) connection and establishes a new TLS connection to the destination server. In this scenario, the proxy server must support gRPC and may require additional configuration. - - - -## Proxy With SSL Bump - -Several vendors provide proxy servers that support gRPC. Some of the vendors may require additional configurations or the use of a specific version of the proxy server. We encourage you to review your proxy server documentation for more information. - -When you review the vendor documentation, search for information about gRPC and HTTP/2. We provide the following links to some vendors' documentation that addresses HTTP/2 and gRPC support. - - -- [F5](https://my.f5.com/manage/s/article/K47440400) - - -- [Palo Alto](https://knowledgebase.paloaltonetworks.com/KCSArticleDetail?id=kA10g000000CmdQCAS) - - -- [Fortinet](https://docs.fortinet.com/document/fortigate/7.4.0/administration-guide/710924/https-2-support-in-proxy-mode-ssl-inspection) - - -- [Check Point](https://support.checkpoint.com/results/sk/sk116022) - - -## Squid Proxy With SSL Bump - -A common open-source proxy server is [Squid](https://wiki.squid-cache.org). Squid is a caching proxy for the Web supporting HTTP, HTTPS, FTP, and more. Squid supports gRPC but requires additional configuration. gRPC with SSL bump does not work with all versions of Squid, such as versions 5 and 6. Review the [SSL Bump issue](https://bugs.squid-cache.org/show_bug.cgi?id=5245) to learn more about the issue and track the progress of the fix. - -If you are using a Squid version not affected by the issue, you can configure Squid with SSL bump to support gRPC. Use the [Configuring SSL Bumping in the Squid service](https://support.kaspersky.com/KWTS/6.1/en-US/166244.htm) guide to learn how to configure Squid with SSL bump. Additionally, you may have to configure exclusion rules when using SSL bumping with gRPC. Refer to the [Adding exclusions for SSL Bumping](https://support.kaspersky.com/KWTS/6.1/en-US/193664.htm) to learn more. - - - -
\ No newline at end of file diff --git a/content/docs/02.5-architecture/04.5-palette-public-ips.md b/content/docs/02.5-architecture/04.5-palette-public-ips.md deleted file mode 100644 index 0d028cbdb8..0000000000 --- a/content/docs/02.5-architecture/04.5-palette-public-ips.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: "IP Addresses" -metaTitle: "IP Addresses" -metaDescription: "Palette's public IP Addresses." -icon: "" -hideToC: false -fullWidth: false ---- - - # Overview - - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - In this section, you can find the public IP addresses that support Spectro Cloud SaaS operations. These IP addresses are essential to ensure seamless communication between your infrastructure and our platform. - - -Allow the following IP address ranges in your network configuration to enable traffic to and from the Spectro Cloud SaaS platform. - -| **IP Address**| **Region** | -|---|---| -|44.232.106.120 | North West U.S. | -|44.233.247.65 | North West U.S. | -|52.35.163.177 | North West U.S. | -|13.52.68.206 | South West U.S. | -|18.144.153.171 | South West U.S. | -|52.6.49.233 | North East U.S. | -|54.158.209.13 | North East U.S. | -|54.80.29.137 | North East U.S. | - - -## Palette Domains - -Palette uses the following domains for communication between the management platform and the workload cluster. - -
- - - -NATS and the associated port, 4222, are deprecated and will be removed in a future release. Starting with Palette 4.0.0, gRPC is used for all communication between the management platform and the workload cluster. - - - - - -|Domain |Ports | -|:---------------|:---------| -|api.spectrocloud.com |443 | -|api1.spectrocloud.com |443 | -|api2.spectrocloud.com |443 | -|api3.spectrocloud.com |443 | -|message.spectrocloud.com |443, 4222 | -|message1.spectrocloud.com |443, 4222 | -|message2.spectrocloud.com |443, 4222 | -|message3.spectrocloud.com |443, 4222 | -|console.spectrocloud.com |443 | -|proxy.console.spectrocloud.com |443 | -|registry.spectrocloud.com |443 | -|saas-repo.console.spectrocloud.com |443 | -|registry.spectrocloud.com |443 | -|maasgoldenimage-console.s3.amazonaws.com |443 | -|openstackgoldenimage-console.s3.amazonaws.com |443 | -|edgegoldenimage-console.s3.amazonaws.com |443 | -|vmwaregoldenimage-console.s3.amazonaws.com |443 | -| registry1.spectrocloud.com |443 | -| registry2.spectrocloud.com |443 | -| registry3.spectrocloud.com |443 | -| 415789037893.dkr.ecr.us-east-1.amazonaws.com |443 | -| 415789037893.dkr.ecr.us-west-2.amazonaws.com |443 | - - -
- - diff --git a/content/docs/02.5-architecture/05-palette-namespaces-podes.md b/content/docs/02.5-architecture/05-palette-namespaces-podes.md deleted file mode 100644 index ef49eb31ac..0000000000 --- a/content/docs/02.5-architecture/05-palette-namespaces-podes.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -title: "Namespaces and Pods" -metaTitle: "Palette specific namespaces and pods mapping" -metaDescription: "Palette specific namespaces and pods mapping" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Palette Specific Namespaces and Pods - -The page encompasses the set of Palette specific namespaces and pods belonging to each of these individual namespaces. -The information is organised as follows: - -* Namespace-Pod mapping for Palette Tenant Cluster -* Namespace-Pod mapping for Palette Gateways (PCG) -* Namespace-Pod mapping for Palette On-Prem Enterprise Cluster - -# Palette Tenant Cluster NameSpaces with Pods - -The following table gives the namespace to pod mapping for Palette Tenant Cluster. - -|PALETTE NAMESPACES | PODS | -|------|-------| -|capi-webhook-system |capi-controller-manager-< UUID>| -||capi-kubeadm-bootstrap-controller-manager-< UUID >| -||capi-kubeadm-control-plane-controller-manager-< UUID >| -|cert-manager|cert-manager-< UUID >| -||cert-manager-cainjector-< UUID> | -||cert-manager-webhook-< UUID > | -|cluster-< UUID > | capi-controller-manager-< UUID > -||capi-kubeadm-bootstrap-controller-manager-< UUID > -||capi-kubeadm-control-plane-controller-manager-< UUID > -||capv-controller-manager-< UUID > -||cluster-management-agent-< UUID > -|cluster-< UUID > |metrics-server-< UUID > | -| |palette-controller-manager-< UUID >| -|kube-system |calico-kube-controllers-< UUID > -| |calico-node-< UUID > -| |coredns-< UUID > -| |etcd-vmdynamictest-cp-< UUID >| -| |kube-apiserver-vmdynamictest-cp-< UUID > | -| |kube-controller-manager-vmdynamictest-cp-< UUID > | -| |kube-proxy-< UUID > | -| |kube-scheduler-vmdynamictest-cp-< UUID > | -| |kube-vip-vmdynamictest-cp-< UUID > | -|reach-system |reach-controller-manager-< UUID > | - - - -# Palette PCG NameSpaces with Pods - -The following table gives the namespace to pod mapping for Palette vSphere Gateway. 
- -|PALETTE NAMESPACES|PODS | -|--|--| -|capi-webhook-system|capi-controller-manager-< UUID > | -| |capi-kubeadm-bootstrap-controller-manager-< UUID > | -| |capi-kubeadm-control-plane-controller-manager-< UUID > | -| |capv-controller-manager-< UUID > | -|cert-manager | cert-manager-< UUID > | -| | cert-manager-cainjector-< UUID > | -| | cert-manager-webhook-< UUID > | | -|cluster-< UUID > |capi-controller-manager-< UUID >| -| |capi-kubeadm-bootstrap-controller-manager-< UUID > | -| |capi-kubeadm-control-plane-controller-manager-< UUID > | -| | capv-controller-manager-< UUID > | -| | capv-static-ip-controller-manager-< UUID > | -| |cluster-management-agent-< UUID > | -| | ipam-controller-manager-< UUID > | metrics-server-< UUID > | -| | palette-controller-manager-< UUID > | -|jet-system | jet-< UUID > | -|| spectro-cloud-driver-< UUID > | -|kube-system |calico-kube-controllers-< UUID > | -| |calico-node-< UUID > | |coredns-< UUID > | | || | coredns-< UUID > | -| | etcd-gateway1-cp-< UUID > | -| | kube-apiserver-gateway1-cp-< UUID > | -| | kube-controller-manager-gateway1-cp-< UUID > | | -| | kube-proxy-< UUID > || -| |kube-scheduler-gateway1-cp-< UUID > || -| | kube-vip-gateway1-cp-< UUID |> -| | vsphere-cloud-controller-manager-< UUID > | | -| | vsphere-csi-controller-< UUID > || -| | vsphere-csi-node-< UUID > || -|reach-system | reach-controller-manager-< UUID > | - -# Enterprise NameSpaces with Pods - -The following table gives the namespace to pod mapping for Palette On-Prem Enterprise Clusters. - -|PALETTE NAMESPACES|PODES| -|--|--| -| capi-webhook-system |capi-controller-manager-< UUID > -| |capi-kubeadm-bootstrap-controller-manager-< UUID > -| |capi-kubeadm-control-plane-controller-manager-< UUID > -| |capv-controller-manager-< UUID > -| |ipam-controller-manager-< UUID > -|cert-manager |cert-manager-< UUID > -| |cert-manager-cainjector-< UUID > -| |cert-manager-webhook-< UUID > -|cluster-mgmt-< UUID >|capi-kubeadm-bootstrap-controller-manager-< UUID > -| |capi-kubeadm-control-plane-controller-manager-< UUID > -| |capv-controller-manager-< UUID > -| |capv-static-ip-controller-manager-9< UUID > -| |cluster-management-agent-< UUID > -| |ipam-controller-manager-< UUID > -| |metrics-server-< UUID > -| |palette-controller-manager-< UUID > -|cp-system| spectro-cp-ui-< UUID > -|hubble-system |auth-< UUID >|auth-< UUID > -| | cloud-fb8< UUID > -| | configserver-< UUID > -| | event-< UUID > -| | hashboard-< UUID > -| | hutil-< UUID > -| | mgmt-< UUID > -| | mongo-0 -| | mongo-1 -| | mongo-2 -| | packsync-1< UUID > -| | spectrocluster-< UUID > -| | system-< UUID > -| | timeseries-< UUID > -| | user-< UUID > -|ingress-nginx |ingress-nginx-admission-create-spwch -| |ingress-nginx-admission-patch-< UUID > -| |ingress-nginx-controller-< UUID > -|jet-system| jet-< UUID > -|kube-system|calico-kube-controllers-< UUID > -| | calico-node-< UUID > -| | calico-node-w< UUID > -| | coredns-< UUID > -| | etcd-vsphere-spectro-mgmt-cp-< UUID > -| | kube-apiserver-vsphere-spectro-mgmt-cp-< UUID > -| | kube-controller-manager-vsphere-spectro-mgmt-cp-< UUID > -| | kube-proxy-bl< UUID > -| | kube-proxy-l< UUID > -| | kube-scheduler-vsphere-spectro-mgmt-cp-< UUID > -| | kube-vip-vsphere-spectro-mgmt-cp-< UUID > -| | vsphere-cloud-controller-manager-< UUID > -| | vsphere-csi-controller-df< UUID > -| | vsphere-csi-node-< UUID > -| | vsphere-csi-node-rhm< UUID > -|nats-system| nas-< UUID > -|ui-system |spectro-ui-< UUID > diff --git a/content/docs/03-cluster-profiles.md b/content/docs/03-cluster-profiles.md deleted 
file mode 100644
index e47aae5e53..0000000000
--- a/content/docs/03-cluster-profiles.md
+++ /dev/null
@@ -1,61 +0,0 @@
---
title: "Cluster Profiles"
metaTitle: "Understanding Cluster Profiles"
metaDescription: "Understanding the Cluster Profiles Concept and how they make Spectro Cloud powerful"
icon: "bundles"
hideToC: true
fullWidth: false
---

import WarningBox from 'shared/components/WarningBox';

# Overview

**Cluster Profiles** are templates built from the preconfigured layers and components that workload cluster deployments require. Cluster Profiles provide a way to drive consistency across workload cluster deployments. You can create as many profiles as required.

A Cluster Profile can contain an environment-specific configuration and can be created to meet the needs of a specific type of workload cluster deployment. As an example, you may create a Development Cluster Profile with a very basic configuration, or a Production Cluster Profile with additional security, monitoring, and logging layers.

You may also build special-purpose profiles to deploy workload clusters for use cases such as AI/ML or High Performance Computing (HPC). Cluster Profiles can be of the type Core Infra, Add-on, or Full.

![Cluster Profile Types](/cluster_profiles.png)

Cluster creation requires an Infrastructure or Full Cluster Profile to be selected and, optionally, one or more Add-on profiles. The same Add-on layer category may exist in more than one of the Add-on profiles. The profile would then read, for example: OS, Kubernetes, Networking, Storage, Monitoring, Ingress.

# Layers

Cluster Profile layers are built using content packages, which contain integration-specific templates, charts, and manifests. These content packages are of two types:

 * **Palette Packs** - These content packages are built using Spectro Cloud's proprietary content format. Spectro Cloud maintains a public registry of Palette Packs that are available to all Tenants.


 * **Helm Charts** - These charts are collections of Kubernetes resource files capable of deploying services of varying complexity. Palette provides a few stable public Helm registries out of the box. Tenants can also add any public or private Helm registries to leverage charts from those registries. Palette allows Container Storage Interface (CSI) and Container Network Interface (CNI) layers to be added as Helm Charts from customized Helm registries and linked to Spectro Registry packs.

# Core Infrastructure Cluster Profile

A **Core Infrastructure Cluster Profile** is constructed using the four Core Infrastructure layers: OS, Kubernetes, networking, and storage. These profiles are environment specific and are constructed using cloud-specific layers.

![Core Infra Profile - Azure](/cluster_profile_azure.png)

# Add-On Cluster Profile

An **Add-on Cluster Profile** consists of various integrations and can be constructed using layers such as:

- System apps
- Authentication
- Security
- Monitoring
- Logging
- Ingress
- Load balancer
- Helm Charts

![Add-On Profile](/addon_profile.png)

# Full Cluster Profile

A **Full Cluster Profile** consists of the Core Infrastructure layers and as many additional Add-on layers as required. The Core Infrastructure layers are cloud specific.

![Full Cluster Profile](/full_profile.png)

The next sections provide the details of creating and managing Cluster Profiles.

diff --git a/content/docs/03-cluster-profiles/01-task-define-profile.md b/content/docs/03-cluster-profiles/01-task-define-profile.md
deleted file mode 100644
index b0c4eb7271..0000000000
--- a/content/docs/03-cluster-profiles/01-task-define-profile.md
+++ /dev/null
@@ -1,152 +0,0 @@
---
title: "Create a Cluster Profile"
metaTitle: "Create a Cluster Profile"
metaDescription: "Learn how to create a cluster profile in Palette."
icon: ""
hideToC: true
fullWidth: false
---

import Tabs from 'shared/components/ui/Tabs';
import WarningBox from 'shared/components/WarningBox';
import InfoBox from 'shared/components/InfoBox';
import PointsOfInterest from 'shared/components/common/PointOfInterest';

# Overview

 `video: /aws-full-profile.mp4`

# Basic Information and Core Layers

Cluster profiles are created by configuring various layers of the Kubernetes infrastructure stack. To create a **New Cluster Profile**, follow these steps:

1. Provide the **Basic Information**, such as:

   |**Parameter** |**Description** |
   |---------|---------|
   |**Name** | Give a name for the new cluster profile. |
   |**Version** | Include the [Cluster Profile Version](/cluster-profiles/task-define-profile#clusterprofileversioning) number under which the cluster profile needs to be created. See below for more information. |
   |**Description** | Provide a quick description of your cluster profile. This is optional. |
   |**Profile Type (Full, Infrastructure, Add-on)**| Dictates the layers that can be configured in the cluster profile. If the cluster profile type is Infrastructure or Full, you can select a Cloud Type or Data Center environment. For more information on [Add-on](/cluster-profiles/task-define-profile#Addon) types, go to step four. |
   |**Tags** | Tags on a cluster profile are propagated to the VMs deployed in the cloud or data center environments when clusters are created from the cluster profile. This is optional. |


2. In the **Cloud Type** section, select the **Environment** you are working with. This list displays the environments supported in Palette.


3. Configure the **Profile Layers** of the infrastructure stack. The following layers are considered **Core Infrastructure** layers. Configuring these layers is mandatory for Full or Infrastructure cluster profiles.

   **Note**: These layers are not configurable for **Add-on** cluster profiles:

   - OS
   - Kubernetes
   - Network
   - Storage

   Select the **Registry**, **Pack Name**, **Pack Version**, and **Pack Values**, and click on **Next Layer** to go through each profile layer and completely build the core infrastructure.

   **Note**: Container Storage Interface (CSI) and Container Network Interface (CNI) layers can be added as Helm Charts from customized Helm registries and linked to Spectro Registry packs.


4. **Add-on Layers**, such as **Monitoring**, **Security**, **Load Balancers**, **Ingress**, **Logging**, **Authentication**, and **Service Mesh**, may be added and configured as desired. These layers can be configured for profiles of the type **Full** or **Add-on**. Add-on layers can be added in one of the following ways:


   - Add New Pack - Add a Palette Pack from a pack registry or a Helm Chart from a chart registry. The public Palette Pack registry and a few popular Helm chart repositories are already available out of the box. Additional pack registries or public/private chart registries can be added to Palette.

   - Import from cluster - Charts can be discovered from an existing Kubernetes cluster, and one or more of the discovered charts can be added to the cluster profile. Charts discovered in a cluster may not be available in any of the chart repositories registered with Palette. In that case, provide the registry information that hosts these charts during the import process to complete their addition.


   - Add Manifest - Layers can be constructed using raw manifests to provision Kubernetes resources that are not available through Palette packs or charts. Pack manifests provide a pass-through mechanism whereby additional Kubernetes resources can be orchestrated onto a cluster along with the rest of the stack.


   Configure each layer as follows:

   - Versions - Choose the desired version. Choices include pinning to a specific version (for example, 1.1.1) or picking a major or minor train such as 1.x or 1.1.x. Picking a major or minor train results in a dynamic version association: the latest release from that train is linked to the pack at any given point, and future releases on the train relink the pack to the newest version. This keeps clusters at the latest released version without requiring subsequent updates to the profile.

   - Configuration Parameters - The selected configuration option and version may expose configuration parameters that provide granular control or fine-tune certain aspects of the functionality. For the packs provided out of the box, the configuration parameters are set to values based on common best practices, and you may override them as desired. Additionally, for certain layers, Palette provides presets, which are groups of properties with defaults that offer a quick and easy way to enable or configure a feature within the add-on. If available, you can enable one or more presets as appropriate.

   - Manifest - Attach additional manifests to the layer if desired. Attached manifests provide a way to provision additional Kubernetes resources that support an integration or an add-on. Certain integrations offered through packs or charts may require the creation of resources such as Secrets or CustomResourceDefinitions (CRDs) to complete the installation end to end. This can be achieved by adding one or more Attach Manifests to the layer.


Palette allows users to deploy the same pack to multiple layers, which can be required in scenarios where an integration needs to be installed multiple times with different configurations. For example, you may have two or more applications in the profile that need to use the Postgres database. In this case, you must launch the Postgres database twice with different configurations.

To allow a pack to be added multiple times in a profile, add the following key to the pack values in the YAML editor:

    spectrocloud.com/display-name: <name>

where `<name>` is a name that is unique across the cluster profile and the cluster.

**Example:**

    pack:
      # The namespace (on the target cluster) to install this chart
      # When not found, a new namespace will be created
      namespace: "external-dns"
      # Custom pack name for multi-layer support
      spectrocloud.com/display-name: "dns-1"

If the same pack is needed at another layer, repeat the above block with the same namespace but a different display name, such as `dns-2`, as shown in the sketch below. Display names used for a pack across layers must be unique.
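For instance, a second layer that reuses the same pack might carry values like the following (a sketch based on the `dns-2` display name mentioned above; the namespace stays the same):

```yaml
pack:
  # Same target namespace as the first layer
  namespace: "external-dns"
  # A unique display name distinguishes this copy of the pack
  spectrocloud.com/display-name: "dns-2"
```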

By default, Palette uses a Helm chart release name in the format packName-chartName. In cases where a lengthy release name causes problems, you can customize the Helm chart release names using the format below:

**Example:**

```yaml
pack:
  namespace: kube-system
  releaseNameOverride:
    actual_chart_name1: custom_name1
    actual_chart_name2: custom_name2
```


## Cluster Profile Versioning

Palette enables users to create multiple versions of a cluster profile within the scope of a single profile name. The **Version** field of the cluster profile uses a semantic versioning format (numbers only):

 **`major.minor.patch`**, for example, Version 1.1.2

Profile versioning is optional and defaults to **1.0.0**. You can create multiple versions of a cluster profile under a single profile name, and each of these versions can have its own pack configurations.

Cluster profile versions are grouped under their unique profile names. Uniqueness is determined by the combination of name and version within the scope, which promotes backward compatibility for profile changes.

 **Example:** Profile-1 can have multiple versions, such as 1.0.0 and 2.0.1. These versions are grouped under the **Cluster Profile Name** Profile-1. The menu next to the cluster profile name contains the different versions under that name.

 The version numbers can be edited from the **Settings > Edit Info** option on the Cluster Profile page. When deleting a profile, select the version to be deleted.




diff --git a/content/docs/03-cluster-profiles/01.5-create-add-on-profile.md b/content/docs/03-cluster-profiles/01.5-create-add-on-profile.md
deleted file mode 100644
index fe3a57f193..0000000000
--- a/content/docs/03-cluster-profiles/01.5-create-add-on-profile.md
+++ /dev/null
@@ -1,144 +0,0 @@
---
title: "Create an Add-on Profile"
metaTitle: "Create an Add-on Profile"
metaDescription: "Learn how to create an add-on cluster profile."
icon: ""
hideToC: true
fullWidth: false
---

import Tabs from 'shared/components/ui/Tabs';
import WarningBox from 'shared/components/WarningBox';
import InfoBox from 'shared/components/InfoBox';
import PointsOfInterest from 'shared/components/common/PointOfInterest';


# Overview


Add-on cluster profiles offer a range of benefits for workload cluster deployments. These profiles provide enhanced functionality by allowing the addition of various layers, such as system apps, authentication, security, monitoring, logging, ingress, and load balancers, to the cluster.

This capability allows you to customize and configure clusters based on specific requirements. Add-on cluster profiles follow a modular approach, making managing and maintaining cluster configurations more flexible. Add-on profiles also promote reusability, allowing profiles to be used across multiple environments, projects, and tenants. Additionally, add-on cluster profiles support integration-specific templates, charts, and manifests, providing flexibility and customization options for workload cluster deployments.


## Pack Labels and Annotations

You can specify Namespace labels and annotations for Add-on packs and for packs that provide Container Storage Interface (CSI) and Container Network Interface (CNI) drivers. These labels and annotations are applied to the Namespace that the pack is deployed to, or to a specific Namespace if specified. You apply labels and annotations in the pack's YAML file.

The following parameters are available for specifying Namespace labels and annotations:

| **Parameter** | **Description** | **Type** |
| --- | --- | --- |
| `namespace` | The Namespace that the pack is deployed to. If the Namespace does not exist, Palette creates it. | string |
| `additionalNamespaces`| A list of additional Namespaces that Palette will create. | map |
| `namespaceLabels` | A list of key-value pairs for labels applied to the Namespace. | map |
| `namespaceAnnotations` | A list of key-value pairs for annotations applied to the Namespace. | map |



The following example shows how to specify Namespace labels and annotations for an Add-on pack, a CSI pack, and a CNI pack. In the example pack YAML configuration, the `wordpress` Namespace is created. An additional Namespace titled `wordpress-storage` is also created. In the `namespaceLabels` and `namespaceAnnotations` parameter sections, each entry has a key and a value. The key is the name of the target Namespace, and the value is the value of the label or annotation.

- -```yaml hideClipboard -pack: - namespace: "wordpress" - additionalNamespaces: - "wordpress-storage" - - namespaceLabels: - "monitoring": "org=spectro,team=dev" - "wordpress-storage": "user=demo-user" - "default": "user=demo-user" - - namespaceAnnotations: - "monitoring": "monitoring.io/enable=true" - "wordpress-storage": "storage.metrics.io/format=json" -``` - - - - -# Create an Add-on Profile - -Use the following steps to learn how to create an add-on cluster profile. - - -## Prerequisites - -* Your Palette account role must have the `clusterProfile.create` permission to create an Add-on cluster profile. Refer to the [Cluster Profile](/user-management/palette-rbac/project-scope-roles-permissions#clusterprofile) permissions documentation for more information about roles and permissions. - - -## Create Steps - - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Profiles**. - - -3. Click on **Add Cluster Profile**. - - -4. Fill out the following input values and ensure you select **Add-on** for the type. Click on **Next** to continue. - -
- - | Field | Description | - |----|----| - | **Name**| The name of the profile. | - |**Description**| Use the description to provide context about the profile. | - | **Version**| Assign a version to the profile. The default value is `1.0.0`. | - | **Type**| **Add-on** | - | **Tags**| Assign any desired profile tags you want. | - - -5. Select the type of layer to add to the cluster profile. - -
- - | Type | Description | - |---|---| - | **Pack** | A pack is a collection of files and configurations that can be deployed to a cluster to add functionality or customize the cluster's behavior.| - | **Helm**| You can specify a Helm chart as a layer in an add-on profile.| - | **Manifest**| A manifest is a Kubernetes configuration file that describes the desired state of a Kubernetes resource, such as deployment, service, or pod and is used to create or modify that resource in a cluster.| - - - - -6. Depending on your selected type, fill out the required input fields and click on **Confirm & Create**. - - -
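
   For context on the **Manifest** option above, the following is a minimal, hypothetical example of the kind of Kubernetes manifest you could supply as a layer (the Deployment name and image are placeholders, not values required by Palette):

   ```yaml
   apiVersion: apps/v1
   kind: Deployment
   metadata:
     name: hello-app            # hypothetical name
     namespace: default
   spec:
     replicas: 1
     selector:
       matchLabels:
         app: hello-app
     template:
       metadata:
         labels:
           app: hello-app
       spec:
         containers:
           - name: hello-app
             image: nginx:1.25  # placeholder image
             ports:
               - containerPort: 80
   ```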

   ![A view of the manifest creation process and the YAML code in the text editor](/clusters_imported-clusters_attach-add-on-profile_manfest-view.png)

- -7. If you want to add additional layers, repeat steps five and six. Otherwise, click on **Next** to review the profile. - - -8. Click on **Finish Configuration** to create the cluster profile. - - - -You now have an add-on cluster profile. You can reuse the profile and apply it to several clusters. You can also update a cluster profile and decide what clusters to apply the new version to. Refer to the [Update Cluster Profile](/cluster-profiles/task-update-profile) guide for more information about update operations. - - -# Validate - -1. Log in to [Palette](https://console.spectrocloud.com). - - - -2. Navigate to left **Main Menu** and select **Profiles**. - - - -3. Select your cluster profile to review its layers or make changes. - - -
- diff --git a/content/docs/03-cluster-profiles/02-task-update-profile.md b/content/docs/03-cluster-profiles/02-task-update-profile.md deleted file mode 100644 index 60be214418..0000000000 --- a/content/docs/03-cluster-profiles/02-task-update-profile.md +++ /dev/null @@ -1,288 +0,0 @@ ---- -title: 'Update Cluster Profiles' -metaTitle: 'Update Cluster Profiles' -metaDescription: 'Learn how to update cluster profiles in Palette.' -icon: '' -hideToC: true -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -You update a cluster profile to change the configuration of one or more layers in the profile stack. You can also update basic profile information such as the name, description, and tags. - -# Prerequisites - -There are no prerequisites. - - -# Modify Basic Profile Information - -The following steps will guide you in updating basic profile information. - -## Enablement - -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. From the left **Main Menu**, select **Profiles**. - - -3. Click the profile you want to update. Palette displays the profile stack. - - -4. Click the **Settings drop-down Menu** and choose **Edit Info**. - -
- - You can modify the name, version, description, and tags. Updated tags are not propagated to previously created clusters. However, tag changes will apply to new clusters you create that use the updated profile. - -
- - -5. Save your changes. - - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. From the left **Main Menu**, select **Profiles**. - - -3. Click the profile you updated. Palette displays the profile details and profile stack. - - -4. Check that profile details displays your changes. - - - -# Update a Pack Layer - -The following steps will guide you in making updates to a layer in the profile. - -## Enablement - -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. From the left **Main Menu**, select **Profiles**. - - -3. Click the profile you want to update. Palette displays the profile details and profile stack - - -4. Click the layer to update. Palette displays the profile stack. To add a pack layer, select one of the following options: - -
- - - **Add New Pack** - - **Import from cluster** - - **Add Manifest** - - -5. You can do the following: - - - Choose a new pack to add, or import one from another cluster. - - - Edit pack settings in the YAML file. - - - Add, edit, or remove a manifest. - - - Remove non-core pack layers from the profile. Click the layer to display its details and click the **trash can** icon next to **Edit Pack**. - -
- - - - Operating System (OS) Kubernetes, Networking, and Storage are considered core layers and cannot be removed. - - - - - - Delete the profile by navigating to the **Settings drop-down Menu** and choosing **Delete**. - - - -6. Confirm your updates. - -Clusters that use the updated profile are notified of the changes. You can update clusters to use the latest profile definition at any time. - - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. From the left **Main Menu**, select **Profiles**. - - -3. If you deleted the profile, verify it is no longer displayed on the **Cluster Profiles** page. - - -4. If you made changes, click the profile you updated. Palette displays the profile details and profile stack. - - -5. Check that layers are added to or removed from the stack. - - -6. If you added, removed, or modified a manifest, click the layer in the stack that you updated and verify the manifest changes. - - - -# Update the Pack Version - -Packs typically contain changes between versions, such as the addition or removal of parameters and policies. The following steps guide you in updating configurations. - -
- - - -When updating to a new pack version, these rules apply: - -

- You should not copy the pack configuration from one version to another, as the newer version often contains an adjusted configuration that is tailored to that version. Instead, integrate your changes manually into the new version.


- Updating to a newer Kubernetes version must be done incrementally, one minor version at a time. For example, update from 1.24.x to 1.25.x, and then to 1.26.x.


- Select a specific target version instead of a group that ends in ``.x``.


- We do not recommend downgrading packs to the previous version.

- -
- -## Enablement - -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. From the left **Main Menu**, select **Profiles**. - - -3. Click the profile you want to update. Palette displays the profile stack. - - -4. Click on the pack layer to update. - - -5. In the **Edit Pack** page, select a specific target version, not a group that ends in ``.x``. Palette displays the difference between the current version at left and the new version at right. The target version is displayed in the header.

- - Differences between the displayed configurations are as follows: - -
- - - - **Red highlighting**: indicates text that is not present in the new configuration. - -
- - Red highlighting indicates lines you may have added in the current configuration. You can use the arrow icon that displays between the two configurations to transfer the lines to the new version. - -
- -
- - - These lines may also have been removed because they are no longer valid in the new configuration. If you need them, you should copy the lines to the new version. Similarly, you should copy any settings from the current configuration. - -
-
- - - **Green highlighting**: indicates additions in the new configuration that are not present in the current version. - -
- - #### Example of Difference Between Current and New Configurations - - - ![Screenshot that shows Palette's pack diff user interface with red highlight at left and green highlight at right](/integrations_pack_diffs.png) - -
- -

   - **Contrasting shades**: red and green highlighting in the same line indicates that the difference occurs in only part of the line.

- - #### Example of Line Changes in Current and New Configurations - - ![Screenshot that shows Palette's pack diff user interface with contrasting shades of red and green highlight in the same line](/integrations_pack_line_diffs.png) - - -

6. Check for red highlighting in the current configuration, which marks content that is missing from the new configuration.

- - - If there are any lines you added, use the arrow to transfer the lines to the new version. - -
- - - If there are lines you did not add that are red highlighted, they have been removed in the new version, and you should **not** copy them over. - - -7. Check for changed settings in the new configuration and copy settings from the current configuration to the new version. - - -8. Review new sections in the new configuration. You should adopt them, as they are typically needed to support the new version. - - -9. Check for changes in the same line that have a different value. If it is not a customization you made, you should adopt the new value, as it is known to be compatible with the new version. - - -10. Confirm your updates. - - -# Validate - - - -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. From the left **Main Menu**, select **Profiles**. - - -3. Click the profile you updated. Palette displays the profile stack. - - -4. Check that the updated layer displays the new pack version. - -
- - Palette indicates any misconfigurations with a dot displayed on the problematic layer in the stack and a message letting you know there is an issue. - - -5. Click on the pack layer and review its configuration. Apply fixes and confirm your updates. - - -6. Repeat the process until Palette indicates the configuration works. - - - -
- - -
- - -
- - - -
- diff --git a/content/docs/03-cluster-profiles/03-cluster-profile-import-export.md b/content/docs/03-cluster-profiles/03-cluster-profile-import-export.md deleted file mode 100644 index 3638321eab..0000000000 --- a/content/docs/03-cluster-profiles/03-cluster-profile-import-export.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -title: "Import Export Cluster Profiles" -metaTitle: "Import and Export Palette Cluster Profiles" -metaDescription: "The method for importing and exporting Cluster Profile on Spectro Cloud" -icon: "" -hideToC: true -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - -# Overview -Palette enables cluster profiles to be exported and then imported across multiple environments, projects and tenants. This smoothens the reuse and sharing of huge profiles with large number of add-ons and integrations. - -# Prerequisites - -* [Export](/cluster-profiles/cluster-profile-import-export#exportclusterprofile) the cluster profile file in JSON format from the Palette console. - - -* The packs in the exported profile should be available in the target environment during import. - - -* The `macros` used in the exported profile should be available in the target environment during import. If not [create the macros](/clusters/cluster-management/macros#createyourmacro) at the target environment. - -# Use Cases - - -The Export/Import Cluster Profile use cases: - -

* The export/import workflow is most suitable for moving profiles between different environments, such as staging and development SaaS setups.

# Export Cluster Profile

To import a Palette cluster profile, the existing profile first needs to be exported as a JSON file from the Palette console. To export a profile, follow these steps:


* As a `Tenant` or `Project` administrator, log in to the Palette console.


* Select the `Profiles` option from the left ribbon menu.


* Select the `Cluster Profiles` option from the top menu.


* From the listed cluster profiles, select the profile to be exported.


* From the profile details page, click `Export profile`.


* The profile is downloaded to your system as a JSON file.


* Save the downloaded file for import.


While exporting the profile, sensitive pack values are masked and must be updated during import.


# Import Cluster Profile


To import a cluster profile:


1. As a `Tenant` or `Project` administrator, log in to the Palette console.


2. Select the `Profiles` option from the left ribbon menu.


3. Select the `Cluster Profiles` option from the top menu.


4. To import an existing cluster profile, click on `Import Cluster Profile`.


5. In the import cluster profile wizard:
   * Click the `Upload file` button to upload the previously exported profile JSON file.
   * Validate the file contents to avoid duplicate profile names and versions. If a profile already exists with the same name and version combination, an error message is displayed. Customize the name or version number to avoid conflicts and ambiguities.
   * Once the file contents are validated, a `Select Repositories` wizard opens if multiple repositories at the destination contain the imported profile packs. Select the repository from which the packs should be fetched from the UI drop-down and confirm.
   * Once all the information is provided, confirm the profile creation process to have the profile created and listed. This profile can be used the same way as any other cluster profile for cluster operations such as deployments and updates.

**Note:** If there is only a single repository at the destination that contains the imported packs, the `Select Repositories` option does not appear.

-
diff --git a/content/docs/03-cluster-profiles/04-examples.md b/content/docs/03-cluster-profiles/04-examples.md deleted file mode 100644 index 3233b4e15d..0000000000 --- a/content/docs/03-cluster-profiles/04-examples.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "Cluster Profile Examples" -metaTitle: "Cluster Profile Examples" -metaDescription: "The method for creating a Cluster Profile for AWS on Spectro Cloud" -icon: "" -hideToC: true -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Examples - -Cluster profiles can be built to launch clusters for specific use cases. Clusters launched for development purposes are typically minimal and do not require advanced integrations. Production clusters on the other hand tend to be more comprehensive with many more integrations. The following are examples of cluster profiles built for development and production purposes: - - - - - -## Development Cluster Profile - -![Development Profile](/development.png) - -* All layers are built with smart tags to enable automatic upgrades of clusters to the newest releases. -* Kubernetes dashboard is the only integration enabled. - - - - - -## Production Cluster Profile - -![Production Profile](/production.png) - -* All layers are pinned to specific versions -* Automatic upgrades are disabled -* Centralized logging enabled - Elastic Search, Fluentd, Kibana -* Centralized monitoring enabled - Prometheus, Grafana -* Runtime-security enabled - Sysdig Falco -* Service observability enabled - Istio -* Role-based access control enabled - Permissions Manager -* Load balancer to expose services externally - MetalLB - - - - diff --git a/content/docs/03-cluster-profiles/10-byoos.md b/content/docs/03-cluster-profiles/10-byoos.md deleted file mode 100644 index 63f42e5305..0000000000 --- a/content/docs/03-cluster-profiles/10-byoos.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "Bring Your Own OS (BYOOS)" -metaTitle: "Bring Your Own OS (BYOOS)" -metaDescription: "Learn how to use your own OS images with a cluster profile" -icon: "" -hideToC: true -fullWidth: false ---- - -# Overview - -With Palette, you can bring your own operating system and use it with your Kubernetes clusters using the [Bring Your Own Operating System (BYOOS)](/glossary-all#bringyourownoperatingsystem(byoos)) feature. The BYOOS pack allows you to upload your own OS images, configure the necessary drivers, and customize the OS to meet your specific requirements. - -Bringing your own operating system provides several benefits, including the ability to control your own dependencies, improve performance, and ensure compatibility with your existing applications. With BYOOS, you can choose the OS that best fits your needs, whether it's a commercial or open-source distribution, and integrate it with your Kubernetes clusters. - -The BYOOS feature is especially useful for enterprises and organizations that have strict requirements around security, compliance, or specific hardware configurations. With the ability to bring your own OS, you can ensure that your Kubernetes clusters meet these requirements, without compromising on performance or functionality. - -BYOOS in Palette gives you greater flexibility, control, and customization options when managing your Kubernetes clusters. You can tailor your OS to your specific needs, ensuring your clusters perform optimally and meet your organization's unique requirements. 

To learn more about BYOOS, use the following resources.


# Resources

- [Create Images with Image Builder](/cluster-profiles/byoos/image-builder)


- [BYOOS Pack](/integrations/byoos)
\ No newline at end of file
diff --git a/content/docs/03-cluster-profiles/10-byoos/10-image-builder.md b/content/docs/03-cluster-profiles/10-byoos/10-image-builder.md
deleted file mode 100644
index b057e3ce51..0000000000
--- a/content/docs/03-cluster-profiles/10-byoos/10-image-builder.md
+++ /dev/null
@@ -1,317 +0,0 @@
---
title: "Create Images with Image Builder"
metaTitle: "Create Images with Image Builder"
metaDescription: "Learn how to use the Image Builder project to create images for Palette"
icon: ""
hideToC: true
fullWidth: false
---

import Tabs from 'shared/components/ui/Tabs';
import WarningBox from 'shared/components/WarningBox';
import InfoBox from 'shared/components/InfoBox';

# Overview

You can create and deploy custom images to most infrastructure providers using various tools. Many infrastructure providers have tools that you can use to create custom images for the platform, such as [AWS EC2 Image Builder](https://aws.amazon.com/image-builder/) for AWS or [Azure VM Image Builder](https://azure.microsoft.com/en-us/products/image-builder) for Azure. You can also use platform-agnostic tools, such as [HashiCorp Packer](https://developer.hashicorp.com/packer), or something more tailored to Kubernetes, such as the [Kubernetes Image Builder](https://image-builder.sigs.k8s.io/introduction.html) (KIB) project.


## Kubernetes Image Builder

KIB is a project designed to help users create images for various platforms. The project is a consolidation of multiple tools that together work to create an artifact, or in simpler terms, a custom image.

You can use the custom images created by KIB with Palette, assuming the infrastructure provider is supported in Palette. Use the following diagram to understand how you can use KIB to create custom images that you can use with Palette.

![A diagram displaying the steps for creating a custom image](/cluster-profiles_byoos_image-builder_workflow-diagram.png)

1. You will download the KIB project and configure the image builder's **packer.json** file.


2. Use the `make` command to create a custom image containing a specific Operating System (OS) version and flavor.


3. The custom image is created and distributed to the target regions you specified in the **packer.json** file.


4. Create a cluster profile pointing to your custom image.


5. Deploy a host cluster using your cluster profile containing the custom image.


This guide will teach you how to use the Kubernetes Image Builder to create images for your infrastructure provider so that you can use the custom image in a cluster profile.

# Prerequisites


* Palette v3.4.0 or greater.


* [Git](https://git-scm.com/downloads) v2.39.1 or greater.


* Access credentials to the target infrastructure provider. KIB, with the help of Packer, deploys a compute instance to the target environment during the image creation process.


* The cloud provider you choose may have different requirements. Review the KIB [documentation](https://image-builder.sigs.k8s.io/capi/capi.html) for your provider to learn more about the provider prerequisites.


* [HashiCorp Packer](https://developer.hashicorp.com/packer/tutorials/docker-get-started/get-started-install-cli) v1.8.6 or greater installed.

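Before starting, you may want to confirm that the tooling listed above is installed and meets the minimum versions (a quick sanity check; the output will vary by machine):

```shell
# Verify Git and Packer are available and report their versions
git --version
packer version
```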
- - - -To use a commercial OS, you must provide the license before starting the image creation process. - - - -# Create an Image - -The following steps will guide you through creating your image. You will create a custom Red Hat Enterprise Linux (RHEL) for Amazon Web Services (AWS). RHEL is a commercial product, so you will need license subscription credentials, but you can use the same steps for a non-RHEL image. The critical point to take away in this guide is using KIB to create the image. - -
- -1. Clone the KIB repository. - -
- - - - - ```shell - git clone https://github.com/kubernetes-sigs/image-builder.git - ``` - - - - - ```shell - git clone git@github.com:kubernetes-sigs/image-builder.git - ``` - - - - - -2. Switch the directory into the image builder folder. - -
- - ```shell - cd image-builder/images/capi - ``` - -3. Open up the image builder [documentation site](https://image-builder.sigs.k8s.io/introduction.html) in your web browser and review the steps for the infrastructure provider you want to build an image for. - - - -4. If you are using a commercial OS such as RHEL, set the required environment variables per the KIB documentation. For RHEL, the following environment variables are required. Replace the placeholder values with your actual credentials. - -
- - ```shell - export RHSM_USER=REPLACE_ME - export RHSM_PASS=REPLACE_ME - ``` - - If you want to debug the Packer compute instance in case of an error, set the following environment variable. The Packer flag will allow you to remote connect to the instance versus Packer's default behavior of terminating the instance. - -
- - ```shell - export PACKER_FLAGS=-on-error=ask - ``` - -5. Navigate to the **packer** folder and open up the folder for the target infrastructure provider. Review the file **packer.json**. Make any configuration changes you desire, such as the Kubernetes version, cloud credentials, network settings, instance size, image regions etc. You must make changes in the file's `variables` section. Only a condensed version of the 'variables' object below is used for illustrative purposes to enhance the reader's experience. - -
- - ```json hideClipboard - "variables": { - ... - "ami_groups": "", - "ami_regions": "us-east-1, us-west-2", - "ami_users": "", - "ansible_common_vars": "", - "ansible_extra_vars": "", - "ansible_scp_extra_args": "", - "ansible_user_vars": "", - "aws_access_key": "", - "aws_profile": "", - "aws_region": "us-east-1", - "aws_secret_key": "", - "aws_security_group_ids": "", - "aws_session_token": "", - "build_timestamp": "{{timestamp}}", - "builder_instance_type": "m5.xlarge", - .... - }, - ``` - -
- - - - The file **packer.json** contains many variables you can use to customize the image. We recommend you review the KIB [documentation](https://image-builder.sigs.k8s.io/capi/capi.html) for your provider as it explains each variable. - - - - - -6. Set the credentials for your infrastructure provider. Each infrastructure provider supports different methods for providing credentials to Packer. You can review each infrastructure provider's authentication section by visiting the [Packer plugins site](https://developer.hashicorp.com/packer/plugins) and selecting your provider on the left **Main Menu**. - - - -7. Next, find the `make` command for your provider. You can use the following command to get a list of all available RHEL options. Replace the `grep` filter with the provider you are creating an image for. - -
- - ```shell - make | grep rhel - ``` - - Output: - ```shell hideClipboard - build-ami-rhel-8 Builds RHEL-8 AMI - build-azure-sig-rhel-8 Builds RHEL 8 Azure managed image in Shared Image Gallery - build-azure-vhd-rhel-8 Builds RHEL 8 VHD image for Azure - build-node-ova-local-rhel-7 Builds RHEL 7 Node OVA w local hypervisor - build-node-ova-local-rhel-8 Builds RHEL 8 Node OVA w local hypervisor - ... - ``` - -8. Issue the `make` command that aligns with your target provider. In this example, `build-ami-rhel-8 ` is the correct command for an RHEL AWS AMI creation. - -
- - ```shell - make build-ami-rhel-8 - ``` - - Output: - ```shell hideClipboard - amazon-ebs.{{user `build_name`}}: output will be in this color. - - ==> amazon-ebs.{{user `build_name`}}: Prevalidating any provided VPC information - ==> amazon-ebs.{{user `build_name`}}: Prevalidating AMI Name: capa-ami-rhel-8-v1.24.11-1683320234 - amazon-ebs.{{user `build_name`}}: Found Image ID: ami-0186f9012927dfa39 - ==> amazon-ebs.{{user `build_name`}}: Creating temporary keypair: packer_64556dab-95d3-33e6-bede-f49b6ae430cb - ==> amazon-ebs.{{user `build_name`}}: Creating temporary security group for this instance: packer_64556dae-fb5a-3c7d-2106-1c8960c6d60e - ==> amazon-ebs.{{user `build_name`}}: Authorizing access to port 22 from [0.0.0.0/0] in the temporary security groups... - ==> amazon-ebs.{{user `build_name`}}: Launching a source AWS instance... - amazon-ebs.{{user `build_name`}}: Instance ID: i-06a8bf22b66abc698 - .... - ``` - -9. Once the build process is complete, note the image ID. - -
- - ```shell hideClipboard - Build 'amazon-ebs.{{user `build_name`}}' finished after 22 minutes 29 seconds. - - ==> Wait completed after 22 minutes 29 seconds - - ==> Builds finished. The artifacts of successful builds are: - --> amazon-ebs.{{user `build_name`}}: AMIs were created: - us-east-1: ami-0f4804aff4cf9c5a2 - - --> amazon-ebs.{{user `build_name`}}: AMIs were created: - us-east-1: ami-0f4804aff4cf9c5a2 - ``` - - -10. Login to [Palette](https://console.spectrocloud.com). - - - -11. Navigate to the left **Main Menu** and select **Profiles**. - - - -12. Click on the **Add Cluster Profile** to create a new cluster profile that uses your new custom image. - - - -13. Fill out the inputs fields for **Name**, **Description**, **Type** and **Tags**. Select the type **Full** and click on **Next**. - - -14. Select your infrastructure provider. In this example, **AWS** is selected. - - - -15. Select the **BYOOS** pack. Use the following information to find the BYOOS pack. - -* Pack Type: OS -* Registry: Public Repo -* Pack Name: Bring Your Own OS (BYO-OS) -* Pack Version: 1.0.x or higher - -16. Update the pack YAML to point to your custom image. You can use the tag values Packer assigns to the image to help you identify the correct value to provide the pack YAML. In the example output below, the tag values `distribution_version` and `distribution` are used to determine the correct values for the YAML. - -
- - ```shell hideClipboard - ==> amazon-ebs.{{user `build_name`}}: Creating AMI tags - amazon-ebs.{{user `build_name`}}: Adding tag: "build_date": "2023-05-10T17:19:37Z" - amazon-ebs.{{user `build_name`}}: Adding tag: "build_timestamp": "1683739177" - amazon-ebs.{{user `build_name`}}: Adding tag: "kubernetes_cni_version": "v1.2.0" - amazon-ebs.{{user `build_name`}}: Adding tag: "source_ami": "" - amazon-ebs.{{user `build_name`}}: Adding tag: "containerd_version": "1.6.20" - amazon-ebs.{{user `build_name`}}: Adding tag: "distribution_release": "Enterprise" - + amazon-ebs.{{user `build_name`}}: Adding tag: "distribution": "rhel" - amazon-ebs.{{user `build_name`}}: Adding tag: "image_builder_version": "" - amazon-ebs.{{user `build_name`}}: Adding tag: "kubernetes_version": "v1.24.11" - + amazon-ebs.{{user `build_name`}}: Adding tag: "distribution_version": "8 - ``` - - In this example, the YAML is updated to point to the RHEL image created earlier. Use the table below to learn more about each variable. - -
- - | **Parameter** | **Description** | **Type** | - |---|----|----| - | `osImageOverride` | The ID of the image to use as the base OS layer. This is the image ID as assigned in the infrastructure environment it belongs to. Example: `ami-0f4804aff4cf9c5a2`. | string| - | `osName` | The name of the OS distribution. Example: `rhel`. | string | - | `osVersion`| The version of the OS distribution. Example: `8` | string| - -
- - ```yaml - pack: - osImageOverride: "ami-0f4804aff4cf9c5a2" - osName: "rhel" - osVersion: "8" - ``` - - - ![View of the cluster profile wizard](/clusters_byoos_image-builder_cluster-profile-byoos-yaml.png) - - -17. Click on **Next layer** to add the Kubernetes layer. - - -18. Select the desired Kubernetes distribution and version. Click on the **** button to reveal the YAML editor. - - -19. Complete the remainder of the cluster profile creation wizard by selecting the next cluster profile layers. - -You now have a cluster profile that uses the custom image you created using the [Kubernetes Image Builder](https://image-builder.sigs.k8s.io/introduction.html) project. - -

When deploying a host cluster, choosing the appropriate cloud provider and the region where the image was distributed is critical to successfully launching a cluster that uses a custom image in the cluster profile. Failure to do so may result in Palette being unable to launch the cluster.



# Validate

Use the following steps to validate your custom image.

1. Validate that the custom image works correctly by deploying a compute instance from the custom image in the infrastructure provider where you created it. If you encounter any issues, review the compute instance logs to learn more about the problems.


2. Next, deploy a host cluster that uses the cluster profile you created containing the custom image. Verify the cluster is deployed correctly and without any issues. If you encounter any problems, review the event logs of the cluster to gain more details about the issue. Check out the [Deploy a Cluster](/clusters/public-cloud/deploy-k8s-cluster/) tutorial for additional guidance on deploying a host cluster.

diff --git a/content/docs/03.5-system-profile.md b/content/docs/03.5-system-profile.md
deleted file mode 100644
index 82930d1924..0000000000
--- a/content/docs/03.5-system-profile.md
+++ /dev/null
@@ -1,77 +0,0 @@
---
title: "System Profiles"
metaTitle: "Understanding System Profiles"
metaDescription: "Understanding the System Profiles Concept and how they make Palette powerful"
icon: "bundles"
hideToC: true
fullWidth: false
---

import WarningBox from 'shared/components/WarningBox';

# Overview

System profiles provide a way to bootstrap an edge appliance with an initial set of virtual and containerized applications. Similar to [cluster profiles](/cluster-profiles), system profiles are templates created using one or more layers that are based on packs or Helm charts.

System profiles modeled in the Palette UI must be downloaded and provided as input to the edge system. Upon bootstrap, when the edge appliance registers back with the SaaS console, it links to the system profile. Any subsequent changes made to the profile after registration are propagated down to the edge appliance.

## Create a System Profile

Here are the steps to create a system profile:

1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin.


2. Go to **Profiles**, open the **System Profile** tab, and click **Add System Profiles**.


3. Provide profile information such as the system profile name, description (optional), and tags (optional).


4. Add one or more layers using one of the following methods:

   * **Add New Pack** - Add a Palette Pack from a pack registry or a [Helm Chart](/registries-and-packs/helm-charts/) from a chart registry. The public Spectro Cloud Pack registry and a few popular Helm chart repositories are already available out of the box. Additional pack registries or public/private chart registries can be added to Palette.
   * **Add Manifest** - Layers can be constructed using raw manifests to provide Kubernetes resources that are unavailable via Palette or charts. In addition, pack manifests provide a pass-through mechanism whereby additional Kubernetes resources can be orchestrated onto a cluster along with the rest of the stack.


5. Click the `Confirm and Create` button to save the configuration.


6. Click the `Next` button to review the information and `Finish` to create and save the system profile.

**Note:** Palette enables the [Export](/cluster-profiles/cluster-profile-import-export#exportclusterprofile) and [Import](/cluster-profiles/cluster-profile-import-export#importclusterprofile) of system profiles across multiple environments, projects, and tenants.

## Download System Profile

1. Log in to the Palette management console.


2. Go to **Profiles** and open the **System Profile** tab.


3. Open an existing system profile.


4. Click the `Download System Profile` button at the bottom of the panel to download the profile definition as an archive (with the extension `tgz`).
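If you want to inspect the downloaded profile definition locally, you can extract the archive with standard tooling (a minimal sketch; the archive file name below is hypothetical):

```shell
# Extract the downloaded system profile archive and list its contents
tar -xzf system-profile.tgz
ls -R
```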
- - -## Sample Snapshots - -### System Profile dashboard - -![system-profile-1.png](/system-profile-1.png) - -### Add a new pack while creating the system profile - -![system-profile-2.png](/system-profile-2.png) - -### Add a new manifest while creating the system profile - -![system-profile-3.png](/system-profile-3.png) - -### Download system profile - -![system-profile-4.png](/system-profile-4.png) diff --git a/content/docs/04-clusters.md b/content/docs/04-clusters.md deleted file mode 100644 index 1130473c86..0000000000 --- a/content/docs/04-clusters.md +++ /dev/null @@ -1,294 +0,0 @@ ---- -title: "Clusters" -metaTitle: "Creating clusters on Palette" -metaDescription: "The methods of creating clusters for a speedy deployment on any CSP" -icon: "clusters" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Cluster Overview - -Kubernetes clusters in Palette are instantiated from cluster profiles. A cluster definition in Palette consists of a reference to a cluster profile, cloud configuration, as well as the cluster size and placement configuration. The following high-level tasks are performed as part of the cluster creation: - -* Orchestration of computing, network, and storage resources on the cloud environments along with the required placement infrastructure. - - -* Installation and configuration of various Kubernetes components like Kubelet, API servers, etcd, or scheduler. - - -* Installation and configuration of the cloud-specific network (CNI) and storage (CSI) plugins. - - -* Securing the cluster infrastructure and configuration according to the relevant OS, Kubernetes, and cloud security best practices. - - -* Deployment of additional Add-ons such as Prometheus, Permissions Manager, or Vault, as specified in the Cluster Profile. - -# Images - -Palette provides Virtual Machine (VM) images for cluster-computing infrastructure out of the box for the most recent versions of Operating Systems such as Ubuntu or CentOS. These images are security-hardened based on the respective CIS Benchmarks. In addition, Kubernetes components such as kubelet, kubeadm, etc. are preinstalled in these images. The specific image for a cluster is derived from the Operating System and Kubernetes packs configured in the cluster profile. - -The out-of-the-box images are hosted in the public cloud (AWS - AMI, Azure - VHD) or Palette's storage repository (vSphere - OVA). During provisioning, the image is copied (if missing) to the desired cloud region or downloaded onto a private data center. - -## Customization - -Palette provides various forms of customization options for VM images. All these customization options require a private pack registry to be set up with customized OS packs. - -### Customize Out-of-the Box Images - -The Palette out-of-the-box images are security-hardened and have Kubernetes components preinstalled. Additional components can be installed on the images at runtime by defining one or more Ansible roles in the customized OS pack. Palette's orchestration engine creates a new image by instantiating a VM instance from the out-of-the-box image and executing the specified Ansible roles on the instance. This custom image is used for cluster provisioning. 
The customized image is tagged with a unique signature generated from the pack definition so that it can be reused for future cluster provisioning requests.

# Security

Palette secures the Kubernetes clusters it provisions by following security best practices at the Operating System, Kubernetes, and Cloud Infrastructure levels.

## Operating System

The Palette out-of-the-box VM images are hardened in accordance with the relevant OS CIS benchmark. Additionally, the images are scanned for vulnerabilities regularly, and fixes are applied to these images when available from the provider. The upgraded images are released in the form of updated OS packs in the Palette Pack Registry and are available for users to apply to their existing clusters at a time convenient to them.

## Kubernetes

Kubernetes components and configuration are hardened in accordance with the Kubernetes CIS Benchmark. Palette executes Kubebench, a CIS Benchmark scanner by Aqua Security, for every Kubernetes pack to ensure the master and worker nodes are configured securely.

## Cloud Infrastructure

Palette follows security best practices recommended by the various cloud providers when provisioning and configuring the computing, network, and storage infrastructure for the Kubernetes clusters. These include practices such as isolating master and worker nodes in dedicated network domains and limiting access through the use of constructs like security groups.

 The security measures mentioned above are implemented for Palette's out-of-the-box OS and Kubernetes packs. For customized OS and Kubernetes packs, users are responsible for taking the relevant measures to secure their clusters.

# Day-2 Management

Palette provides several options to manage Kubernetes clusters on an ongoing basis. These include scaling the cluster up or down by adding or reducing the number of nodes in a node pool, adding extra worker pools, resizing nodes in a node pool by modifying the instance type, and adding additional fault domains such as availability zones to a node pool.

 Cluster management operations result in updates to cluster definitions in Palette's database. The updated definition is retrieved by the management agent running in the cluster. A rolling upgrade is then performed to bring associated clusters to their desired state.

# Cluster Health

Palette monitors the cluster infrastructure regularly and reports health on the management console.

Overall health is computed based on the following factors:

* **Heartbeat** - The Palette management agent, which runs inside the cluster, periodically sends a heartbeat to the management console. Missing heartbeats typically indicate a problem, such as the cluster infrastructure going down or a lack of network connectivity. Failure to detect heartbeats over a 10-minute period results in an unhealthy status for the cluster.

  Palette provides health check information for your workload clusters. This information adds value to cluster monitoring activities. If a cluster goes into an unhealthy state, the last received healthy heartbeat can help you troubleshoot.

![Cluster_Health_Heart_Beat](/doc_cluster_clusters-cluster-heart-beat.png)

* **Node Conditions** - Kubernetes maintains the status for each cluster node in the form of conditions such as DiskPressure, MemoryPressure, or NetworkUnavailable. Palette monitors these conditions and reports back to the management console.
Any node condition indicating a problem with the node, results in an unhealthy status for the cluster. - - -* **Metrics** - Palette collects usage metrics such as CPU, Disk, or Memory. The cluster is marked as unhealthy if the usage metrics cross specific thresholds over some time. - -# Usage Monitoring - -Palette continuously monitors cluster resources and reports the usage for the cluster as well as individual nodes. The following metrics are reported on the Cluster Overview page of the management console. By default, the metrics are only displayed for the worker nodes in the cluster: - -* **Cores Used** - A cluster-wide breakdown of the number of cores used. - - -* **CPU Usage** - Current CPUs used across all cluster nodes. Additionally, usage over some time is presented as a chart. - - -* **Memory Usage** - Current memory used across all cluster nodes. Additionally, usage over a while is presented as a chart. - - -* **CPU Requests** - Total CPUs requested across all pods. - - -* **Memory Requests** - Total memory requested across all pods. - -![Cluster Update Details](/cluster_usage_metrics.png) - -Additionally, usage metrics for individual nodes and node conditions are accessible from the Node Details page. - -# Application Services - -Palette enables quick access to the application services installed on the Kubernetes clusters by providing a link to those on the management console. These include the applications and services deployed through Palette and the ones deployed through any other means. Services are monitored on an ongoing basis, and all services of the type LoadBalancer or NodePort are displayed on the management console. - -![Cluster Update Details](/cluster_services.png "#width=500px") - -# Troubleshooting - -Typically, when a cluster lifecycle action such as provisioning, upgrade, or deletion runs into a failure, it does not result in an outright error on the cluster. Instead, the Palette orchestration engine follows the reconciliation pattern, wherein the system repeatedly tries to perform various orchestration tasks to bring the cluster to its desired state until it succeeds. Initial cluster provisioning or subsequent updates can run into various issues related to cloud infrastructure availability, lack of resources, or networking issues. - -## Cluster Conditions - -Palette maintains specific milestones in a lifecycle and presents them as **Conditions**. - -Examples include: - - - Creating Infrastructure - - - - Adding Control Plane Node - - - - Customizing Image - -The active condition indicates what task Palette orchestration system is trying to perform. If a task fails, the condition is marked as *failed*, with relevant error messages. Reconciliation, however, continues behind the scenes, and continuous attempts are made to perform the task. Failed conditions are a great source of troubleshooting provisioning issues. - -![Cluster Update Details](/cluster_conditions.png "#width=400px") - -For example, failure to create a Virtual Machine in AWS due to the exceeded vCPU limit will cause this error to be shown to the end-users. Then, they can choose to bring down some workloads in the AWS cloud to free up space. The next time a VM creation task is attempted, it will succeed, and the condition will be marked as a success. - -## Rolling upgrade - -Palette will perform a rolling upgrade on the nodes for any fundamental changes to the Cluster Configuration file. 
Below are some of the actions that will result in a rolling upgrade: - -* OS layer changes - - -* Kubernetes layer changes - - -* Kubernetes version upgrade - - -* Kubernetes control plane upsize - - -* Machine pool updates for disk size - - -* Changes in availability zones - - -* Changes in instance types - - -* Certificate renewal and many more. - -Palette keeps track of the reason that triggered the rolling upgrade on the cluster nodes and makes it accessible under **Cluster Overview** > **Upgrade details**. - -![upgrade-details1.png](/upgrade-details1.png) - -Besides actions taken by the user, Palette also performs a rolling upgrade if the cluster nodes' health degrades. Palette keeps track of each node machine's health and will relaunch the node when the machine health check fails. - -![upgrade-details2.png](/upgrade-details2.png) - -The following are some sample scenarios where the node health is considered degraded: - - * Kubelet is not up for 10 minutes. - - - * The network is unavailable for 10 minutes. - - - * A new node does not become ready in 30 minutes. - -## Event Stream - -Palette maintains an event stream with low-level details of the various orchestration tasks being performed. This event stream is a good source for identifying issues when an operation does not complete for a long time. - -
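In addition to the event stream in the Palette console, you can inspect low-level Kubernetes events directly on a workload cluster while an operation is in progress. The following is a minimal sketch, not specific to Palette's event stream, and assumes you have `kubectl` access to the cluster:

```bash
# List the most recent events across all namespaces, oldest first,
# to spot scheduling, image pull, or networking problems.
kubectl get events --all-namespaces --sort-by=.lastTimestamp | tail -n 20
```

Warnings that keep reappearing here usually line up with the failed conditions and event-stream errors described above.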
- - - -* Only the most recent 1000 cluster events are retained. - - -* Due to the Palette reconciliation logic, intermittent errors appear in the event stream.

As an example, after a node is launched, errors about being unable to reach the node might show up in the event stream. However, these errors clear up once the node comes up. -
-
- - Error messages that persist for a long time, or errors that indicate issues with the underlying infrastructure, are an indication of a real problem. - -
- -## Download Cluster Logs - -At times, you may need to work with the Palette Support Team to troubleshoot an issue. Palette provides the ability to gather logs from the clusters it manages. Problems that occur during the orchestration lifecycle may require access to the various container, node, and kube-system logs. Palette automates this log-collection process and provides an easy download option from the Palette UI console. This reduces the burden on the operator of logging in to individual cluster nodes to fetch these logs. -
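If you only need a quick look at a specific component before engaging support, you can also pull logs manually with standard Kubernetes tooling. This is a minimal sketch that assumes you have `kubectl` access to the workload cluster; the namespace and deployment name are illustrative:

```bash
# Tail recent log lines from a kube-system component (deployment name is an example).
kubectl logs --namespace kube-system deployment/coredns --tail=50
```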
- -### Collect the Logs - -1. Select the running cluster. - - -2. Go to **Settings* and select **Download Logs**. - - -3. Choose the desired log from the below options: - * Kube-System Logs - * Logs of all the Kubernetes components. - * Logs - * Spectro namespace logs for the last one hour. - * Node Logs - * Contains the Spectro log, system log, and the cloud-init log information collected for the last ten thousand lines of code. - -4. Click **Download Logs**. - - This message will display on the UI: "The request was sent successfully. The download will be available soon." There is an average wait time of five (5) minutes. - - At the end of this short log fetching interval, the message will be displayed on the UI: "The logs archive for {Cluster-name} was created successfully." - - -5. Click **Download <*cluster-name*> logs** to download the **Logs** folder to your local machine. - - -6. Unzip and rename the Logs folder as per the customer's choice. - - - -* In addition to the log contents briefed above, the folder will also contain a Manifest.yaml file describing the CRDs, Deployments, Pods, ConfigMap, Events, and Nodes details of the Cluster. - -* Palette recommends its users attach these logs, along with the support request, for accelerated troubleshooting. - -* Expect an average log fetching time of five (5) minutes for the ready-to-download message to appear on the UI, once the download log is clicked. - -* The downloaded Zip file will be by default named as **spectro_logs.zip**. The users can unzip and choose a name of convenience. - - - -# Proxy Whitelist - -This table lists the proxy requirements for enabling the Palette management console. - -| Top-level Domain | Port | Description | -| ------------------------- | ---- | -------------------------------------------- | -| docker.io | 443 | Third party container images. | -| docker.com | 443 | Third party container images. | -| gcr.io | 443 | Spectro Cloud and 3rd party container images. | -| ghcr.io | 443 | Third party container images. | -| github.com | 443 | Third party content. | -| googleapis.com | 443 | Spectro Cloud images. | -| grafana.com | 443 | Grafana container images and manifests. | -| k8s.gcr.io | 443 | Third party container images. | -| projectcalico.org | 443 | Calico container images. | -| registry.k8s.io | 443 | Third party container images. | -| raw.githubusercontent.com | 443 | Third party content. | -| spectrocloud.com | 443 | Spectro Cloud Palette SaaS. | -| s3.amazonaws.com | 443 | Spectro Cloud VMware OVA files. | -| quay.io | 443 | Third party container images. | -| ecr.us-east-1.amazonaws.com | 443 | OCI Pack images. | -| ecr.us-west-2.amazonaws.com | 443 | OCI Pack images. | - -# Scope - -Clusters are launched from within Projects in Palette, and they belong to a single project. In the **Project** view, all clusters that are launched from that project are listed for users with the Project Administrator role or Cluster Administrator role. A Tenant Administrator can get an aggregated view of clusters running across all projects from the **Organization** view, as follows: -
- -1. Log in to the **Palette Management Console** as a Tenant Administrator. - - -2. Go to the **Clusters** option from the sidebar to list all the clusters across all projects under that tenant. -
- -# Additional Resources - -The next section provides details for setting up workload clusters in various environments. -
diff --git a/content/docs/04-clusters/01-public-cloud.md b/content/docs/04-clusters/01-public-cloud.md deleted file mode 100644 index 9922b6e2fa..0000000000 --- a/content/docs/04-clusters/01-public-cloud.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "Public Cloud Clusters" -metaTitle: "Creating new clusters on Spectro Cloud" -metaDescription: "The methods of creating clusters for a speedy deployment on any CSP" -icon: "server" -hideToC: false -fullWidth: false ---- - -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import Tooltip from 'shared/components/ui/Tooltip'; - - -# Public Cloud Clusters - -Palette supports provisioning new workload clusters on public clouds using cloud providers' infrastructure. It achieves this by provisioning new virtual machines (VMs) for the control plane and worker pools and uses their managed Kubernetes services such as EKS, AKS, GKE, TKE, and more. - -Workload clusters are instantiated from cloud specific [_Cluster Profiles_](/cluster-profiles) templates that are created with pre-configured layers and components required for cluster deployments. You can use one of the cluster profiles provided out-of-the-box or create a new one. - -# Get Started - -Learn how to deploy a cluster to a public cloud provider by using Palette. Check out the [Deploy a Cluster with Palette](/clusters/public-cloud/deploy-k8s-cluster) tutorial to get started. - - -# Supported Environments - -The following pages provide detailed instructions for setting up new workload clusters in the various environments. - -* [Amazon Web Services](/clusters/public-cloud/aws) -* [Azure](/clusters/public-cloud/azure) -* [Cox Edge](/clusters/public-cloud/cox-edge) -* [Google Cloud](/clusters/public-cloud/gcp) -* [Tencent](/clusters/public-cloud/tke) - diff --git a/content/docs/04-clusters/01-public-cloud/01-aws.md b/content/docs/04-clusters/01-public-cloud/01-aws.md deleted file mode 100644 index acbbb4e1af..0000000000 --- a/content/docs/04-clusters/01-public-cloud/01-aws.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: "AWS" -metaTitle: "Creating new clusters on Palette" -metaDescription: "The methods of creating clusters for a speedy deployment on any CSP" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - -# Overview - -Palette supports integration with [Amazon Web Services](https://aws.amazon.com/). You can deploy and manage [Host Clusters](/glossary-all#hostcluster) in AWS. To get started check out the [Register and Manage AWS Accounts](/clusters/public-cloud/aws/add-aws-accounts). - - - -# Get Started - -Learn how to deploy a cluster to AWS by using Palette. Check out the [Deploy a Cluster with Palette](/clusters/public-cloud/deploy-k8s-cluster) tutorial to get started. - - - -
- -# Resources - -To learn more about Palette and AWS clusters, check out the following resources: - -- [Register and Manage AWS Accounts](/clusters/public-cloud/aws/add-aws-accounts) - - -- [Create and Manage AWS IaaS Cluster](/clusters/public-cloud/aws/create-cluster) - - -- [Create and Manage AWS EKS Cluster](/clusters/public-cloud/aws/eks) - - -- [Cluster Management Day Two Operations](/clusters/cluster-management) - - -- [AWS Architecture](/clusters/public-cloud/aws/architecture) - - -- [Required IAM Policies](/clusters/public-cloud/aws/required-iam-policies) - - -- [Cluster Removal](/clusters/cluster-management/remove-clusters) diff --git a/content/docs/04-clusters/01-public-cloud/01-aws/02-architecture.md b/content/docs/04-clusters/01-public-cloud/01-aws/02-architecture.md deleted file mode 100644 index 9cca41e6d8..0000000000 --- a/content/docs/04-clusters/01-public-cloud/01-aws/02-architecture.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: "Architecture" -metaTitle: "AWS Architecture with Palette" -metaDescription: "Learn about Palette and the architecture used to support Palette" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# AWS IaaS Architecture - - -The following are some architectural highlights of the Amazon Web Services (AWS) clusters that Palette provisions: - - -- Kubernetes nodes can be distributed across multiple availability zones (AZs) to achieve high availability (HA). For each of the AZs that you select, a public subnet and a private subnet is created. - - -- All the control plane nodes and worker nodes are created within the private subnets, so there is no direct public access available. - - -- A Network Address Translation (NAT) Gateway is created in the public subnet of each AZ, to allow nodes in the private subnet to be able to go out to the internet or call other AWS services. - - -- An Internet Gateway (IG) is created for each Virtual Private Cloud (VPC), to allow Secure Shell Protocol (SSH) access to the bastion node for debugging purposes. SSH into Kubernetes nodes is only available through the bastion node. A bastion node helps to provide access to the Amazon Elastic Compute Cloud (EC2) instances. This is because the EC2 instances are created in a private subnet and the bastion node operates as a secure, single point of entry into the infrastructure. The bastion node can be accessed via SSH or Remote Desktop (RDP). - - -- The Kubernetes API Server endpoint is accessible through an Elastic Load Balancing (ELB), which load balances across all the control plane nodes. - -![A diagram of AWS architecture](/clusters_aws_architecture_aws_cluster_architecture.png) - - -# AWS EKS Architecture - -Palette also supports deploying and managing AWS Elastic Kubernetes Service (EKS) clusters. Review the architectural highlights pertaining to EKS when managed by Palette. - -- Cluster resources such as Virtual Machines (VMs) can be provisioned into an existing infrastructure (gateways, VPCs, subnets etc.) as part of static provisioning as well as new dedicated infrastructure as part of dynamic provisioning. - - -- Palette supports the usage of [EKS Fargate Profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate-profile.html). 
- - -- Spot instance support - - ![eks_cluster_architecture.png](/clusters_aws_create-and-manage-aws-eks-cluster_architecture.png) - - -## Disable OIDC Associate Provider -You can disable the OIDC associate provider if your service provider restricts cluster deployments that have the OIDC associate provider enabled. Customize the EKS Kubernetes pack YAML values with the following option:
- - - -```yaml - disableAssociateOIDCProvider: true -``` - -# AWS Instance Type and Pod Capacity -Choose the instance type and the number of instances to be launched by calculating the number of expected pods. You should also calculate the number of pods scheduled per node for an instance type. Improperly sized nodes can cause cluster creation to fail due to resource unavailability. - -The following section describes calculating the pod capacity for AWS instance types. This calculation will help you select the proper instance type and the number of desired workers in a worker pool. We recommend for most workloads choosing an instance that can support at least 30 pods. - -## Formula for Pod Calculation -Number of pods = N * (M-1) + 2 - -Where: -* **N** is the number of Elastic Network Interfaces (ENI) of the instance type (Maximum network interfaces). -* **M** is the number of IP addresses of a single ENI (Private IPv4 addresses per interface/IPv6 addresses per interface). -* Values for **N** and **M** for each instance type can be referred from [this document](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI). - -## Example Calculation: -* For instance type = t3.medium -* For values of N = 3, and M = 6 (values derived from AWS [document](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) ) -* N * (M-1) + 2 = 3(6-1)+2 =17 pods/instances -* In this example, at least two (2) t3.medium instances are needed to reach the minimum of 30 pods threshold. - -When setting the desired size of the worker pool, make the choice as per pod requirement. In the example provided, two instances of t3.medium are needed to satisfy the resource requirement of an EKS cluster. - - -## Spot Instances - -By default, worker pools are configured to use on-demand instances. However, to take advantage of discounted spot instance pricing you can specify spot instances when creating a cluster. The **On-Spot** option can be selected in the node configuration page during cluster creation. This option allows you to specify a maximum bid price for the nodes as a percentage of the on-demand price. Palette tracks the current price for spot instances and launches nodes, when the spot price falls in the specified range. - -# Tags - -You can assign tags to clusters deployed to AWS. Tags can help you with user access control management and more granularly restrict access to various Palette resources, including clusters. Check out the [Resource Filters](/clusters/cluster-management/cluster-tag-filter/create-add-filter) documentation page to learn more about using tags to restrict resource access. - -The custom tags you create are assigned to the clusters during the creation process. Tags follow the key-value-pair format: `department: finance`. -In addition to the custom tags provided by you, Palette-provisioned AWS resources will receive the following default tags. - -| Key | Value | Description | -|------------------------------------------------------------|---------------|--------------------------------------------------------------------------------| -| `Name` | [clusterName-resource] | The name of the AWS resource. Use the format [cluster name] - [resource type name]. Example: `mycluste2r-vpc` | -| `kubernetes.io/cluster/[clusterName]`. | owned | This tag only applies to cluster nodes. Used for Palette internal purposes to help manage the lifecycle of the cluster. 
|
| `sigs.k8s.io/cluster-api-provider-aws/cluster/[clusterName]` | owned | Used for Palette internal purposes to help manage the lifecycle of the cluster. |
| `sigs.k8s.io/cluster-api-provider-aws/role` | common | Used for Palette internal purposes to help manage the lifecycle of the cluster. |
| `spectro__ownerUid` | [uniqueId] | The Palette tenant's ID. Example: `1356fc37ab1aac03a5d66b4c`. |

- -# Automatic Network Discovery
- -
-You must add a set of specific tags to enable automatic subnet discovery by Palette for integration with the AWS load balancer service. Add the following tags to the Virtual Private Cloud (VPC) public subnets. Replace the value `yourClusterName` with your cluster's name.
-
- -- `kubernetes.io/role/elb = 1` -- `sigs.k8s.io/cluster-api-provider-aws/role = public` -- `kubernetes.io/cluster/[yourClusterName] = shared` -- `sigs.k8s.io/cluster-api-provider-aws/cluster/[yourClusterName] = owned` \ No newline at end of file diff --git a/content/docs/04-clusters/01-public-cloud/01-aws/05-add-aws-accounts.md b/content/docs/04-clusters/01-public-cloud/01-aws/05-add-aws-accounts.md deleted file mode 100644 index da1e8d59fb..0000000000 --- a/content/docs/04-clusters/01-public-cloud/01-aws/05-add-aws-accounts.md +++ /dev/null @@ -1,266 +0,0 @@ ---- -title: "Register and Manage AWS Accounts" -metaTitle: "Add an AWS Account to Palette" -metaDescription: "Learn how to add an AWS account to Palette." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Add AWS Account - -Palette supports integration with AWS Cloud Accounts. This also includes support for [AWS GovCloud (US)](https://aws.amazon.com/govcloud-us/?whats-new-ess.sort-by=item.additionalFields.postDateTime&whats-new-ess.sort-order=desc) accounts. This section explains how to create an AWS cloud account in Palette. You can use any of the following authentication methods to register your cloud account. - -- [AWS credentials](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html) - -- [Security Token Service (STS)](https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html) - -
- - - - - - - - - -To add an AWS cloud account using static access credentials follow these steps: - -## Prerequisites - -- An AWS account -- Sufficient access to create an IAM role or IAM user. -- Palette IAM policies. Please review the [Required IAM Policies](/clusters/public-cloud/aws/required-iam-policies) section for guidance. - - -## Add AWS Account to Palette -1. Create an IAM Role or IAM User for Palette. Use the following resources if you need additional help. - - [IAM Role creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html). - - [IAM User creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). - - -2. In the AWS console, assign the Palette required IAM policies to the role or the IAM user that Palette will use. - - -3. Log in to [Palette](https://console.spectrocloud.com) as Tenant admin. - - -4. Go to **Tenant Settings** > **Cloud Accounts** and click **+Add AWS Account**. - - -5. In the cloud account creation wizard provide the following information: - * **Account Name:** Custom name for the cloud account. - - * **Description:** Optional description for the cloud account. - * **Partition:** Choose **AWS** from the drop-down menu. - - * **Credentials:** - * AWS Access key - * AWS Secret access key - - -6. Click the **Validate** button to validate the credentials. - -7. Once the credentials are validated, the **Add IAM Policies** toggle displays. Toggle **Add IAM Policies** on. - -8. A drop-down menu displays a lists of available AWS IAM policies in your AWS account. Select any desired IAM policies you want to assign to Palette IAM role or IAM user. - - -## Validate - -You can validate the account is available in Palette by reviewing the list of cloud accounts. To review the list of cloud accounts navigate to the left **Main Menu**. Click on **Tenant Settings**. Next, click on **Cloud Accounts**. Your newly added AWS cloud account is listed under the AWS sections. - - - - - -To add an AWS cloud account using STS credentials follow the steps below: - -## Prerequisites - -- An AWS account -- Sufficient access to create an IAM role or IAM user. -- Palette IAM policies. Please review the [Required IAM Policies](/clusters/public-cloud/aws/required-iam-policies) section for guidance. - - -## Add AWS Account to Palette - -1. Log in to [Palette](https://console.spectrocloud.com) as Tenant admin. - - -2. Go to **Tenant Settings** > **Cloud Accounts** and click **+Add AWS Account**. - - -3. In the cloud account creation wizard give the following information: - * **Account Name** - * **Description** - * Select **STS** authentication for validation: - - -4. You will be provided with information on the right hand-side of the wizard. You will need this information to create an IAM Role for Palette. The following table lists out the information provided by the wizard after your selects **STS**. - - |**Parameter**|**Description**| - |---------|---------------| - |**Trusted Entity Type**| Another AWS account| - |**Account ID**|Copy the Account ID displayed on the UI| - |**Require External ID**| Enable| - |**External ID**|Copy the External ID displayed on the UI| - |**Permissions Policy**|Search and select the 4 policies added in step #2| - |**Role Name**|SpectroCloudRole| - -5. In the AWS console, create a new IAM role for Palette. Use the following resources if you need additional help. - - [IAM Role creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html). 
- - [IAM User creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). - - -6. In the AWS console, assign the [Palette required IAM policies](/clusters/public-cloud/aws/required-iam-policies) to the role that Palette will use. - - -7. In the AWS console, browse to the **Role Details** page and copy the Amazon Resource Name (ARN) for the role. - - -8. In Palette, paste the role ARN into the **ARN** input box. - - -9. Click the **Validate** button to validate the credentials. - - -## Validate - -You can validate the account is available in Palette by reviewing the list of cloud accounts. To review the list of cloud accounts navigate to the left **Main Menu**. Click on **Tenant Settings**. Next, click on **Cloud Accounts**. Your newly added AWS cloud account is listed under the AWS sections. - - - - - - - - - - - -Palette supports integration with [AWS GovCloud (US)](https://aws.amazon.com/govcloud-us/?whats-new-ess.sort-by=item.additionalFields.postDateTime&whats-new-ess.sort-order=desc). Using Palette you can deploy Kubernetes clusters to your AWS GovCloud account. To get started with AWS GovCloud and Palette, use the following steps. -
- - - - - -## Prerequisites - -- An AWS account -- Sufficient access to create an IAM role or IAM user. -- Palette IAM policies. Please review the [Required IAM Policies](/clusters/public-cloud/aws/required-iam-policies) section for guidance. - -## Add AWS GovCloud Account to Palette - -1. Create an IAM Role or IAM User for Palette. Use the following resources if you need additional help. - - [IAM Role creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html). - - [IAM User creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). - - -2. In the AWS console, assign the Palette required IAM policies to the role or the IAM user that Palette will use. - - -3. Log in to [Palette](https://console.spectrocloud.com) as Tenant admin. - - -4. Go to **Tenant Settings** > **Cloud Accounts** and click **+Add AWS Account**. - - -5. In the cloud account creation wizard provide the following information: - * **Account Name:** Custom name for the cloud account. - - * **Description:** Optional description for the cloud account. - * **Partition:** Choose **AWS GovCloud US** from the drop-down menu. - - * **Credentials:** - * AWS Access key - * AWS Secret access key - - -6. Click the **Validate** button to validate the credentials. - -7. Once the credentials are validated, the **Add IAM Policies** toggle displays. Toggle **Add IAM Policies** on. - -8. A drop-down menu displays a lists of available AWS IAM policies in your AWS account. Select any desired IAM policies you want to assign to Palette IAM role or IAM user. - - -## Validate - -You can validate the account is available in Palette by reviewing the list of cloud accounts. To review the list of cloud accounts navigate to the left **Main Menu**. Click on **Tenant Settings**. Next, click **Cloud Accounts**. Your newly added AWS cloud account is listed under the AWS sections. - - - - - -To add an AWS GovCloud cloud account using STS credentials follow the steps below: - -## Prerequisites - -- An AWS account -- Sufficient access to create an IAM role or IAM user. -- Palette IAM policies. Please review the [Required IAM Policies](/clusters/public-cloud/aws/required-iam-policies) section for guidance. - - -## Add AWS GovCloud Account to Palette - -1. Log in to [Palette](https://console.spectrocloud.com) as Tenant admin. - - -2. Go to **Tenant Settings** > **Cloud Accounts** and click **+Add AWS Account**. - - -3. In the cloud account creation wizard give the following information: - * **Account Name** - * **Description** - * Select **STS** authentication for validation: - - -4. You will be provided with information on the right hand-side of the wizard. You will need this information to create an IAM Role for Palette. The following table lists out the information provided by the wizard after you selects **STS**. - - |**Parameter**|**Description**| - |---------|---------------| - |**Trusted Entity Type**| Another AWS account| - |**Account ID**|Copy the Account ID displayed on the UI| - |**Require External ID**| Enable| - |**External ID**|Copy the External ID displayed on the UI| - |**Permissions Policy**|Search and select the 4 policies added in step #2| - |**Role Name**|SpectroCloudRole| - -5. In the AWS console, create a new IAM role for Palette. Use the following resources if you need additional help. - - [IAM Role creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html). 
- - [IAM User creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). - - -6. In the AWS console, assign the [Palette required IAM policies](/clusters/public-cloud/aws/required-iam-policies) to the role that Palette will use. - - -7. In the AWS console, browse to the **Role Details** page and copy the Amazon Resource Name (ARN) for the role. - - -8. In Palette, paste the role ARN into the **ARN** input box. - - -9. Click the **Validate** button to validate the credentials. - - -## Validate - -You can validate that the account is available in Palette by reviewing the list of cloud accounts. To review the list of cloud accounts, navigate to the left **Main Menu**. Click on **Tenant Settings**. Next, click on **Cloud Accounts**. Your newly added AWS cloud account is listed under the AWS section. - - -
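If the **Validate** step fails for an STS-based account, you can check the role configuration outside of Palette with the AWS CLI. This is a hedged example; it assumes you used the suggested role name **SpectroCloudRole**, and the trusted account ID and external ID must match the values shown in the wizard:

```bash
# Inspect the trust policy of the role created for Palette to confirm the trusted
# account ID and the sts:ExternalId condition match the values from the wizard.
aws iam get-role --role-name SpectroCloudRole --query "Role.AssumeRolePolicyDocument"

# Confirm the required Palette policies are attached to the role.
aws iam list-attached-role-policies --role-name SpectroCloudRole
```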
- -
- -# Next Steps - -Now that you have added an AWS account to Palette, you can start deploying Kubernetes clusters to your AWS account. To learn how to get started with deploying Kubernetes clusters to AWS, check out the [Create and Manage AWS IaaS Cluster](/clusters/public-cloud/aws/create-cluster) guide or the [Create and Manage AWS EKS Cluster](/clusters/public-cloud/aws/eks) guide. \ No newline at end of file diff --git a/content/docs/04-clusters/01-public-cloud/01-aws/08-create-cluster.md b/content/docs/04-clusters/01-public-cloud/01-aws/08-create-cluster.md deleted file mode 100644 index 7af3f17dcb..0000000000 --- a/content/docs/04-clusters/01-public-cloud/01-aws/08-create-cluster.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: "Create and Manage AWS IaaS Cluster" -metaTitle: "Create and Manage AWS Cluster" -metaDescription: "Learn how to add and manage a cluster deployed to AWS." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Create and Manage AWS IaaS Cluster - -Palette supports creating and managing Kubernetes clusters deployed to an AWS account. This section guides you on how to create a Kubernetes cluster in AWS that is managed by Palette. - -# Prerequisites - -The following prerequisites must be met before deploying a cluster to AWS: - -- Access to an AWS cloud account - - -- You have added an AWS account in Palette. Review the [Add AWS Account](/clusters/public-cloud/aws/add-aws-accounts) for guidance. - - -- An infrastructure cluster profile. Review the [Create Cluster Profiles](/cluster-profiles/task-define-profile) for guidance. - - -- An [EC2 Key Pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the target region. - - -- Palette creates compute, network, and storage resources in AWS during the provisioning of Kubernetes clusters. Ensure there is sufficient capacity in the preferred AWS region for the creation of the following resources: - - vCPU - - VPC - - Elastic IP - - Internet Gateway - - Elastic Load Balancers - - NAT Gateway - - - - - -The following tags should be added to the virtual private network (VPC) public subnets to enable automatic subnet discovery for integration with AWS load balancer service. Replace the value `yourClusterName` with your cluster's name. - -
- -- `kubernetes.io/role/elb = 1` -- `sigs.k8s.io/cluster-api-provider-aws/role = public` -- `kubernetes.io/cluster/[yourClusterName] = shared` -- `sigs.k8s.io/cluster-api-provider-aws/cluster/[yourClusterName] = owned` - -
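For existing VPCs, one way to apply these tags is with the AWS CLI. This is a hedged sketch; the subnet ID and the `yourClusterName` value are placeholders that you must replace with your own:

```bash
# Tag a public subnet so Palette and the AWS load balancer integration can discover it.
# Repeat for each public subnet in the VPC, replacing the subnet ID and cluster name.
aws ec2 create-tags \
  --resources subnet-0123456789abcdef0 \
  --tags Key=kubernetes.io/role/elb,Value=1 \
         Key=sigs.k8s.io/cluster-api-provider-aws/role,Value=public \
         Key=kubernetes.io/cluster/yourClusterName,Value=shared \
         Key=sigs.k8s.io/cluster-api-provider-aws/cluster/yourClusterName,Value=owned
```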
- - -# Deploy an AWS Cluster - -Use the following steps to provision a new AWS cluster: - -1. Log in to [Palette](https://console.spectrocloud.com) and ensure you are in the correct project scope. - - -2. Navigate to the left **Main Menu** and click on **Clusters** - - -3. Click on **Add New Cluster** - - -4. You will receive a prompt asking you if you want to deploy a new cluster or import an existing cluster. Click on **Deploy New Cluster** - - -5. Select **AWS** and click on **Start AWS Configuration** - - -6. Populate the wizard page with the following information: name, description, tags and select AWS account. Tags on a cluster are propagated to the VMs deployed to the computing environments. Click on **Next** after you have filled out all the required information. - - -7. Select a cluster profile. Click on **Next**. - - -8. Review and customize pack parameters, as desired. By default, parameters for all packs are set with values, defined in the cluster profile. - - -9. Provide the AWS cloud account and placement information. - -
- - |**Parameter**| **Description**| - |-------------|---------------| - |**Cloud Account** | Select the desired cloud account. AWS cloud accounts with AWS credentials need to be pre-configured in project settings.| - |**Region** | Choose the preferred AWS region where you would like to provision clusters.| - |**SSH Key Pair Name** | Choose the desired SSH Key pair. SSH key pairs need to be pre-configured on AWS for the desired regions. The selected key is inserted into the provisioned VMs.| - |**Static Placement** | Check the **Static Placement** box if you want to deploy resources into pre-existing VPCs and subnets. Review the [Static Placement](/clusters/public-cloud/aws/create-cluster#staticplacement) table below to learn more about the required input fields.| - | **Private API Server LB**| Enable to deploy the cluster load balancer in a private subnet. This feature requires Palette to have direct network connectivity with the private subnet or a [Private Cluster Gateway](/clusters/data-center/maas/install-manage-maas-pcg) deployed in the environment.| - -
-
  #### Static Placement

  |Parameter|Description|
  |---|---|
  |**VPCID**|Select the Virtual Private Cloud (VPC) ID network from the **drop-down Menu**.|
  |**Control plane subnet**|Select the control plane network from the **drop-down Menu**.|
  |**Worker Network**|Select the worker network from the **drop-down Menu**.|

10. Configure the master and worker node pools. A master and a worker node pool are configured by default. This is the section where you can specify the availability zones (AZ), instance types, [instance cost type](/clusters/public-cloud/aws/architecture#spotinstances), disk size, and the number of nodes. Click on **Next** after you have completed configuring the node pool.
-
- - - - You can add new worker pools if you need to customize certain worker nodes to run specialized workloads. As an example, the default worker pool may be configured with the m3.large instance types for general-purpose workloads, and another worker pool with instance type g2.2xlarge can be configured to run GPU workloads. - - - - - - - - -12. An optional taint label can be applied to a node pool during the cluster creation. For an existing cluster, the taint label can be edited, review the [Node Pool](/clusters/cluster-management/node-pool) management page to learn more. Toggle the **Taint** button to create a label. - - -13. Enable or disable node pool taints. If tainting is enabled, then you need to provide values for the following parameters: - - |**Parameter**| **Description**| - |-------------|---------------| - |**Key** |Custom key for the taint.| - |**Value** | Custom value for the taint key.| - | **Effect** | Make the choice of effect from the drop-down menu. Review the effect table bellow for more details. | - - #### Effect Table - - |**Parameter**| **Description**| - |-------------|---------------| - | **NoSchedule**| A pod that cannot tolerate the node taint and should not be scheduled to the node. - | **PreferNoSchedule**| The system will avoid placing a non-tolerant pod to the tainted node but is not guaranteed. - | **NoExecute**| New pods will not be scheduled on the node, and existing pods on the node if any on the node will be evicted they do not tolerate the taint. | - - -14. If you checked the **Static Placement** box in the **Cluster config** page, you can specify additional AWS [security groups](https://docs.aws.amazon.com/vpc/latest/userguide/security-groups.html) to apply to the worker group nodes. Use the **Additional Security Groups (Optional) drop-down Menu** to select additional security groups. - - -15. Click on **Next**. - - -16. The settings page is where you can configure the patching schedule, security scans, backup settings, and set up Role Based Access Control (RBAC). Review the cluster settings and make changes if needed. Click on **Validate**. - -17. Review the settings summary and click on **Finish Configuration** to deploy the cluster. Provisioning IaaS clusters can take 15 - 30 minutes depending on the cluster profile and the node pool configuration. - -The cluster details page of the cluster contains the status and details of the deployment. Use this page to track the deployment progress. - - -# Validate - -You can validate that your cluster is up and available by reviewing the cluster details page. - -1. Log in to [Palette](https://console.spectrocloud.com). - - - -2. Navigate to the left **Main Menu** and click on **Clusters**. - - -3. The **Clusters** page contains a list of the available clusters Palette manages. Click on the row for the cluster you wish to review its details page. - - - -4. From the cluster details page, verify the **Cluster Status** field displays **Running**. - - -# Next Steps - -Now that you have a Kubernetes cluster deployed, you can start developing and deploying applications to your clusters. We recommend you review the day two responsibilities and become familiar with the cluster management tasks. Check out the [Manage Clusters](/clusters/cluster-management) documentation to learn more about day two responsibilities. 
\ No newline at end of file diff --git a/content/docs/04-clusters/01-public-cloud/01-aws/09-eks.md b/content/docs/04-clusters/01-public-cloud/01-aws/09-eks.md deleted file mode 100644 index 09b7d5da78..0000000000 --- a/content/docs/04-clusters/01-public-cloud/01-aws/09-eks.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: "Create and Manage AWS EKS Cluster" -metaTitle: "Creating new AWS EKS clusters on Palette" -metaDescription: "Learn how to deploy and manage AWS EKS clusters with Palette" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - - -# Create and Manage AWS EKS Cluster - -Palette supports creating and managing AWS Elastic Kubernetes Service (EKS) clusters deployed to an AWS account. This section guides you on how to create an AWS EKS cluster in AWS that is managed by Palette. - -# Prerequisites - -The following prerequisites must be met before deploying a cluster to AWS: - -- Access to an AWS cloud account -- Palette integration with AWS account. Review the [Add AWS Account](/clusters/public-cloud/aws/add-aws-accounts) for guidance. -- An infrastructure cluster profile for AWS EKS. Review the [Create Cluster Profiles](/cluster-profiles/task-define-profile) for guidance. -- An [EC2 Key Pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the target region. -- Palette creates compute, network, and storage resources in AWS during the provisioning of Kubernetes clusters. Ensure there is sufficient capacity in the preferred AWS region for the creation of the following resources: - - vCPU - - VPC - - Elastic IP - - Internet Gateway - - Elastic Load Balancers - - NAT Gateway - - - - - -The following tags should be added to the virtual private network (VPC) public subnets to enable automatic subnet discovery for integration with AWS load balancer service. Replace the value `yourClusterName` with your cluster's name. - -
- -- `kubernetes.io/role/elb = 1` -- `sigs.k8s.io/cluster-api-provider-aws/role = public` -- `kubernetes.io/cluster/[yourClusterName] = shared` -- `sigs.k8s.io/cluster-api-provider-aws/cluster/[yourClusterName] = owned` - -
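To confirm the tags are in place before provisioning, you can query for the tagged public subnets. This is a hedged example; the VPC ID is a placeholder:

```bash
# List subnets in the target VPC that carry the load balancer discovery tag.
aws ec2 describe-subnets \
  --filters "Name=vpc-id,Values=vpc-0123456789abcdef0" \
            "Name=tag:kubernetes.io/role/elb,Values=1" \
  --query "Subnets[].SubnetId"
```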
- -# Deploy an AWS Cluster - -Use the following steps to provision a new AWS EKS cluster: - -1. Ensure you are in the correct project scope. - - -2. Navigate to the left **Main Menu** and click on **Clusters** - - -3. Click on **Add New Cluster** - - -4. You will receive a prompt asking you if you want to deploy a new cluster or import an existing cluster. Click on **Deploy New Cluster** - - -5. Select **AWS** and click on **Start AWS Configuration** - - -6. Populate the wizard page with the following information: name, description, tags and AWS account. Tags on a cluster are propagated to the VMs deployed to the target environments. Click on **Next** after you have filled out all the required information. - -7. Selected **Managed Kubernetes** and click on your cluster profile that supports AWS EKS. Click on **Next**. - - -8. Review and customize pack parameters, as desired. By default, parameters for all packs are set with values, defined in the cluster profile. Click on **Next**. - - -9. Provide the AWS cloud account and placement information. - - |**Parameter**| **Description**| - |-------------|---------------| - |**Cloud Account** | Select the desired cloud account. AWS cloud accounts with AWS credentials need to be pre-configured in project settings.| - |**Static Placement** | By default, Palette uses dynamic placement, wherein a new VPC with a public and private subnet is created to place cluster resources for every cluster.
These resources are fully managed by Palette and deleted, when the corresponding cluster is deleted. Turn on the **Static Placement** option if it's desired to place resources into preexisting VPCs and subnets.| - |**Region** | Choose the preferred AWS region where you would like the clusters to be provisioned.| - |**SSH Key Pair Name** | Choose the desired SSH Key pair. SSH key pairs need to be pre-configured on AWS for the desired regions. The selected key is inserted into the VMs provisioned.| - |**Cluster Endpoint Access**| Select Private, Public or Private & Public, in order to control communication with the Kubernetes API endpoint. For more information, refer to the [Amazon EKS cluster endpoint access control](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) reference guide. If you set the cluster endpoint to Public, specify `0.0.0.0/0` in the Public Access CIDR field to open it to all possible IP addresses. Otherwise, Palette will not open it up entirely. | - |**Public Access CIDR** |This setting controls which IP address CIDR range can access the cluster. To fully allow unrestricted network access, enter `0.0.0.0/0` in the field. For more information, refer to the [Amazon EKS cluster endpoint access control](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) reference guide.| - |**Enable Encryption**|The user can enable secret encryption by toggling **Enable Encryption**. Provide the provider KMS key ARN to complete the wizard. Review [EKS Cluster Encryption](/clusters/public-cloud/aws/eks/#eksclustersecretsencryption) for more details.| - |**Worker Pool Update**|Optionally enable the option to update the worker pool in parallel.| - - -10. Make the choice of updating the worker pool in parallel, if required. Click on **Next**. - - -11. Configure the master and worker node pools. A single master and a worker node pool are configured by default. This is the section where you can specify the availability zones (AZ), instance types, [instance cost type](/clusters/public-cloud/aws/architecture#spotinstances), disk size, and the number of nodes. Use the following tables to better understand the available input options. - - |**Parameter**| **Description**| - |-------------|----------------| - |**Name** | A descriptive name for the node pool.| - |**Size** | Make your choice of minimum, maximum and desired sizes for the worker pool. The size of the worker pool will scale between the minimum and maximum size under varying workload conditions. Review the [AWS Instance Type and Pod Capacity](/clusters/public-cloud/aws/architecture#awsinstancetypeandpodcapacity) documentation for help in determining the proper instance type and size. 
|
|[Taints](/clusters/cluster-management/taints#overviewontaints) |Optionally enable taints to restrict which pods can be scheduled on the nodes in the node pool.|
|[Labels](/clusters/cluster-management/taints#overviewonlabels) |Optionally enable labels to constrain a pod to only run on a particular set of nodes.|
|**Instance Type** | Select the AWS instance type to be used for all nodes in the node pool.|

* Cloud Configuration settings:

|**Parameter**| **Description**|
|-------------|----------------|
|**Instance Option**| Choose between on-demand or spot instances.|
|**Instance Type**| Choose an instance type.|
|**Availability Zones**|Select at least one availability zone within the VPC.|
|**Disk Size**|Choose the disk size as per your requirement.|

* You can create one or more Fargate profiles for the EKS cluster to use.

|**Parameter**| **Description**|
|-------------|---------------|
|**Name** |Provide a name for the Fargate profile.|
|**Subnets** |Pods running on Fargate Profiles are not assigned public IP addresses, so only private subnets (with no direct route to an Internet Gateway) are accepted for this parameter. For dynamic provisioning, this input is not required and subnets are automatically selected.|
|**Selectors** |Define the pod selector by providing a target namespace and, optionally, labels. Pods with matching namespace and app labels are scheduled to run on dynamically provisioned compute nodes.
You can have up to five selectors in a Fargate profile and a pod only needs to match one selector to run using the Fargate profile.| - - - -You can add new worker pools if you need to customize certain worker nodes to run specialized workloads. As an example, the default worker pool may be configured with the m3.large instance types for general-purpose workloads, and another worker pool with instance type g2.2xlarge can be configured to run GPU workloads. - - - -12. An optional taint label can be applied to a node pool during the cluster creation. For a an existing cluster, the taint label can be edited, review the [Node Pool](/clusters/cluster-management/node-pool) management page to learn more. Toggle the **Taint** button to create a label. - - -13. Enable or disable node pool taints. If tainting is enabled then you need provide values for the following parameters: - - |**Parameter**| **Description**| - |-------------|---------------| - |**Key** |Custom key for the taint.| - |**Value** | Custom value for the taint key.| - | **Effect** | Make the choice of effect from the drop-down menu. Review the effect table bellow for more details. | - - #### Effect Table - - |**Parameter**| **Description**| - |-------------|---------------| - | **NoSchedule**| A pod that cannot tolerate the node taint and should not be scheduled to the node. - | **PreferNoSchedule**| The system will avoid placing a non-tolerant pod to the tainted node but is not guaranteed. - | **NoExecute**| New pods will not be scheduled on the node, and existing pods on the node if any on the node will be evicted they do not tolerate the taint. | - -14. Click on **Next**. - -15. The settings page is where you can configure patching schedule, security scans, backup settings, setup role based access control (RBAC), and enable [Palette Virtual Clusters](/devx/palette-virtual-clusters). Review the settings and make changes if needed. Click on **Validate**. - -16. Review the settings summary and click on **Finish Configuration** to deploy the cluster. Be aware that provisioning an AWS EKS clusters can take several minutes. - -The cluster details page of the cluster contains the status and details of the deployment. Use this page to track the deployment progress. - - -# Validate - -You can validate your cluster is up and running by reviewing the cluster details page. Navigate to the left **Main Menu** and click on **Clusters**. The **Clusters** page contains a list of all available clusters managed by Palette. Click on the row for the cluster you wish to review its details page. Ensure the **Cluster Status** field contains the value **Running**. - - -# EKS Cluster Secrets Encryption - -Palette encourages using AWS Key Management Service (KMS) to provide envelope encryption of Kubernetes secrets stored in Amazon Elastic Kubernetes Service (EKS) clusters. This encryption is -a defense-in-depth security strategy to protect sensitive data such as passwords, docker registry credentials, and TLS keys stored as [Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/). - -## Prerequisites - -* KMS key created in the AWS account. -* KMS key is of the type symmetric. -* KMS key policy permits the following actions; encrypt and decrypt. - -## Configure KMS - -The IAM User or IAM role that Palette is using must have the following IAM permissions. 
- -```json hideClipboard -kms:CreateGrant, -kms:ListAliases, -kms:ListKeys, -kms:DescribeKeys -``` -Ensure the IAM role or IAM user can perform the required IAM permissions on the KMS key that will be used for EKS. -You can enable secret encryption during the EKS cluster creation process by toggling the encryption button providing the Amazon Resource Name (ARN) of the encryption key. The encryption option is available on the cluster creation wizard's **Cluster Config** page. diff --git a/content/docs/04-clusters/01-public-cloud/01-aws/10-required-iam-policies.md b/content/docs/04-clusters/01-public-cloud/01-aws/10-required-iam-policies.md deleted file mode 100644 index abc8b73045..0000000000 --- a/content/docs/04-clusters/01-public-cloud/01-aws/10-required-iam-policies.md +++ /dev/null @@ -1,868 +0,0 @@ ---- -title: "Required IAM Policies" -metaTitle: "Required IAM Policies or Palette" -metaDescription: "A list of required IAM policies that Palette requires." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Required IAM Policies - -Palette requires proper Amazon Web Services (AWS) permissions to operate and perform actions on your behalf. -The following policies include all the permissions needed for cluster provisioning with Palette. -
- -* **PaletteControllersPolicy** - - -* **PaletteControlPlanePolicy** - - -* **PaletteNodesPolicy** - - -* **PaletteDeploymentPolicy** - -Additional IAM policies may be required depending on the use case. For example, AWS Elastic Kubernetes Service (EKS) requires the **PaletteControllersEKSPolicy**. Check out the [Controllers EKS Policy](/clusters/public-cloud/aws/required-iam-policies#controllersekspolicy) section to review the IAM policy. - -
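If you create these as customer-managed policies in your AWS account, they can be attached to the IAM role that Palette assumes. The following is a hedged example using the AWS CLI; the account ID is a placeholder, and the role and policy names assume you used the names suggested in this guide:

```bash
# Attach one of the Palette policies to the role used by Palette.
# Repeat for each required policy, and for PaletteControllersEKSPolicy when using EKS.
aws iam attach-role-policy \
  --role-name SpectroCloudRole \
  --policy-arn arn:aws:iam::123456789012:policy/PaletteControllersPolicy
```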
- - - -You can attach a maximum of ten managed policies to an IAM User or role. Exceeding this limit will result in cluster deployment failures. If you find yourself in a scenario where you are exceeding the limit, consider combining policies into a custom-managed policy. -You can learn more about AWS IAM limits in the [IAM Quotas](https://docs.aws.amazon.com/us_en/IAM/latest/UserGuide/reference_iam-quotas.html) reference guide. - - - - - - - - -**Last Update**: April 20, 2023 - -``` json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": [ - "iam:DeleteOpenIDConnectProvider", - "iam:GetOpenIDConnectProvider", - "iam:ListOpenIDConnectProviders", - "iam:TagOpenIDConnectProvider", - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeInstanceRefreshes", - "ec2:AllocateAddress", - "ec2:AssociateRouteTable", - "ec2:AttachInternetGateway", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateInternetGateway", - "ec2:CreateLaunchTemplate", - "ec2:CreateLaunchTemplateVersion", - "ec2:CreateNatGateway", - "ec2:CreateRoute", - "ec2:CreateRouteTable", - "ec2:CreateSecurityGroup", - "ec2:CreateSubnet", - "ec2:CreateTags", - "ec2:CreateVpc", - "ec2:DeleteInternetGateway", - "ec2:DeleteLaunchTemplate", - "ec2:DeleteLaunchTemplateVersions", - "ec2:DeleteNatGateway", - "ec2:DeleteRouteTable", - "ec2:DeleteSecurityGroup", - "ec2:DeleteSubnet", - "ec2:DeleteTags", - "ec2:DeleteVpc", - "ec2:DescribeAccountAttributes", - "ec2:DescribeAddresses", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeImages", - "ec2:DescribeInstances", - "ec2:DescribeInternetGateways", - "ec2:DescribeKeyPairs", - "ec2:DescribeLaunchTemplates", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeNatGateways", - "ec2:DescribeNetworkInterfaceAttribute", - "ec2:DescribeNetworkInterfaces", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:DescribeVpcAttribute", - "ec2:DescribeVpcs", - "ec2:DetachInternetGateway", - "ec2:DisassociateAddress", - "ec2:DisassociateRouteTable", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyNetworkInterfaceAttribute", - "ec2:ModifySubnetAttribute", - "ec2:ModifyVpcAttribute", - "ec2:ReleaseAddress", - "ec2:ReplaceRoute", - "ec2:RevokeSecurityGroupIngress", - "ec2:RunInstances", - "ec2:TerminateInstances", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeTags", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:RemoveTags", - "iam:CreateOpenIDConnectProvider", - "tag:GetResources" - ], - "Resource": [ - "*" - ], - "Effect": "Allow" - }, - { - "Action": [ - "autoscaling:CreateAutoScalingGroup", - "autoscaling:UpdateAutoScalingGroup", - "autoscaling:CreateOrUpdateTags", - "autoscaling:StartInstanceRefresh", - "autoscaling:DeleteAutoScalingGroup", - "autoscaling:DeleteTags" - ], - "Resource": [ - "arn:*:autoscaling:*:*:autoScalingGroup:*:autoScalingGroupName/*" - ], - "Effect": "Allow" - }, - { - "Condition": { - "StringLike": { - "iam:AWSServiceName": "autoscaling.amazonaws.com" - } - }, - "Action": [ - 
"iam:CreateServiceLinkedRole" - ], - "Resource": [ - "arn:*:iam::*:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling" - ], - "Effect": "Allow" - }, - { - "Condition": { - "StringLike": { - "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com" - } - }, - "Action": [ - "iam:CreateServiceLinkedRole" - ], - "Resource": [ - "arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing" - ], - "Effect": "Allow" - }, - { - "Condition": { - "StringLike": { - "iam:AWSServiceName": "spot.amazonaws.com" - } - }, - "Action": [ - "iam:CreateServiceLinkedRole" - ], - "Resource": [ - "arn:*:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot" - ], - "Effect": "Allow" - }, - { - "Action": [ - "iam:PassRole" - ], - "Resource": [ - "arn:*:iam::*:role/*.cluster-api-provider-aws.sigs.k8s.io" - ], - "Effect": "Allow" - }, - { - "Action": [ - "secretsmanager:CreateSecret", - "secretsmanager:DeleteSecret", - "secretsmanager:TagResource" - ], - "Resource": [ - "arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/*" - ], - "Effect": "Allow" - }, - { - "Action": [ - "s3:DeleteObject", - "s3:PutBucketOwnershipControls", - "s3:PutBucketPolicy", - "s3:PutBucketPublicAccessBlock", - "s3:PutObjectAcl", - "s3:PutObject" - ], - "Resource": [ - "arn:*:s3:::*" - ], - "Effect": "Allow" - } - ] -} -``` - - - - - -**Last Update**: April 20, 2023 - -``` json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeImages", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - 
"elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ], - "Effect": "Allow" - } - ] -} -``` - - - - - -**Last Update**: May 2, 2021 - -``` json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": [ - "*" - ], - "Effect": "Allow" - }, - { - "Action": [ - "secretsmanager:DeleteSecret", - "secretsmanager:GetSecretValue" - ], - "Resource": [ - "arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/*" - ], - "Effect": "Allow" - }, - { - "Action": [ - "ssm:UpdateInstanceInformation", - "ssmmessages:CreateControlChannel", - "ssmmessages:CreateDataChannel", - "ssmmessages:OpenControlChannel", - "ssmmessages:OpenDataChannel", - "s3:GetEncryptionConfiguration" - ], - "Resource": [ - "*" - ], - "Effect": "Allow" - } - ] -} -``` - - - - -**Last Update**: April 20, 2023 - -``` json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "cloudformation:CreateStack", - "cloudformation:DescribeStacks", - "cloudformation:UpdateStack", - "ec2:CreateSnapshot", - "ec2:DeleteSnapshot", - "ec2:DescribeSnapshots", - "ec2:DescribeTags", - "ec2:DescribeVolumesModifications", - "ec2:DescribeKeyPairs", - "iam:AttachGroupPolicy", - "iam:CreatePolicy", - "iam:CreatePolicyVersion", - "iam:DeletePolicy", - "iam:DeletePolicyVersion", - "iam:DetachGroupPolicy", - "iam:GetGroup", - "iam:GetInstanceProfile", - "iam:GetPolicy", - "iam:GetUser", - "iam:ListPolicies", - "iam:ListPolicyVersions", - "pricing:GetProducts", - "sts:AssumeRole", - "sts:GetServiceBearerToken", - "iam:AddRoleToInstanceProfile", - "iam:AddUserToGroup", - "iam:CreateGroup", - "iam:CreateInstanceProfile", - "iam:CreateUser", - "iam:DeleteGroup", - "iam:DeleteInstanceProfile", - "iam:RemoveRoleFromInstanceProfile", - "iam:RemoveUserFromGroup" - ], - "Resource": "*" - } - ] -} -``` - - - - - - -## Controllers EKS Policy - -If you plan to deploy host clusters to AWS EKS, make sure to attach the **PaletteControllersEKSPolicy**. 
- -**Last Update**: April 20, 2023 - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": [ - "ssm:GetParameter" - ], - "Resource": [ - "arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/*" - ], - "Effect": "Allow" - }, - { - "Condition": { - "StringLike": { - "iam:AWSServiceName": "eks.amazonaws.com" - } - }, - "Action": [ - "iam:CreateServiceLinkedRole" - ], - "Resource": [ - "arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS" - ], - "Effect": "Allow" - }, - { - "Condition": { - "StringLike": { - "iam:AWSServiceName": "eks-nodegroup.amazonaws.com" - } - }, - "Action": [ - "iam:CreateServiceLinkedRole" - ], - "Resource": [ - "arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup" - ], - "Effect": "Allow" - }, - { - "Condition": { - "StringLike": { - "iam:AWSServiceName": "eks-fargate.amazonaws.com" - } - }, - "Action": [ - "iam:CreateServiceLinkedRole" - ], - "Resource": [ - "arn:*:iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate" - ], - "Effect": "Allow" - }, - { - "Action": [ - "iam:AddClientIDToOpenIDConnectProvider", - "iam:CreateOpenIDConnectProvider", - "iam:DeleteOpenIDConnectProvider", - "iam:ListOpenIDConnectProviders", - "iam:UpdateOpenIDConnectProviderThumbprint" - ], - "Resource": [ - "*" - ], - "Effect": "Allow" - }, - { - "Action": [ - "iam:GetRole", - "iam:ListAttachedRolePolicies", - "iam:DetachRolePolicy", - "iam:DeleteRole", - "iam:CreateRole", - "iam:TagRole", - "iam:AttachRolePolicy" - ], - "Resource": [ - "arn:*:iam::*:role/*" - ], - "Effect": "Allow" - }, - { - "Action": [ - "iam:GetPolicy" - ], - "Resource": [ - "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" - ], - "Effect": "Allow" - }, - { - "Action": [ - "eks:DescribeCluster", - "eks:ListClusters", - "eks:CreateCluster", - "eks:TagResource", - "eks:UpdateClusterVersion", - "eks:DeleteCluster", - "eks:UpdateClusterConfig", - "eks:UntagResource", - "eks:UpdateNodegroupVersion", - "eks:DescribeNodegroup", - "eks:DeleteNodegroup", - "eks:UpdateNodegroupConfig", - "eks:CreateNodegroup", - "eks:AssociateEncryptionConfig", - "eks:ListIdentityProviderConfigs", - "eks:AssociateIdentityProviderConfig", - "eks:DescribeIdentityProviderConfig", - "eks:DisassociateIdentityProviderConfig" - ], - "Resource": [ - "arn:*:eks:*:*:cluster/*", - "arn:*:eks:*:*:nodegroup/*/*/*" - ], - "Effect": "Allow" - }, - { - "Action": [ - "ec2:AssociateVpcCidrBlock", - "ec2:DisassociateVpcCidrBlock", - "eks:ListAddons", - "eks:CreateAddon", - "eks:DescribeAddonVersions", - "eks:DescribeAddon", - "eks:DeleteAddon", - "eks:UpdateAddon", - "eks:TagResource", - "eks:DescribeFargateProfile", - "eks:CreateFargateProfile", - "eks:DeleteFargateProfile" - ], - "Resource": [ - "*" - ], - "Effect": "Allow" - }, - { - "Condition": { - "StringEquals": { - "iam:PassedToService": "eks.amazonaws.com" - } - }, - "Action": [ - "iam:PassRole" - ], - "Resource": [ - "*" - ], - "Effect": "Allow" - }, - { - "Condition": { - "ForAnyValue:StringLike": { - "kms:ResourceAliases": "alias/cluster-api-provider-aws-*" - } - }, - "Action": [ - "kms:CreateGrant", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ], - "Effect": "Allow" - } - ] -} -``` - -# Restricting Palette VPC Permissions - -You can choose to have Palette operate in a static or dynamic environment. You can configure Palette to perform an AWS cluster creation into an existing VPC. 
The following policy allows Palette to operate while restricting its access in accordance with the [Principle of Least Privilege](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege). - - -
-
- - - - -This policy is for those who want Palette to dynamically create and tear down the VPC and associated network resources for each cluster. Unlike the static policy, it includes permissions to create and delete VPCs. - 
- - -### Minimum Dynamic Permissions - - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DescribeInstances", - "iam:RemoveRoleFromInstanceProfile", - "ec2:AttachInternetGateway", - "iam:AddRoleToInstanceProfile", - "ec2:DeleteRouteTable", - "ec2:AssociateRouteTable", - "ec2:DescribeInternetGateways", - "ec2:CreateRoute", - "ec2:CreateInternetGateway", - "ec2:DescribeVolumes", - "ec2:DescribeKeyPairs", - "ec2:DescribeNetworkAcls", - "ec2:DescribeRouteTables", - "ec2:CreateTags", - "ec2:CreateRouteTable", - "ec2:RunInstances", - "ec2:ModifyInstanceAttribute", - "ec2:TerminateInstances", - "ec2:DetachInternetGateway", - "ec2:DisassociateRouteTable", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeIpv6Pools", - "ec2:DeleteVpc", - "ec2:CreateSubnet", - "ec2:DescribeSubnets", - "iam:CreateInstanceProfile", - "ec2:DisassociateAddress", - "ec2:DescribeAddresses", - "ec2:CreateNatGateway", - "ec2:DescribeRegions", - "ec2:CreateVpc", - "ec2:DescribeDhcpOptions", - "ec2:DescribeVpcAttribute", - "ec2:DescribeNetworkInterfaces", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeNetworkInterfaceAttribute", - "ec2:CreateSecurityGroup", - "ec2:ModifyVpcAttribute", - "iam:DeleteInstanceProfile", - "ec2:ReleaseAddress", - "iam:GetInstanceProfile", - "ec2:DescribeTags", - "ec2:DeleteRoute", - "ec2:DescribeNatGateways", - "ec2:DescribeIpamPools", - "ec2:AllocateAddress", - "ec2:DescribeSecurityGroups", - "ec2:DescribeImages", - "ec2:DescribeVpcs", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:DescribeTags", - "secretsmanager:CreateSecret", - "secretsmanager:DeleteSecret", - "secretsmanager:TagResource", - "secretsmanager:GetSecretValue", - "autoscaling:StartInstanceRefresh", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "eks:DescribeCluster", - "eks:ListClusters", - "cloudformation:CreateStack", - "cloudformation:DescribeStacks", - "cloudformation:UpdateStack", - "ecr:GetAuthorizationToken", - "iam:PassRole", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DescribeTargetHealth", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage", - "ec2:DeleteInternetGateway", - "ec2:DeleteNatGateway", - "ec2:DeleteNetworkInterface", - "ec2:DeleteSecurityGroup", - "ec2:DeleteSubnet", - "ec2:DeleteTags", - "ssm:UpdateInstanceInformation", - "ssmmessages:CreateControlChannel", - "ssmmessages:CreateDataChannel", - "ssmmessages:OpenControlChannel", - "ssmmessages:OpenDataChannel", - "pricing:GetProducts", - "sts:AssumeRole", - "ec2:ReplaceRoute", - "ec2:ModifyNetworkInterfaceAttribute", - "ec2:AssociateAddress", - "tag:GetResources", - "ec2:ModifySubnetAttribute" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "iam:PassRole" - ], - "Resource": [ - "arn:*:iam::*:role/*.cluster-api-provider-aws.sigs.k8s.io" - ] - } - ] -} -``` - -
- - - - -This is a policy for those who want to restrict Palette to a single VPC and not give Palette access to create or delete VPCs. - -
- -### Minimum Static Permissions - - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:DescribeInstances", - "iam:RemoveRoleFromInstanceProfile", - "pricing:GetProducts", - "sts:AssumeRole", - "ec2:DescribeRegions", - "ec2:DescribeKeyPairs", - "ec2:DescribeVpcs", - "ec2:DescribeVpcAttribute", - "ec2:DescribeSubnets", - "cloudformation:DescribeStacks", - "cloudformation:CreateStack", - "cloudformation:UpdateStack", - "ec2:DescribeRouteTables", - "ec2:DescribeNatGateways", - "ec2:DescribeSecurityGroups", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DescribeTags", - "secretsmanager:CreateSecret", - "secretsmanager:TagResource", - "secretsmanager:GetSecretValue", - "secretsmanager:DeleteSecret", - "iam:GetInstanceProfile", - "iam:AddRoleToInstanceProfile", - "iam:CreateInstanceProfile", - "iam:DeleteInstanceProfile", - "ec2:RunInstances", - "ec2:ModifyInstanceAttribute", - "ec2:TerminateInstances", - "autoscaling:StartInstanceRefresh", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "ssm:UpdateInstanceInformation", - "ec2:DescribeAvailabilityZones", - "eks:DescribeCluster", - "eks:ListClusters", - "ec2:CreateSecurityGroup", - "ec2:DeleteSecurityGroup", - "ec2:RevokeSecurityGroupIngress", - "ssmmessages:CreateControlChannel", - "ssmmessages:CreateDataChannel", - "ssmmessages:OpenControlChannel", - "ssmmessages:OpenDataChannel", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DescribeTargetHealth", - "ec2:CreateTags", - "ec2:DescribeNetworkInterfaces", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "ec2:DisassociateAddress", - "ec2:DescribeAddresses", - "ec2:DescribeVolumes", - "ec2:DescribeImages", - "ec2:ModifyVpcAttribute", - "s3:GetEncryptionConfiguration", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:DescribeVolumesModifications", - "ec2:DetachVolume", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "ec2:DetachInternetGateway", - "ec2:DeleteNetworkInterface", - "tag:GetResources", - "ec2:ReleaseAddress", - "ec2:ModifyNetworkInterfaceAttribute", - "ec2:DescribeNetworkInterfaceAttribute", - "ec2:AllocateAddress", - "ec2:AssociateAddress" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "iam:PassRole" - ], - "Resource": [ - "arn:*:iam::*:role/*.cluster-api-provider-aws.sigs.k8s.io" - ] - } - ] -} -``` - -
- -
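Once you have chosen the dynamic or static policy above, it must exist in your AWS account as a customer-managed policy and be attached to the IAM role or user that Palette uses. The following AWS CLI commands are a minimal sketch of that workflow; the policy file name, policy name, role name, and account ID are placeholders and not values defined by Palette.

```shell
# Save the chosen JSON policy locally, for example as palette-vpc-policy.json,
# then register it as a customer-managed IAM policy.
aws iam create-policy \
  --policy-name PaletteVpcRestrictedPolicy \
  --policy-document file://palette-vpc-policy.json

# Attach the policy to the IAM role that Palette assumes.
aws iam attach-role-policy \
  --role-name <palette-role-name> \
  --policy-arn arn:aws:iam::<aws-account-id>:policy/PaletteVpcRestrictedPolicy
```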
- - - - -The following are important points to be aware of. - -- Ensure that the role you create contains all the policies defined above. - -- These IAM policies cannot be used as inline policies because they exceed the 2048 non-whitespace character limit set by AWS. - -- The following warning is expected and can be ignored: These policies define some actions, resources, or conditions that do not provide permissions. To grant access, policies must have an action that has an applicable resource or condition. - - - -# Global Role Additional Policies: - -There may be situations where additional node-level policies must be added to your deployment. For instance, when you create a host cluster with the **AWS EBS CSI** storage layer, ensure **AmazonEBSCSIDriverPolicy** is included. To add node-level policies, switch to the **Tenant Admin** project, and click on **Tenant Settings** in the **Main Menu**. Click on **Cloud Accounts**. Add an account if one does not exist. After the AWS credentials are validated, ensure `Add IAM policies` is enabled. You can specify additional Amazon Resource Names (ARNs) to attach. The attached policies are included in all clusters launched with this specific AWS cloud account, as shown in the example below. - 
- -** AmazonEBSCSIDriverPolicy:** -```yml -roleName: "custom-ng-role" - roleAdditionalPolicies: - - "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy" -``` diff --git a/content/docs/04-clusters/01-public-cloud/03-azure.md b/content/docs/04-clusters/01-public-cloud/03-azure.md deleted file mode 100644 index 9a28db9cc0..0000000000 --- a/content/docs/04-clusters/01-public-cloud/03-azure.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: "Azure" -metaTitle: "Creating new clusters on Palette" -metaDescription: "The methods of creating clusters for a speedy deployment on any CSP" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -Palette supports integration with [Microsoft Azure](https://azure.microsoft.com/en-us/). You can deploy and manage [Host Clusters](/glossary-all#hostcluster) in Azure. To get started check out the [Register and Manage Azure Cloud Account](/clusters/public-cloud/azure/azure-cloud). - -
- -# Resources - -To learn more about Palette and Azure cluster creation and its capabilities check out the following resources: - -- [Register and Manage Azure Cloud Account](/clusters/public-cloud/azure/azure-cloud) - - -- [Create and Manage Azure Cluster](/clusters/public-cloud/azure/create-azure-cluster#deployinganazurecluster) - - -- [Deleting an Azure Cluster](/clusters/public-cloud/azure/create-azure-cluster#deletinganazurecluster) - - -- [Force Delete a Cluster](/clusters/public-cloud/azure/create-azure-cluster#forcedeleteacluster) - - -- [Cluster Management Day Two Operations](/clusters/cluster-management) - - -- [Azure Architecture](/clusters/public-cloud/azure/architecture) - - -- [Cluster Removal](/clusters/cluster-management/remove-clusters) - diff --git a/content/docs/04-clusters/01-public-cloud/03-azure/00-architecture.md b/content/docs/04-clusters/01-public-cloud/03-azure/00-architecture.md deleted file mode 100644 index 50dd348d2b..0000000000 --- a/content/docs/04-clusters/01-public-cloud/03-azure/00-architecture.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: "Architecture" -metaTitle: "Azure Architecture with Palette" -metaDescription: "Learn about how Palette integrates with Azure and the architecture that powers the integration" -hideToC: false -fullWidth: false ---- - -# Azure IaaS Architecture - -The following are some architectural highlights of Azure clusters deployed by Palette: - -- Azure cluster resources are placed within an existing Resource Group. - - -- Nodes are provisioned within a Virtual Network that is auto-created or preexisting, with one subnet for control plane nodes and one for worker nodes. These two subnets are secured with separate Network Security Groups. Both subnets can span across multiple availability zones (AZs). - - -- Worker nodes are distributed across multiple AZs. - - -- None of the control plane nodes and worker nodes have public IPs attached. The Kubernetes API Server endpoint is accessed through a public load balancer. - - -![An Azure IaaS architecture diagram](/clusters_azure_architecture_iaas-overview.png) - - - -# Azure AKS Architecture - -The integration between Palette and Azure AKS unlocks the following capabilities. - -- Palette platform enables containerized applications' effortless deployment and management with fully managed AKS. - - -- Palette provides the you with a with serverless Kubernetes experience, an integrated continuous integration and continuous delivery (CI/CD) experience, and enterprise-grade security and governance. - - -- Palette helps you unite the development and operations to a single platform. This unification helps you achieve faster builds, delivery, and scaling of applications with credence. - - -- The infrastructure has event-driven autoscaling and triggers that enable elastic provisioning for self-managed infrastructure. - - -- Leverage extensive authentication and authorization capabilities by using Azure Active Directory and dynamic rules enforcement, across multiple clusters with Azure Policy. - - -![An Azure AKS architecture diagram](/clusters_azure_architecture_aks-diagram.png) - - -# Azure Storage - -During an Azure cluster deployment, Palette creates an [Azure storage account](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-overview) and storage container. Palette copies the base virtual hard disk (VHD) image to the Palette default storage container in the default Palette storage account. 
The storage account Palette created has unrestricted access and has an auto-generated name. You can attach a custom storage account or storage containers to the Azure cluster. - -Before the Azure cluster creation process, you must have created custom storage accounts or containers. All custom storage accounts and containers will be listed in the **Cluster config** page during the cluster creation process. If you need help creating a custom storage account or container, check out the Azure [Create a Storage Account](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-create?tabs=azure-portal) guide or the Azure [Manage Containers](https://learn.microsoft.com/en-us/azure/storage/blobs/blob-containers-portal) guide. - - -The following section covers a few scenarios where you have the need to customize Azure storage in an Azure cluster. - -## Custom Name - -If you need a custom name for the storage resources, you must create the storage resource and attach it to the cluster. Palette, by default, creates a storage account and container with an auto-generated name. Specify a custom storage account or container with the custom name during the cluster creation process. You can attach a custom storage account, custom container, or both if needed. - - -## Restrict User Access - -To restrict the user access to the storage resource, apply custom policies, or limit the network access, then you need to attach a custom storage account or container to the Azure cluster that contains the desired security customization. - -## Network Access - -Clusters that use a Palette self-hosted [Private Cloud Gateway](/clusters/public-cloud/azure/gateways/) (PCG), should use a custom storage account and container that are restricted to the VNet that the PCG and cluster are located in. Ensure you disable public access and use private access for the Azure storage account. - - -# Tags - -You can assign tags to clusters deployed to Azure. Tags can help you with user access control management and more granularly restrict access to various Palette resources, including clusters. Check out the [Resource Filters](/clusters/cluster-management/cluster-tag-filter/create-add-filter) documentation page to learn more about using tags to restrict resource access. - -The custom tags you create are assigned to the clusters during the creation process. Tags follow the key-value pair format: `department:finance`. - - -### Reserved Tags - -The following tags are reserved for internal purposes and are not available for usage. Palette will return an error if you use any of the following tags. - -
- - -- `azure` - - -- `microsoft` - - -- `windows` diff --git a/content/docs/04-clusters/01-public-cloud/03-azure/01-azure-cloud.md b/content/docs/04-clusters/01-public-cloud/03-azure/01-azure-cloud.md deleted file mode 100644 index 8e32395bd0..0000000000 --- a/content/docs/04-clusters/01-public-cloud/03-azure/01-azure-cloud.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -title: "Register and Manage Azure Cloud Account" -metaTitle: "Register and manage an Azure cloud account in Palette" -metaDescription: "This guide will help you register and manage an Azure cloud account in Palette" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Register an Azure Cloud Account - -Palette supports deployment and managing Kubernetes clusters in an Azure account. This section guides you on how to create a Kubernetes cluster in Azure that is managed by Palette. - -## Video Demonstration - -The following video demonstrates the integration of Azure Cloud with the Palette console. - -`video: title: "Azure-cloud-account": ./azure.mp4` - - -This guide will provide steps for how you can integrate your Azure account with Palette. - -## Prerequisites - -* A [Palette Account](https://console.spectrocloud.com/) - -* An active [Azure cloud account](https://portal.azure.com/) with sufficient resource limits and permissions to provision compute, network, and security resources in the desired regions. - -* An [Azure App](https://learn.microsoft.com/en-us/azure/app-service/overview) with valid credentials. - -## Enable Azure Cloud Account Registration to Palette - -To register an Azure cloud account in the Palette console - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the **Project Overview** drop-down and switch to the **Tenant Admin**. - - -3. Select **Tenant Settings** from the left **Main Menu**. - - -4. From the Tenant Settings go to **Cloud Accounts** and click on **+ Add Azure Account**. - - -5. The Azure cloud account wizard requires the following information: - -| **Basic Information** |Description| -|-------------------------|-----------| -|Account Name| A custom account name| -|Client ID| Unique client Id from Azure console| -|Tenant ID| Unique tenant Id from Azure console| -|[Client Secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#create-an-azure-active-directory-application)| Azure secret for authentication| -|Tenant Name| An optional tenant name| -|[Disable Properties](/clusters/public-cloud/azure/azure-cloud#disableproperties)| To disable the import of Azure networking details.| -|Toggle **Connect Private Cloud Gateway**| An option to select the [Self-Hosted PCG](/clusters/public-cloud/azure/gateways#overview) already created from the drop-down menu to link it to the cloud account. | - - - - For existing cloud accounts go to **Edit** and toggle the **Connect Private Cloud Gateway** option to select the created Gateway from the drop-down menu. - - - -6. Click on the **Confirm** button to complete the wizard. - - -### Disable Properties - -When the above information is provided to the cloud account creation wizard, Azure networking details will be sent to Palette console, which you can disable. 
To disable network calls from the Palette console to the Azure account, click **Disable Properties**. - -Registering the cloud account requires an Azure Active Directory (AAD) application that can be used with role-based access control. Follow the steps below to create a new AAD application, assign roles, and create the client secret: - - -1. Follow the steps described [here](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#create-an-azure-active-directory-application) to create a new Azure Active Directory application. Note down your ClientID and TenantID. - - -2. After creating the application, assign it at least the [Contributor](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#contributor) role. To assign any role, the user performing the assignment must have at least the [User Access Administrator](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#user-access-administrator) role. The role can be assigned by following the [Assign Role To Application](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#assign-a-role-to-the-application) guide. - - -3. Follow the steps described in the [Create an Application Secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#create-a-new-application-secret) section to create the client application secret. Store the client secret safely, as it will not be available as plain text later. - 
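If you prefer the Azure CLI to the Azure portal for the steps above, the command below is a minimal sketch that creates the app registration and service principal, assigns the Contributor role, and prints the values Palette asks for. The application display name and subscription ID are placeholders.

```shell
# Creates an app registration plus service principal, assigns the Contributor
# role at the subscription scope, and outputs appId (Client ID),
# password (Client Secret), and tenant (Tenant ID).
az ad sp create-for-rbac \
  --name "palette-cloud-account" \
  --role Contributor \
  --scopes /subscriptions/<subscription-id>
```

Record the output values and use them for the Client ID, Client Secret, and Tenant ID fields in the cloud account wizard.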
- -# Validate - -To validate the Azure Cloud account creation in Palette console: - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the **Project Overview** drop-down and switch to the **Tenant Admin**. - - -3. Select **Tenant Settings** from the left **Main Menu**. - - -4. From the Tenant Settings go to **Cloud Accounts** - - -5. Below the label **Azure**, the available Azure cloud accounts are listed. - -
- -# Manage Azure Account -After an Azure cloud account has been registered with Palette, you can change the integration settings or remove the Azure account with **Edit and Delete** capabilities respectively. - -## Edit the Azure Cloud Account - -To edit the Azure Cloud account created in Palette console: - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the **Project Overview** drop-down and switch to the **Tenant Admin**. - - -3. Select **Tenant Settings** from the left **Main Menu**. - - -4. From the Tenant Settings go to **Cloud Accounts** - - -5. Towards the name of the cloud account to be delete, click the **3 dots** kebab menu and select **Edit**. - - -6. Make the required changes and click on the **Confirm** button to complete the wizard. - -
- -## Delete the Azure Cloud Account - -To delete the Azure Cloud account created in Palette console: - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the **Project Overview** drop-down and switch to the **Tenant Admin**. - - -3. Select **Tenant Settings** from the left **Main Menu**. - - -4. From the Tenant Settings go to **Cloud Accounts** - - -5. Towards the name of the cloud account to be delete, click the **3 dots** kebab menu and select **Edit**. - - -6. Towards the name of the cloud account to be delete, click the **3 dots** kebab menu and select **Delete**. - - diff --git a/content/docs/04-clusters/01-public-cloud/03-azure/02-create-azure-cluster.md b/content/docs/04-clusters/01-public-cloud/03-azure/02-create-azure-cluster.md deleted file mode 100644 index b0560afab3..0000000000 --- a/content/docs/04-clusters/01-public-cloud/03-azure/02-create-azure-cluster.md +++ /dev/null @@ -1,204 +0,0 @@ ---- -title: "Create and Manage Azure IaaS Cluster" -metaTitle: "Creating new Azure clusters on Palette" -metaDescription: "The methods of creating an Azure cluster in Palette" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Deploy an Azure Cluster - -You can deploy Azure clusters in the Palette platform. This section highlights the prerequisites and deployment steps of Palette Azure clusters. - -Azure clusters can be created under the following scopes: - -* Tenant Admin - -* Project Scope - This is the recommended scope. - -Be aware that clusters that are created under the **Tenant Admin** scope are not visible under Project scope . - -# Prerequisites - -The following prerequisites must be met before deploying a workload cluster in Azure: - -1. You must have an active Azure cloud account with sufficient resource limits and permissions to provision compute, network, and security resources in the desired regions. - - -2. Register your Azure cloud account in Palette as described in the [Creating an Azure Cloud account](/clusters/public-cloud/azure/azure-cloud) section. - - -3. A [cluster profile created](/cluster-profiles/task-define-profile) for Azure cloud. - -## Video Demonstration - -`video: title: "azure-cluster-creation": azure.mp4` - -## Deploy an Azure Cluster with Palette - -The following steps need to be performed to provision a new Azure cluster: - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Click on **Clusters** from the left **Main Menu**. - - -2. In the cluster page click **+ Add New Cluster** button and select **create new cluster**. - - -3. Select **Azure** as the cloud type and click on **Start Azure Configuration** to input cluster information - - -4. Provide the basic cluster information such as **Name**, **Description** (optional), and **Tags** (optional) and select the [**Azure Cloud Account**](/clusters/public-cloud/azure#creatinganazurecloudaccount) from the drop-down menu. Azure cloud accounts with credentials must be pre-configured in project settings. Click on the **Next** button. - - -5. Select the **Cluster Profile** created for the Azure environment. The profile definition will be used as the cluster construction template. Click on **Next**. - - -6. Review and override pack parameters as desired. 
By default, parameters for all packs are set with values defined in the Cluster Profile. Click on **Next**. - - -7. Provide the Azure Cloud account placement information for cluster configuration. If you have custom storage accounts or storage container available, they will be eligible for attachment. To learn more about attaching custom storage to a cluster, check out the [Azure storage](/clusters/public-cloud/azure/architecture#azurestorage) page. - - - - -If the Azure account is [registered](/clusters/public-cloud/azure/azure-cloud) with the option **Disable Properties** enabled and the cluster configuration option **Static Placement** is enabled, then the network information from your Azure account will not be imported by Palette. You can manually input the information for the **Control Plane Subnet** and the **Worker Network**, but be aware that drop-down menu selections will be empty. - - - -
- -|**Parameter**| **Description**| -|-------------|---------------| -| **Subscription** | From the drop-down menu, select the subscription that will be used to access Azure Services.| -| **Region** | Select a region in Azure in which the cluster should be deployed.| -| **Resource Group** | Select the Azure resource group in which the cluster should be deployed.| -| **Storage Account** | Optionally provide the storage account. Review the [Azure Storage section](/clusters/public-cloud/azure/architecture#azurestorage) for a custom storage use cases. | -| **Storage Container**| Optionally provide the Azure storage container. Review the [Azure Storage section](/clusters/public-cloud/azure/architecture#azurestorage) for a custom storage use cases.| -| **SSH Key** | The public SSH key for connecting to the nodes. Review Microsoft's [supported SSH](https://learn.microsoft.com/en-us/azure/virtual-machines/linux/mac-create-ssh-keys#supported-ssh-key-formats) formats. | -| **Static Placement** | By default, Palette uses dynamic placement, in which a new VPC with a public and private subnet is created to place cluster resources for every cluster. These resources are fully managed by Palette and deleted when the corresponding cluster is deleted.
If you want to place resources into pre-existing VPCs and subnets, you can enable the **Static Placement** option. Review the [Static Placement](#static-placement-table) table below for available parameters for static placement.| -|**Update worker pools in parallel**| Check the box to concurrently update the worker pools.| -|**Private API Server LB**|This option applies when the cluster is deployed via the [Azure Private Endpoint](/clusters/public-cloud/azure/gateways). You can enable this option if your API Server must have private access. Review the [Private API Server LB](#private-api-server-lb-table) table below for more details.| -|**Update worker pools in parallel**|If you have multiple worker pools, select the check box to enable simultaneous upgrade of all the pools. The default is sequential upgrade.| - -#### Static Placement Table - -| **Parameter** | **Description** | -|------------------------|------------------------------------------------------------| -| **Network Resource Group** | The logical container for grouping related Azure resources | -| **Virtual Network** | Select the virtual network from the drop-down menu. | -| **CIDR Block** | Select the CIDR address from the drop-down menu. | -| **Control Plane Subnet** | Select the control plane network from the dropdown menu. | -| **Worker Network** | Select the worker network from the drop-down menu. | - - - -#### Private API Server LB Table - - -| **Parameter** | **Description**| -|----------------------|----------------------------------------------------------------------------------------------------------------------------------------| -| **Private DNS Zone** | Optionally select the DNS Zone from the drop-down menu. If you do not select a DNS Zone, one will be generated and assigned.| -| **IP Allocation Method** | Allocate an available IP from the private endpoint VNet. Review the [IP Allocation Method Table](#ip-allocation-method-table) below for more details.| - -##### IP Allocation Method Table - -| **Parameter** | **Description** | -|----------------------|----------------------------------------------------------------------------------------------------------------------------------------| -| **Dynamic** | Use Dynamic Host Configuration Protocol (DHCP) to dynamically allocates IP addresses from the available Virtual Network IP CIDR range.| -| **Static** | You can specify a static IP address from the available Virtual Network IP range.| - -When you have provided all the cluster configuration details to the wizard, click on **Next** and proceed to node configuration. - -
- -7. Configure the master and worker node pools. A master and a worker node pool are configured by default. To learn more about the configuration options, review the [Node Pool](/clusters/cluster-management/node-pool) documentation page. - - - -You can add new worker pools to customize certain worker nodes to run specialized workloads. For example, the default worker pool may be configured with the Standard_D2_v2 instance types for general-purpose workloads and another worker pool with instance type Standard_NC12s_v3 can be configured to run GPU workloads. - - - -
- - -8. The settings page is where you can configure patching schedule, security scans, backup settings, setup role based access control (RBAC), and enable [Palette Virtual Clusters](/devx/palette-virtual-clusters). Review the settings and make changes if needed. Click on **Validate**. - - -9. Review the settings summary and click on **Finish Configuration** to deploy the cluster. Be aware that provisioning IaaS clusters can take several minutes. - - -The cluster details page of the cluster contains the status and details of the deployment. Use this page to track the deployment progress. - -# Validate - -You can validate your cluster is up and running by reviewing the cluster details page. Navigate to the left **Main Menu** and click on **Clusters**. The **Clusters** page contains a list of all available clusters managed by Palette. Click on the row for the cluster you wish to review its details page. Ensure the **Cluster Status** field contains the value **Running**. -# Deleting an Azure IaaS Cluster - -The deletion of an Azure IaaS cluster results in the removal of all instances and associated resources created for the cluster. To perform a cluster deletion, use the following steps. - - -1. Ensure you are in the correct project scope. - - -2. Navigate to the left **Main Menu** and click on **Clusters** - - -3. Click on the cluster that you want to remove. - - -4. Click on the **Settings** drop-down menu. - - -5. Click on **Delete Cluster** - - -6. Type in the name of the cluster and click on **OK** - -The cluster status is updated to **Deleting** while cluster resources are being deleted. Once all resources are successfully deleted, the cluster status is updated to **Deleted** and is removed from the list of clusters. - -## Force Delete a Cluster - -If a cluster is stuck in the **Deletion** state for a minimum of 15 minutes it becomes eligible for force deletion. You can force delete a cluster from the tenant and project admin scope. -To force delete a cluster follow the same steps outlined in [Deleting an Azure IaaS Cluster](#deleting-an-azure-iaas-cluster). However, after 15 minutes, a **Force Delete Cluster** option is available in the **Settings** drop-down menu. The **Settings** drop-down menu will provide you with an estimated time left before the force deletion becomes available.. - -
- - - - -A force delete can result in resources Palette provisioned being missed in the removal process. Verify there are no remaining Palette provisioned resources such as: - -- Virtual Network (VNet) -- Static Public IPs -- Virtual Network Interfaces -- Load Balancers -- VHD -- Managed Disks -- Virtual Network Gateway - -Failure in removing provisioned resources can result in unexpected costs. - - - -## Validate - -To validate the Azure cluster creation and deletion status - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Click on **Cluster** on the left **Main Menu** - - -4. Click on the check box **Deleted only** to view all the clusters deleted in the last 72 hours. diff --git a/content/docs/04-clusters/01-public-cloud/03-azure/04-aks.md b/content/docs/04-clusters/01-public-cloud/03-azure/04-aks.md deleted file mode 100644 index 8d8c891913..0000000000 --- a/content/docs/04-clusters/01-public-cloud/03-azure/04-aks.md +++ /dev/null @@ -1,315 +0,0 @@ ---- -title: "Create and Manage Azure AKS Cluster" -metaTitle: "Creating new clusters on Palette" -metaDescription: "The methods of creating clusters for a speedy deployment on any CSP" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Overview - -Palette supports creating and managing Kubernetes clusters deployed to an Azure subscription. This section guides you on how to create an IaaS Kubernetes cluster in Azure that is managed by Palette. - -Azure clusters can be created under the following scopes: - -* Tenant admin - -* Project Scope - This is the recommended scope. - -Be aware that clusters that are created under the **Tenant Admin** scope are not visible under Project scope . - - -# Prerequisites - -These prerequisites must be met before deploying an AKS workload cluster: - -1. You need an active Azure cloud account with sufficient resource limits and permissions to provision compute, network, and security resources in the desired regions. - - -2. You will need to have permissions to deploy clusters using the AKS service on Azure. - - -3. Register your Azure cloud account in Palette as described in the [Creating an Azure Cloud Account](#creatinganazurecloudaccount) section below. - - -4. You should have a cluster profile created in Palette for AKS. - - -5. Associate an SSH key pair to the cluster worker node. - - -
- -## Additional Prerequisites - -There are additional prerequisites if you want to set up Azure Active Directory integration for the AKS cluster: - - - 1. A Tenant Name must be provided as part of the Azure cloud account creation in Palette. - - - 2. For the Azure client used in the Azure cloud account, these API permissions have to be provided: - - | | | - | --------------- | ------------------------------------- | - | Microsoft Graph | Group.Read.All (Application Type) | - | Microsoft Graph | Directory.Read.All (Application Type) | - - 3. You can configure these permissions from the Azure cloud console under **App registrations** > **API permissions** for the specified application. - - - -Palette **also** enables the provisioning of private AKS clusters via a private cloud gateway (Self Hosted PCGs). The Self-Hosted PCG is an AKS cluster that needs to be launched manually and linked to an Azure cloud account in Palette Management Console. [Click here for more..](/clusters/public-cloud/azure/gateways#overview) - - - -# Create an Azure Cloud Account - -`video: title: "Azure-cloud-account": azure.mp4` - -To create an Azure cloud account, we need: - -A custom Account Name -* Client ID -* Tenant ID -* Client Secret -* Tenant Name (optional) -* Toggle `Connect Private Cloud Gateway` option and select the [Self-Hosted PCG](/clusters/public-cloud/azure/gateways#overview) already created from the drop-down menu to link it to the cloud account. - -**Note:** - -For existing cloud account go to `Edit` and toggle the `Connect Private Cloud Gateway` option to select the created gateway from the drop down menu. - -For Azure cloud account creation, we first need to create an Azure Active Directory (AAD) application that can be used with role-based access control. Follow the steps below to create a new AAD application, assign roles, and create the client secret: - -
- -1. Follow the steps described [here](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#create-an-azure-active-directory-application) to create a new Azure Active Directory application. Note down your ClientID and TenantID. - - -2. On creating the application, assign a minimum required [ContributorRole](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#contributor). To assign any type of role, the user must have a minimum role of [UserAccessAdministrator](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#user-access-administrator). Follow the [Assign Role To Application](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#assign-a-role-to-the-application) link learn more about roles. - - -3. Follow the steps described in the [Create an Application Secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#create-a-new-application-secret) section to create the client application secret. Store the Client Secret safely as it will not be available as plain text later. - -# Deploy an AKS Cluster - -
- - -`video: title: "aks-cluster-creation": aks.mp4` - - -The following steps need to be performed to provision a new cluster: -
- - -1. If you already have a profile to use, go to the **Cluster** > **Add a New Cluster** > **Deploy New Cluster** and select an Azure cloud. If you do not have a profile to use, reference the [Creating a Cluster Profile](https://docs.spectrocloud.com/cluster-profiles/task-define-profile) page for steps on how to create one. - - -2. Fill the basic cluster profile information such as **Name**, **Description**, **Tags** and **Cloud Account**. - - -3. In the **Cloud Account** dropdown list, select the Azure Cloud account or create a new one. See the [Creating an Azure Cloud Account](#creatinganazurecloudaccount) section above. - - -4. Next, in the **Cluster profile** tab from the **Managed Kubernetes** list, pick **AKS**, and select the AKS cluster profile definition. - - -5. Review the **Parameters** for the selected cluster profile definitions. By default, parameters for all packs are set with values defined in the cluster profile. - - -6. Complete the **Cluster config** section with the information for each parameter listed below. - - | **Parameter** | **Description** | - | ------------------ | -------------------------------------------------------------------------------------------- | - | **Subscription** | Select the subscription which is to be used to access Azure Services. | - | **Region** | Select a region in Azure in where the cluster should be deployed. | - | **Resource Group** | Select the resource group in which the cluster should be deployed. | - | **SSH Key** | The public SSH key for connecting to the nodes. Review Microsoft's [supported SSH](https://learn.microsoft.com/en-us/azure/virtual-machines/linux/mac-create-ssh-keys#supported-ssh-key-formats) formats. | - | **Static Placement** | By default, Palette uses dynamic placement, wherein a new VPC with a public and private subnet is created to place cluster resources for every cluster. These resources are fully managed by Palette and deleted when the corresponding cluster is deleted.
Turn on the **Static Placement** option if it is desired to place resources into preexisting VPCs and subnets. If the user is making the selection of **Static Placement** of resources, the following placement information needs to be provided: - ||**Virtual Resource Group**: The logical container for grouping related Azure resources. - || **Virtual Network**: Select the virtual network from dropdown menu. - || **Control plane Subnet**: Select the control plane network from the dropdown menu. - || **Worker Network**: Select the worker network from the dropdown. - |**Update worker pools in parallel**| Check the box to concurrently update the worker pools.| - - - -If the Palette [cloud account](/clusters/public-cloud/azure#creatinganazurecloudaccount) is created with **Disable Properties** and the cluster option -**Static Placement** is enabled, the network information from your Azure account will not be imported to Palette. You can manually input the information for the **Control Plane Subnet** and the **Worker Network**. - - - -7. Click **Next** to configure the node pools. - - -
- -The [maximum number](https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni#maximum-pods-per-node) of pods per node in an AKS cluster is 250. If you don't specify maxPods when creating new node pools, then the default value of 30 is applied. You can edit this value from the Kubernetes configuration file at any time by editing the `maxPodPerNode` value. Refer to the snippet below: - -
- -```yaml -managedMachinePool: -  maxPodPerNode: 30 -``` - - - -# Node Pools - -This section guides you through configuring node pools. As you set up the cluster, the **Nodes config** section allows you to customize node pools. AKS clusters are composed of System and User node pools, and all pool types can be configured to use the Autoscaler, which scales pools horizontally based on per-node workload counts. - -A complete AKS cluster contains the following: - 
- -1. As a mandatory primary **System Node Pool**, this pool will run the pods necessary to run a Kubernetes cluster, like the control plane and etcd. All system pools must have at least a single node for a development cluster; one (1) node is enough for high availability production clusters, and three (3) or more is recommended. - - -2. **Worker Node** pools consist of one (1) or more per workload requirements. Worker node pools can be sized to zero (0) nodes when not in use. - -
- -## Create and Remove Node Pools - -During cluster creation, you will default to a single pool. - -
- -1. To add additional pools, click **Add Node Pool**. - - -2. Provide any additional Kubernetes labels to assign to each node in the pool. This section is optional, and you can use a `key:value` structure, press your space bar to add additional labels, and click the **X** with your mouse to remove unwanted labels. - - -3. To remove a pool, click **Remove** across from the title for each pool. - -
- -## Create a System Node Pool - -1. Each cluster requires at least one (1) system node pool. To define a pool as a system pool, check the box labeled **System Node Pool**. -
- - -Identifying a node pool as a System Pool deactivates taints and the operating system options within the Cloud Configuration section, because system pools cannot be tainted and their OS cannot be changed from Linux. See the AKS documentation for more details on pool limitations. - - -
- -2. Provide a name in the **Node pool name** text box. When creating a node, it is good practice to include an identifying name that matches the node in Azure. - - -3. Add the **Desired size**. You can start with three for multiple nodes. - - -4. Include **Additional Labels**. This is optional. - - -5. In the **Azure Cloud Configuration** section, add the **Instance type**. The cost details are present for review. - - -6. Enter the **Managed Disk** information and its size. - - -7. If you are including additional or multiple nodes to make a node pool, click the **Add Worker Pool** button to create the next node. - - -## Configure Node Pools - -In all types of node pools, configure the following. - -
- -1. Provide a name in the **Node pool name** text box. When creating a node, it is good practice to include an identifying name. - - **Note:** Windows clusters have a name limitation of six (6) characters. - - -2. Provide how many nodes the pool will contain by adding the count to the box labeled **Number of nodes in the pool**. Configure each pool to use the autoscaler controller. There are more details on how to configure that below. - - -3. Alternative to a static node pool count, you can enable the autoscaler controller, click **Enable Autoscaler** to change to the **Minimum size** and **Maximum size** fields which will allow AKS to increase or decrease the size of the node pool based on workloads. The smallest size of a dynamic pool is zero (0), and the maximum is one thousand (1000); setting both to the same value is identical to using a static pool size. - - -4. Provide any additional Kubernetes labels to assign to each node in the pool. This section is optional; you can use a `key:value` structure. Press your space bar to add additional labels and click the **X** with your mouse to remove unwanted labels. - - -5. In the **Azure Cloud Configuration** section: - - - Provide instance details for all nodes in the pool with the **Instance type** dropdown. The cost details are present for review. - -
- - -New worker pools may be added if you want to customize specific worker nodes to run specialized workloads. As an example, the default worker pool may be configured with the Standard_D2_v2 instance types for general-purpose workloads, and another worker pool with the instance type Standard_NC12s_v3 can be configured to run GPU workloads. - - -
- - - Provide the disk type via the **Managed Disk** dropdown and the size in Gigabytes (GB) in the **Disk size** field. - - -A minimum allocation of two (2) CPU cores is required across all worker nodes. - -A minimum allocation of 4Gi of memory is required across all worker nodes. - - -
- - - When you are done setting up all node pools, click **Next** to go to the **Settings** page, where you can **Validate** and finish the cluster deployment wizard. - - **Note**: Keep an eye on the **Cluster Status** once you click **Finish Configuration**, as it will start as *Provisioning*. Deploying an AKS cluster takes a considerable amount of time to complete, and the **Cluster Status** in Palette will say *Ready* when the cluster is complete and ready to use. - 
- -# Configure an Azure Active Directory - - -The Azure Active Directory (AAD) could be enabled while creating and linking the Azure Cloud account for the Palette Platform, using a simple check box. Once the cloud account is created, you can create the Azure AKS cluster. The AAD-enabled AKS cluster will have its Admin *kubeconfig* file created and can be downloaded from our Palette UI as the 'Kubernetes config file'. You need to manually create the user's *kubeconfig* file to enable AAD completely. The following are the steps to create the custom user *kubeconfig* file: - -
- -1. Go to the Azure console to create the Groups in Azure AD to access the Kubernetes RBAC and Azure AD control access to cluster resources. - - -2. After you create the groups, create users in the Azure AD. - - -3. Create custom Kubernetes roles and role bindings for the created users and apply the roles and role bindings, using the Admin *kubeconfig* file. - -
- - -The above step can also be completed using Spectro RBAC pack available under the Authentication section of Add-on Packs. - - -
4. Once the roles and role bindings are created, these roles can be linked to the Groups created in Azure AD. - - -5. The users can now access the Azure clusters with the complete benefits of AAD. To get the user-specific *kubeconfig* file, run the following command, replacing the placeholders with your resource group and cluster name: - - - `az aks get-credentials --resource-group <resource-group-name> --name <cluster-name>` - 
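A minimal sketch of the kind of role binding that step 3 above describes, granting read-only access to an Azure AD group, can be created with `kubectl` against the Admin *kubeconfig* file. The group object ID below is a placeholder taken from your Azure AD tenant, and the built-in `view` ClusterRole is only an example; swap in your own custom roles as needed.

```shell
# Bind the built-in "view" ClusterRole to an Azure AD group so that its
# members get read-only access once they authenticate through AAD.
kubectl create clusterrolebinding aad-readonly-binding \
  --clusterrole=view \
  --group=<azure-ad-group-object-id>
```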
- -# References: - -[Use Kubernetes RBAC with Azure AD integration](https://docs.microsoft.com/en-us/azure/aks/azure-ad-rbac?toc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Faks%2Ftoc.json&bc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fbread%2Ftoc.json) - -[Azure Kubernetes Service (AKS)](https://docs.microsoft.com/en-us/azure/aks/) - - -
diff --git a/content/docs/04-clusters/01-public-cloud/03-azure/20-gateways.md b/content/docs/04-clusters/01-public-cloud/03-azure/20-gateways.md deleted file mode 100644 index 0792d51466..0000000000 --- a/content/docs/04-clusters/01-public-cloud/03-azure/20-gateways.md +++ /dev/null @@ -1,193 +0,0 @@ ---- -title: "Self Hosted PCG" -metaTitle: "Creating a Self Hosted PCG on Palette" -metaDescription: "The methods of creating Self Hosted PCG on Palette for secured cluster deployment" -icon: "" -hideToC: false -fullWidth: false ---- - -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import Tooltip from 'shared/components/ui/Tooltip'; - -# Overview -
Palette enables the provisioning of private AKS clusters within Azure Virtual Networks (VNets) for enhanced security by offloading the orchestration to a Private Cloud Gateway (self-hosted PCG) deployed within the same account as the private AKS clusters. The self-hosted PCG is an AKS cluster that you launch manually and link to an Azure cloud account in the Palette Management Console. The following sections discuss the prerequisites and the detailed steps for deploying a Palette self-hosted PCG for Azure cloud accounts. Once the self-hosted PCG is created and linked with an Azure cloud account in Palette, any Azure clusters provisioned using that cloud account are orchestrated through the self-hosted PCG, which enables the provisioning of private AKS clusters. - 
- - -# Prerequisites - -* An active [Azure cloud account](https://portal.azure.com/) with sufficient resource limits and permissions to provision compute, network, and security resources in the desired regions. - - -* The [Azure CLI v2.0.0+](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) - - - -## Create a Virtual Network in the Azure Console - -Log in to the [Azure portal](https://portal.azure.com/) and create a [Virtual Network](https://learn.microsoft.com/en-us/azure/virtual-network/virtual-networks-overview) (VNet). Ensure the VNET contains the following network settings: -
1. Three subnets. Each of the subnets should have a minimum of **333 available IPs**. (Note: 333 IPs are required only if you want to use Azure Container Networking Interface (CNI) networking; they are not necessary if you are using Kubenet networking.) - - -2. The VNet should have the **`Microsoft.Authorization/roleAssignments/write`** action, which is required to connect the virtual network to the Azure Kubernetes cluster when the network configuration for the cluster is Azure CNI. - - -3. The VNet needs to be linked with: - * The Azure Kubernetes cluster. - * An Azure Bastion Host (jump box virtual machine) to connect to the Azure target Kubernetes cluster securely. - - - - **Note**: A bastion host is only required if you are accessing a Kubernetes cluster that is located inside a private Azure Virtual Network (VNet) that only exposes private endpoints. If you have direct network access to the VNet, you do not require a bastion host. Alternatively, you can deploy a bastion host and remove it later if it is no longer required. - - - - -## Create an Azure Kubernetes Target Cluster in the Azure Console - - -1. Log in to the **Azure Portal**, go to **Kubernetes services**, and click **Create a Kubernetes Cluster** to initiate cluster creation. - - -2. Fill in the essential information in the **Create Cluster** wizard, paying particular attention to the following settings. - - -3. Primary Node Pool: - * Node Size: Select a VM instance with a minimum of 16 GiB RAM and four vCPUs. - * Node Count: 1 or 3, per your requirements. - - -4. Select **Manual** as the scale method. - - -5. Networking: The two options available for the networking configuration are **Kubenet** and **Azure CNI**. - 
#### Kubenet: - -If you select this option, ensure that the VNet used is the VNet created in the previous step. This VNet selection is not possible through the Azure user interface, so it must be done [programmatically](https://learn.microsoft.com/en-us/azure/aks/configure-kubenet), as shown below: - -
- -``` -SUBNET_ID=$(az network vnet subnet show --resource-group resource-group-name --vnet-name vnet-name --name subnet-name --query id -o tsv) -``` -
- -``` -az aks create \ - --resource-group resource-group-name \ - --name cluster-name \ - --enable-private-cluster \ - --node-count count-value \ - --network-plugin kubenet \ - --vnet-subnet-id $SUBNET_ID \ - --max-pods 110 -``` - - #### Azure CNI: - -6. Security: For network security enable **Private cluster**. - -In Azure CNI network configuration, select the static [virtual network created](/clusters/public-cloud/azure/gateways#createanazurevirtualnetworkintheazureconsole) from the drop-down menu. - -
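If you prefer to create the Azure CNI-based private cluster from the CLI rather than the portal, a sketch analogous to the Kubenet command above might look like the following. All values other than the `azure` network plugin are placeholders, and `$SUBNET_ID` is the subnet ID captured earlier.

```shell
az aks create \
    --resource-group resource-group-name \
    --name cluster-name \
    --enable-private-cluster \
    --node-count count-value \
    --network-plugin azure \
    --vnet-subnet-id $SUBNET_ID
```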
-
-## Establish External Connectivity with the Target Azure Kubernetes Cluster
-
-To establish a connection to the target Azure Kubernetes cluster, first connect to the Bastion host (jump box), and from there connect to the target Azure Kubernetes cluster that will be imported into the Palette console as the self-hosted PCG.
-
-### Get Connected to the Bastion Host (Jump Box)
-
-To establish external connectivity to the private Kubernetes cluster, launch an Azure Virtual Machine as a jump box with an SSH key attached. A sample command is shown below.
-
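-The following is a minimal sketch only; the resource group, VNet, subnet, image alias, and SSH key path are placeholders and may differ in your environment:
-
-```
-az vm create \
-    --resource-group resource-group-name \
-    --name bastion-jump-box \
-    --image Ubuntu2204 \
-    --vnet-name vnet-name \
-    --subnet subnet-name \
-    --admin-username azureuser \
-    --ssh-key-values ~/.ssh/id_rsa.pub
-```
-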
-
-
-Port Prerequisite:
-Add an inbound network security group (NSG) rule that allows TCP traffic to destination port 22 (SSH). For example, see the command below.
-
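-If you prefer the CLI, a rule similar to the following can be added. The resource group and NSG names are placeholders:
-
-```
-az network nsg rule create \
-    --resource-group resource-group-name \
-    --nsg-name nsg-name \
-    --name allow-ssh-inbound \
-    --priority 1000 \
-    --direction Inbound \
-    --access Allow \
-    --protocol Tcp \
-    --destination-port-ranges 22
-```
-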
-
-1. Open a terminal client of your choice to execute the following commands.
-
-
-
-2. Ensure you have read-only access to the private key file. Replace `private-key-file.pem` below with the name of your key file. The `chmod` command is only available in Unix-like environments, such as WSL on Windows or the Terminal on macOS.
-
-```
- chmod 400 private-key-file.pem
-```
-
-3. Run the example command below to connect to your VM.
-
-
-```
- ssh -i private-key-file.pem azureuser@public-ip-address-of-bastion-jump-box
-```
-
-
-### Get Connected to the Target Azure Kubernetes Cluster
-
-After connecting to the Bastion host, establish a connection to the target Azure Kubernetes cluster. Refer to the sample instructions below:
-
- - ``` - az login - ``` -
- - ``` - az account set --subscription 8710ff2b-e468-434a-9a84-e522999f6b81 - ``` -
-
- ```
- az aks get-credentials --resource-group resource-group-name --name target-cluster-name
- ```
-
-## Deploy the Palette Self-Hosted PCG to the Palette Console
-
-
-1. Log in to the Palette console as a Tenant Admin and go to **Tenant Settings**.
-
-
-2. Go to **Private Cloud Gateways** and select **+ Add New Private Cloud Gateway**.
-
-
-3. From the available options, select **Self Hosted Gateway**.
-
-4. In the create gateway wizard, provide the following:
-   * Private cloud gateway name: A custom gateway name.
-   * Cloud type: Select **Azure** for an Azure self-hosted PCG.
-
-5. Install the Palette agent. Also check the prerequisites and instructions displayed in the Palette UI.
-
-**Example:**
-```
- kubectl apply -f endpoint/v1/pcg/12345678901234/services/jet/manifest
-```
-```
- kubectl apply -n cluster-1234abcd -f https://endpoint/v1/pcg/12345678901234/services/ally/manifest
-```
-
-6. The self-hosted PCG is provisioned and starts running in the Palette console. A healthy self-hosted PCG can be managed from the Palette UI and can optionally be linked to an Azure cloud account to gain the enhanced security benefits. [PCG migration](/enterprise-version/enterprise-cluster-management#palettepcgmigration) is also supported for public cloud self-hosted PCGs.
-
-
-
-Palette users can also launch Azure clusters without a PCG. To gain the additional benefits of the private cloud self-hosted PCG, attach it to the Palette Azure cloud account.
-
-
-
-## Attach the Self-Hosted PCG to the Azure Cloud Account
-
-The self-hosted PCG can be attached to an existing Azure Palette cloud account, or attached while creating a new Azure Palette cloud account. Refer to the [Azure Cloud Account](/clusters/public-cloud/azure#creatinganazurecloudaccount) creation guide.
-
-
diff --git a/content/docs/04-clusters/01-public-cloud/05-gcp.md b/content/docs/04-clusters/01-public-cloud/05-gcp.md
deleted file mode 100644
index dd956ef195..0000000000
--- a/content/docs/04-clusters/01-public-cloud/05-gcp.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-title: "GCP"
-metaTitle: "Creating new clusters on Spectro Cloud"
-metaDescription: "The methods of creating clusters for a speedy deployment on any CSP"
-hideToC: false
-fullWidth: false
----
-
-import Tabs from 'shared/components/ui/Tabs';
-import WarningBox from 'shared/components/WarningBox';
-import InfoBox from 'shared/components/InfoBox';
-import PointsOfInterest from 'shared/components/common/PointOfInterest';
-
-
-
-# Overview
-
-Palette supports integration with Google Cloud Platform (GCP). You can deploy and manage Host Clusters in GCP. To get started with GCP, begin by adding your GCP account in Palette. Check out the [Register and Manage GCP Accounts](/clusters/public-cloud/gcp/add-gcp-accounts) guide.
-
-
-# Get Started
-
-Learn how to deploy a cluster to GCP by using Palette. Check out the [Deploy a Cluster with Palette](/clusters/public-cloud/deploy-k8s-cluster) tutorial to get started.
-
-# Resources
-
-To learn more about Palette and GCP clusters, check out the following resources:
-
-- [Register and Manage GCP Accounts](/clusters/public-cloud/gcp/add-gcp-accounts)
-
-
-- [Create and Manage GCP IaaS Cluster](/clusters/public-cloud/gcp/create-gcp-iaas-cluster)
-
-
-- [Create and Manage GCP GKE Cluster](/clusters/public-cloud/gcp/create-gcp-gke-cluster)
-
-
-- [Architecture](/clusters/public-cloud/gcp/architecture)
-
-
-- [Required IAM Permissions](/clusters/public-cloud/gcp/required-permissions)
-
-
-- [Cluster Removal](/clusters/cluster-management/remove-clusters)
\ No newline at end of file
diff --git a/content/docs/04-clusters/01-public-cloud/05-gcp/05-architecture.md b/content/docs/04-clusters/01-public-cloud/05-gcp/05-architecture.md
deleted file mode 100644
index cbfde76d03..0000000000
--- a/content/docs/04-clusters/01-public-cloud/05-gcp/05-architecture.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-title: "Architecture"
-metaTitle: "GCP Architecture with Palette"
-metaDescription: "Learn about the architecture used to support Google Cloud using Palette."
-hideToC: false
-fullWidth: false
----
-
-import Tabs from 'shared/components/ui/Tabs';
-import WarningBox from 'shared/components/WarningBox';
-import InfoBox from 'shared/components/InfoBox';
-import PointsOfInterest from 'shared/components/common/PointOfInterest';
-
-# Overview
-
-Palette supports Google Cloud Platform (GCP) as one of its public cloud environments. Using Palette, you can effectively manage the entire lifecycle of any combination of new or existing, simple or complex, small or large Kubernetes environments in GCP. Palette gives IT teams complete control, visibility, and production-scale efficiencies to provide developers with highly curated Kubernetes stacks and tools with enterprise-grade security.
-
-The following are some highlights of Palette-provisioned GCP clusters.
-
- -- Control plane nodes and worker nodes are placed within a single private subnet that spans different availability zones within a region. - - -- A new Virtual Private Cloud (VPC) Network is created with all the network infrastructure components, such as Cloud NAT and a Cloud Router. In addition, firewall rules are created to protect all the API endpoints. - - -- The Kubernetes API server endpoint is exposed through a Global Load Balancer. Applications deployed into the cluster can use a Regional Load Balancer to expose internal Kubernetes services. - - -![gcp_cluster_architecture.png](/gcp_cluster_architecture.png) \ No newline at end of file diff --git a/content/docs/04-clusters/01-public-cloud/05-gcp/10-add-gcp-accounts.md b/content/docs/04-clusters/01-public-cloud/05-gcp/10-add-gcp-accounts.md deleted file mode 100644 index e0120f6ab0..0000000000 --- a/content/docs/04-clusters/01-public-cloud/05-gcp/10-add-gcp-accounts.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: "Register and Manage GCP Accounts" -metaTitle: "Add a GCP Account to Palette" -metaDescription: "Learn how to add a GCP account to Palette." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Add GCP Account - - -Palette supports integration with Google Cloud Platform (GCP) accounts. This section explains how to create a GCP cloud account in Palette. - -# Prerequisites - -* You must have a GCP service account available for use with Palette. For detailed instructions on creating a service account, refer to [Creating and managing service accounts](https://cloud.google.com/iam/docs/creating-managing-service-accounts). - - - -* The service account must, at a minimum, have the following roles. - - - [Kubernetes Engine Admin](https://cloud.google.com/iam/docs/understanding-roles#kubernetes-engine-roles) - - - [Compute Admin](https://cloud.google.com/iam/docs/understanding-roles#compute.admin) - - - [Service Account User](https://cloud.google.com/iam/docs/understanding-roles#iam.serviceAccountUser) - - - [Storage Object Viewer](https://cloud.google.com/iam/docs/understanding-roles#storage.objectViewer) - -
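-
-As a minimal sketch only, with placeholder project and service account names, the roles listed above can be granted with the gcloud CLI:
-
-```
-gcloud projects add-iam-policy-binding my-project \
-  --member="serviceAccount:palette-sa@my-project.iam.gserviceaccount.com" \
-  --role="roles/container.admin"
-```
-
-Repeat the command for `roles/compute.admin`, `roles/iam.serviceAccountUser`, and `roles/storage.objectViewer`. The JSON credential file referenced below can then be created with `gcloud iam service-accounts keys create palette-sa-key.json --iam-account=palette-sa@my-project.iam.gserviceaccount.com`.
-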
- - - - Alternatively, you can create a custom role and assign Palette the required GCP permissions. Check out the [Required IAM Permission](/clusters/public-cloud/gcp/required-permissions) for a detailed list of all permissions. - - - - - - -* Ensure you have access to the JSON credential file for your service account. For additional guidance, refer to the [GCP Credentials](https://developers.google.com/workspace/guides/create-credentials) documentation. - -# Create Account - - -1. Log in to [Palette](https://console.spectrocloud.com) as Tenant admin. - - -2. Navigate to the left **Main Menu** and select **Tenant Settings**. - - -3. Select **Cloud Accounts** and click on **Add GCP Account**. - - -4. In the cloud account creation wizard, provide the following information: - * **Account Name:** Custom name for the cloud account. - - * **JSON Credentials:** The JSON credentials object. - -
- - - - You can use the **Upload** button to upload the JSON file you downloaded from the GCP console. - - - - -5. Click the **Validate** button to validate the credentials. - - -6. When the credentials are validated, click on **Confirm** to save your changes. - -# Validate - -You can validate the account is available in Palette by reviewing the list of cloud accounts. - -
-
-1. Log in to [Palette](https://console.spectrocloud.com) as Tenant admin.
-
-
-2. To review the list of cloud accounts, navigate to the left **Main Menu** and click on **Tenant Settings**.
-
-
-3. Next, click on **Cloud Accounts**. Your newly added GCP account is listed under the GCP section.
-
-
-# Next Steps
-
-
-Now that you have added a GCP account to Palette, you can deploy clusters to your GCP account. To learn how to get started with deploying Kubernetes clusters to GCP, check out the [Create and Manage GCP IaaS Cluster](/clusters/public-cloud/gcp/create-gcp-iaas-cluster) guide or the [Create and Manage GCP GKE Cluster](/clusters/public-cloud/gcp/create-gcp-gke-cluster) guide.
\ No newline at end of file
diff --git a/content/docs/04-clusters/01-public-cloud/05-gcp/20-create-gcp-iaas-cluster.md b/content/docs/04-clusters/01-public-cloud/05-gcp/20-create-gcp-iaas-cluster.md
deleted file mode 100644
index ce715385e6..0000000000
--- a/content/docs/04-clusters/01-public-cloud/05-gcp/20-create-gcp-iaas-cluster.md
+++ /dev/null
@@ -1,157 +0,0 @@
----
-title: "Create and Manage GCP IaaS Cluster"
-metaTitle: "Create and Manage GCP IaaS Cluster"
-metaDescription: "Learn how to add and manage an IaaS cluster deployed to GCP."
-hideToC: false
-fullWidth: false
----
-
-import Tabs from 'shared/components/ui/Tabs';
-import WarningBox from 'shared/components/WarningBox';
-import InfoBox from 'shared/components/InfoBox';
-import PointsOfInterest from 'shared/components/common/PointOfInterest';
-
-# Create and Manage GCP IaaS Cluster
-
-
-Palette supports creating and managing Kubernetes clusters deployed to a Google Cloud Platform (GCP) account. This section guides you through creating an IaaS Kubernetes cluster in GCP that Palette manages.
-
-# Prerequisites
-
-Ensure the following requirements are met before you attempt to deploy a cluster to GCP:
-
-- Access to a GCP cloud account.
-
-
-- You have added a GCP account in Palette. Review the [Register and Manage GCP Accounts](/clusters/public-cloud/gcp/add-gcp-accounts) for guidance.
-
-
-- An infrastructure cluster profile for GCP. Review the [Create Cluster Profiles](/cluster-profiles/task-define-profile) for guidance.
-
-
-- An SSH Key that is uploaded to Palette and available for usage. Refer to the [SSH Keys](/clusters/cluster-management/ssh-keys) guide to learn how to create an SSH key and upload the public key to Palette.
-
-
-- Palette creates compute, network, and storage resources while provisioning Kubernetes clusters. Ensure there is sufficient capacity in the preferred GCP region to create the following resources:
-  - Virtual Private Cloud (VPC) Network
-  - Static External IP Address
-  - Network Interfaces
-  - Cloud NAT
-  - Cloud Load Balancing
-  - Persistent Disks
-  - Cloud Router
-
-
-# Deploy a GCP Cluster
-
-1. Log in to [Palette](https://console.spectrocloud.com) and ensure you are in the correct project scope.
-
-
-2. Navigate to the left **Main Menu** and click on **Clusters**.
-
-
-3. Click on **Add New Cluster**.
-
-
-4. A prompt displays to either deploy or import a new cluster. Click on **Deploy New Cluster**.
-
-
-5. Select **GCP** and click on **Start GCP Configuration**.
-
-
-6. Populate the wizard page with the cluster name, description, and tags. Tags assigned to a cluster are propagated to the VMs deployed to the computing environments.
-
-
-7. Select a GCP account, and click on **Next**.
-
-
-
-8. Select the **Infrastructure Provider** row and click on one of your GCP cluster profiles. Click on **Next**.
-
-
-
-9. 
Review and customize pack parameters as desired. By default, parameters for all packs are set with values defined in the cluster profile. Click on **Next** to continue. - - -10. Fill out the following parameters and click on **Next** when you are done. - -
-
-  |Parameter|Description|
-  |---|---|
-  |**Project**|The project to which the cluster belongs.|
-  |**Region**|Choose the desired GCP region to deploy the cluster.|
-  |**SSH Key**|Choose the desired SSH key. Refer to the [SSH Keys](/clusters/cluster-management/ssh-keys) guide to learn how to create an SSH key and upload the public key to Palette.|
-  |**Static Placement** | Check the **Static Placement** box to deploy resources into a pre-existing VPC. Review the [Static Placement](/clusters/public-cloud/gcp/create-gcp-iaas-cluster#staticplacement) table below to learn more about the required input fields.|
-
-  #### Static Placement
-
-  |Parameter|Description|
-  |---|---|
-  |**Virtual Network**| Select the virtual network from the **drop-down Menu**.|
-  |**Control plane subnet**| Select the control plane network from the **drop-down Menu**.|
-  |**Worker Network**| Select the worker network from the **drop-down Menu**. |
-
-
-
-
-11. The Node configuration page is where you can specify the availability zones (AZ), instance types, disk size, and the number of nodes. Configure the master and worker node pools. A master and a worker node pool are configured by default.
-
- - - - You can add new worker pools to customize specific worker nodes to run specialized workloads. For example, the default worker pool may be configured with the c2.standard-4 instance types for general-purpose workloads. You can configure another worker pool with instance type g2-standard-4 to leverage GPU workloads. - - - - -12. An optional taint label can be applied to a node pool during the cluster creation. You can edit the taint label on existing clusters. Review the [Node Pool](/clusters/cluster-management/node-pool) management page to learn more. Toggle the **Taint** button to create a label. - - - -13. Enable or disable node pool taints. If tainting is enabled, then you need to provide values for the following parameters. - - |**Parameter**| **Description**| - |-------------|---------------| - |**Key** |Custom key for the taint.| - |**Value** | Custom value for the taint key.| - | **Effect** | Choose the preferred pod scheduling effect from the drop-down Menu. Review the [Effect Table](/clusters/public-cloud/gcp/create-gcp-iaas-cluster#effecttable) below for more details. | - - #### Effect Table - - |**Parameter**| **Description**| - |-------------|---------------| - | **NoSchedule**| A pod that cannot tolerate the node taint and should not be scheduled to the node. - | **PreferNoSchedule**| The system will avoid placing a non-tolerant pod on the tainted node but is not guaranteed. - | **NoExecute**| New pods will not be scheduled on the node, and existing pods on the node will be evicted if they do not tolerate the taint. | - -14. Click on **Next** after configuring the node pool. - - - -15. The settings page is where you can configure the patching schedule, security scans, backup settings, and set up Role Based Access Control (RBAC). Review the cluster settings and make changes if needed. Click on **Validate**. - - - -16. Review the settings summary and click on **Finish Configuration** to deploy the cluster. Be aware that provisioning IaaS clusters can take approximately 15 - 30 min depending on the cluster profile and the node pool configuration. - -You can monitor cluster deployment progress on the cluster details page. - - -# Validate - -You can validate that your cluster is up and available by reviewing the cluster details page. - -1. Log in to [Palette](https://console.spectrocloud.com). - - - -2. Navigate to the left **Main Menu** and click on **Clusters**. - - -3. The **Clusters** page lists the available clusters that Palette manages. Select your cluster to review its details. - - - -4. From the cluster details page, verify the **Cluster Status** field displays **Running**. \ No newline at end of file diff --git a/content/docs/04-clusters/01-public-cloud/05-gcp/30-create-gcp-gke-cluster.md b/content/docs/04-clusters/01-public-cloud/05-gcp/30-create-gcp-gke-cluster.md deleted file mode 100644 index e10947c372..0000000000 --- a/content/docs/04-clusters/01-public-cloud/05-gcp/30-create-gcp-gke-cluster.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: "Create and Manage GCP GKE Cluster" -metaTitle: "Create and Manage GKE IaaS Cluster" -metaDescription: "Learn how to add and manage a GKE cluster deployed to GCP with Palette." 
-hideToC: false
-fullWidth: false
----
-
-import Tabs from 'shared/components/ui/Tabs';
-import WarningBox from 'shared/components/WarningBox';
-import InfoBox from 'shared/components/InfoBox';
-import PointsOfInterest from 'shared/components/common/PointOfInterest';
-
-# Create and Manage GCP GKE Cluster
-
-
-Palette supports creating and managing Kubernetes clusters using Google Kubernetes Engine (GKE). This section guides you through creating a Kubernetes cluster that is deployed to GKE and that Palette manages.
-
-# Prerequisites
-
-Ensure the following requirements are met before you attempt to deploy a cluster to GCP.
-
- -- Access to a GCP cloud account. - - -- You have added a GCP account in Palette. Review the [Register and Manage GCP Accounts](/clusters/public-cloud/gcp/add-gcp-accounts) for guidance. - - -- An infrastructure cluster profile for GKE. Review the [Create Cluster Profiles](/cluster-profiles/task-define-profile) for guidance. - - -- Palette creates compute, network, and storage resources while provisioning Kubernetes clusters. Ensure there is sufficient capacity in the preferred GCP region to create the following resources: - - Virtual Private Cloud (VPC) Network - - Static External IP Address - - Network Interfaces - - Cloud NAT - - Cloud Load Balancing - - Persistent Disks - - Cloud Router - - -# Deploy a GKE Cluster - -1. Log in to [Palette](https://console.spectrocloud.com) and ensure you are in the correct project scope. - - -2. Navigate to the left **Main Menu** and click on **Clusters**. - - -3. Click on **Add New Cluster**. - - -4. A prompt displays to either deploy or import a new cluster. Click on **Deploy New Cluster**. - - -5. Select **GCP** and click on **Start GCP Configuration**. - - -6. Populate the wizard page with the cluster name, description, and tags. Tags assigned to a cluster are propagated to the VMs deployed to the computing environments. - -7. Select a GCP account, and click on **Next**. - - - -8. Select the **Managed Kubernetes** row and select one of your GKE cluster profiles. Click on **Next**. - - - -9. Review and customize pack parameters as desired. By default, parameters for all packs are set with values defined in the cluster profile. Click on **Next** to continue. - - -10. Fill out the following parameters, and click on **Next** when you are done. - -
- - |Parameter|Description| - |---|---| - |**Project**|The project to which the cluster belongs.| - |**Region**|Choose the desired GCP region in which to deploy the cluster.| - - -11. The Node configuration page is where you can specify the availability zones (AZ), instance types, disk size, and the number of nodes. Configure the worker node pool. - -
- - - - You can add new worker pools to customize specific worker nodes to run specialized workloads. For example, the default worker pool may be configured with the c2.standard-4 instance types for general-purpose workloads. You can configure another worker pool with instance type g2-standard-4 to run GPU workloads. - - - - -12. An optional taint label can be applied to a node pool during the cluster creation. You can edit the taint label on existing clusters. Review the [Node Pool](/clusters/cluster-management/node-pool) management page to learn more. Toggle the **Taint** button to create a label. - - - -13. Enable or disable node pool taints. If tainting is enabled, then you need to provide values for the following parameters. - - |**Parameter**| **Description**| - |-------------|---------------| - |**Key** |Custom key for the taint.| - |**Value** | Custom value for the taint key.| - | **Effect** | Choose the preferred pod scheduling effect from the **drop-down Menu**. Review the [Effect Table](/clusters/public-cloud/gcp/create-gcp-iaas-cluster#effecttable) below for more details. | - - #### Effect Table - - |**Parameter**| **Description**| - |-------------|---------------| - | **NoSchedule**| A pod that cannot tolerate the node taint and should not be scheduled to the node. - | **PreferNoSchedule**| The system will avoid placing a non-tolerant pod to the tainted node but is not guaranteed. - | **NoExecute**| New pods will not be scheduled on the node, and existing pods on the node will be evicted if they do not tolerate the taint. | - -14. Click on **Next** after configuring the node pool. - - - -15. The **Settings** page is where you can configure the patching schedule, security scans, backup settings, and set up Role Based Access Control (RBAC). Review cluster settings and make changes if needed. Click on **Validate**. - - -16. Review the settings summary and click on **Finish Configuration** to deploy the cluster. Be aware that provisioning GKE clusters can take 15 - 30 minutes depending on the cluster profile and the node pool configuration. - -You can monitor cluster deployment progress on the cluster details page. - - -# Validate - -
- -You can validate that your cluster is up and available by reviewing the cluster details page. - -1. Log in to [Palette](https://console.spectrocloud.com). - - - -2. Navigate to the left **Main Menu** and click on **Clusters**. - - - -3. The **Clusters** page lists the available clusters that Palette manages. Select your cluster to view its details page. - - - -4. From the cluster details page, verify the **Cluster Status** field displays **Running**. \ No newline at end of file diff --git a/content/docs/04-clusters/01-public-cloud/05-gcp/90-required-permissions.md b/content/docs/04-clusters/01-public-cloud/05-gcp/90-required-permissions.md deleted file mode 100644 index bfd5ba13ca..0000000000 --- a/content/docs/04-clusters/01-public-cloud/05-gcp/90-required-permissions.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: "Required IAM Permissions" -metaTitle: "Required IAM Permissions" -metaDescription: "A list of required IAM permissions that Palette requires for GCP deployments." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -This table contains the required Google Cloud Platform (GCP) permissions to create a custom GCP role tailored for usage with Palette. When creating a custom role, ensure you include all the permissions listed below to prevent Palette from having issues when deploying a host cluster. - -| Permissions | Description | -|------------------------------------------|---------------------------------------------------------------| -| `compute.backendServices.create` | Create backend services | -| `compute.backendServices.delete` | Delete backend services | -| `compute.backendServices.get` | Get backend service information | -| `compute.backendServices.list` | List backend services | -| `compute.backendServices.update` | Update backend services | -| `compute.backendServices.use` | Use backend services | -| `compute.disks.create` | Create persistent disks | -| `compute.firewalls.create` | Create firewall rules | -| `compute.firewalls.delete` | Delete firewall rules | -| `compute.firewalls.get` | Get firewall rule information | -| `compute.firewalls.list` | List firewall rules | -| `compute.globalAddresses.create` | Create global addresses | -| `compute.globalAddresses.delete` | Delete global addresses | -| `compute.globalAddresses.get` | Get global address information | -| `compute.globalAddresses.list` | List global addresses | -| `compute.globalAddresses.use` | Use global addresses | -| `compute.globalForwardingRules.create` | Create global forwarding rules | -| `compute.globalForwardingRules.delete` | Delete global forwarding rules | -| `compute.globalForwardingRules.get` | Get global forwarding rule information | -| `compute.globalForwardingRules.list` | List global forwarding rules | -| `compute.healthChecks.create` | Create health checks | -| `compute.healthChecks.delete` | Delete health checks | -| `compute.healthChecks.get` | Get health check information | -| `compute.healthChecks.list` | List health checks | -| `compute.healthChecks.useReadOnly` | Use health checks in read-only mode | -| `compute.instanceGroups.create` | Create instance groups | -| `compute.instanceGroups.delete` | Delete instance groups | -| `compute.instanceGroups.get` | Get instance group information | -| `compute.instanceGroups.list` | List instance groups | -| 
`compute.instanceGroups.update` | Update instance groups | -| `compute.instanceGroups.use` | Use instance groups | -| `compute.instances.create` | Create instances | -| `compute.instances.delete` | Delete instances | -| `compute.instances.get` | Get instance information | -| `compute.instances.list` | List instances | -| `compute.instances.setLabels` | Set labels on instances | -| `compute.instances.setMetadata` | Set metadata on instances | -| `compute.instances.setServiceAccount` | Set service account on instances | -| `compute.instances.setTags` | Set tags on instances | -| `compute.instances.use` | Use instances | -| `compute.networks.create` | Create networks | -| `compute.networks.delete` | Delete networks | -| `compute.networks.get` | Get network information | -| `compute.networks.list` | List networks | -| `compute.networks.updatePolicy` | Update network policies | -| `compute.regions.get` | Get region information | -| `compute.regions.list` | List regions | -| `compute.routers.create` | Create routers | -| `compute.routers.delete` | Delete routers | -| `compute.routers.get` | Get router information | -| `compute.routes.delete` | Delete routes | -| `compute.routes.get` | Get route information | -| `compute.routes.list` | List routes | -| `resourcemanager.projects.get` | Get details of a specified Google Cloud project. | -| `resourcemanager.projects.list` | List all Google Cloud projects that the user has access to. | -| `storage.objects.get` | Get details of a specified object in Google Cloud Storage. | -| `storage.objects.list` | List all objects in a specified Google Cloud Storage bucket. | -| `iam.serviceAccounts.actAs` | Act as the service account specified, allowing access to its resources. | -| `iam.serviceAccounts.get` | Get details of a specified service account. | -| `iam.serviceAccounts.getAccessToken` | Get the Oauth2 access token for the service account. | -| `iam.serviceAccounts.list` | List all service accounts available to the user. | -| `serviceusage.quotas.get` | Get quota information for a specified Google Cloud service. | -| `serviceusage.services.get` | Get details of a specified Google Cloud service. | -| `serviceusage.services.list` | List all Google Cloud services available to the user. | -| `recommender.containerDiagnosisInsights.*` | Access insights about diagnosed issues with Google Kubernetes Engine containers. | -| `recommender.containerDiagnosisRecommendations.*` | Access recommendations for resolving diagnosed issues with Google Kubernetes Engine containers. | -| `recommender.locations.*` | Access details about locations in Google Cloud Recommender. | -| `recommender.networkAnalyzerGkeConnectivityInsights.*`| Access insights about network connectivity for Google Kubernetes Engine clusters. | -| `recommender.networkAnalyzerGkeIpAddressInsights.*` | Access insights about IP address usage for Google Kubernetes Engine clusters. | - diff --git a/content/docs/04-clusters/01-public-cloud/05.5-cox-edge.md b/content/docs/04-clusters/01-public-cloud/05.5-cox-edge.md deleted file mode 100644 index f64bffdde0..0000000000 --- a/content/docs/04-clusters/01-public-cloud/05.5-cox-edge.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "Cox Edge" -metaTitle: "Cox Edge with Palette" -metaDescription: "Palette supports deployment of workloads to Cox Edge." 
-hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -Palette enables deployment of workloads to Cox Edge, a last-mile edge cloud provider. Using the Cox Edge network allows you to deploy compute resources closer to the location of your application consumers, reducing latency and enhancing the user experience. - -Get started with Palette and Cox Edge by checking out the [Create and Manage Cox Edge IaaS Cluster](/clusters/public-cloud/cox-edge/create-cox-cluster). - - - -# Resources - -- [Register and Manage Cox Edge Accounts](/clusters/public-cloud/cox-edge/add-cox-edge-accounts) - -- [Create and Manage Cox IaaS Cluster](/clusters/public-cloud/cox-edge/create-cox-cluster) - -- [Required Network Rules](/clusters/public-cloud/cox-edge/network-rules) diff --git a/content/docs/04-clusters/01-public-cloud/05.5-cox-edge/10-add-cox-edge-accounts.md b/content/docs/04-clusters/01-public-cloud/05.5-cox-edge/10-add-cox-edge-accounts.md deleted file mode 100644 index 9c668d8e6e..0000000000 --- a/content/docs/04-clusters/01-public-cloud/05.5-cox-edge/10-add-cox-edge-accounts.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: "Register and Manage Cox Edge Accounts" -metaTitle: "Add a Cox Edge Account to Palette" -metaDescription: "Learn how to add and manage a Cox Edge account in Palette." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -Palette supports integration with Cox Edge accounts and account environments. This section explains how to create a Cox Edge account in Palette. - -# Add Cox Edge Account - -To add a Cox Edge account to Palette, use the following instructions. - -# Prerequisites - -- A [Spectro Cloud](https://console.spectrocloud.com) account. - -- A [Cox Edge](https://portal.coxedge.com/login) account. - -- Tenant admin access in Palette. - -- Your Cox Edge organization id. Ask your system administrator for this value or use the API endpoint `api/v2/organizations` to retrieve the organization id. - -``` -curl --silent "https://portal.coxedge.com/api/v2/organizations" \ - -H "MC-Api-Key: YourAPIkeyHere" | jq '.data[] | {id}' -``` - -``` -{ - "id": "268ce256-15ef-465f-bc0f-952fac3e7c1e" -} -``` - -# Enablement - -You can use the steps below or the interactive guide to help you add a Cox Edge account to Palette. Click on the first image link to navigate the destination site with the tutorial at right. - - - -1. Log in to the [Cox Edge](https://portal.coxedge.com/login) portal. - - -2. Navigate to the drop-down **User Menu** and click on **API Credentials**. - - -3. Select **Generate API key**. - - -4. Give the key a name and select **Generate**. - - -5. Copy the API key value to a secure location. You will use this value in the future. - - -6. Copy the API endpoint URL. The API endpoint is located above the table that lists all your API keys. - - -7. Click the **four-tile Home** button at the top and select **Edge Compute**. - - -8. 
Next, click on the environment drop-down menu and select **Add Environment** to create a compute environment. A compute environment is required when adding a Cox Edge account to Palette. If you already have a compute environment available, skip to step 11. - - -9. Provide a name and description and click **Next**. - - -10. Add members to the compute environment. You can also add members at a later point. Apply the changes. - - -11. Open another browser tab and log in to [Palette](https://console.spectrocloud.com) as a Tenant admin. - - -12. Go to **Tenant Settings** > **Cloud Accounts** and click **+Add Cox Edge Account**. - - -13. Fill out the following input fields. - - - Account Name: Assign a name to the Cox Edge account. - - - API Base URL: Add the API endpoint URL you copied down earlier. You can locate this value in the API Key overview page in the Cox Edge portal. - - - API Key: Provide the API key you generated earlier. - - - Organization Id: Ask your system administrator for this value or use the Cox Edge API to retrieve the organization id. - - - Environment: This optional field allows you to enter name of the environment you wish to target if you have one. - - - Service: Use the value `edge-services`. - -14. Click **Validate** to confirm you have access to the Cox Edge account. - - -15. Select **Confirm** to add the Cox Edge account to Palette. - - -# Validate - -1. Log in to [Palette](https://console.spectrocloud.com) as a Tenant admin. - - -2. Navigate to the left **Main Menu** and select **Tenant Settings** to ensure you are in the **Cloud Accounts** page. - - -3. Your Cox Edge account is now listed with all the other infrastructure provider accounts. - - -4. You can also deploy a cluster to Cox Edge to validate everything is working. Use the [Create and Manage Cox IaaS Cluster](/clusters/public-cloud/cox-edge/create-cox-cluster) guide to create and deploy a cluster to Cox Edge. \ No newline at end of file diff --git a/content/docs/04-clusters/01-public-cloud/05.5-cox-edge/20-create-cox-cluster.md b/content/docs/04-clusters/01-public-cloud/05.5-cox-edge/20-create-cox-cluster.md deleted file mode 100644 index 367c224ffd..0000000000 --- a/content/docs/04-clusters/01-public-cloud/05.5-cox-edge/20-create-cox-cluster.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -title: "Create and Manage Cox IaaS Cluster" -metaTitle: "Create and Manage Cox IaaS Cluster" -metaDescription: "Learn how to add and manage a cluster deployed to Cox Edge." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Create and Manage Cox Edge IaaS Cluster - - -Palette supports creating and managing Kubernetes clusters deployed to a Cox Edge account. This section guides you on how to create a Kubernetes cluster in Cox Edge that is managed by Palette. - -## Prerequisites - -- A [Spectro Cloud](https://console.spectrocloud.com) account. - -- A [Cox Edge](https://portal.coxedge.com/login) account. - -- A Cox Edge account registered in Palette. Check out the [Register and Manage Cox Edge Accounts](/clusters/public-cloud/cox-edge/add-cox-edge-accounts) guide to learn how to register a Cox Edge account in Palette. - -- A cluster profile for Cox Edge clusters. 
If you need guidance creating a cluster profile, check out the [Creating Cluster Profiles](/cluster-profiles/task-define-profile) guide.
-
-
-## Create a Cluster
-
-1. Log in to [Palette](https://console.spectrocloud.com).
-
-
-2. Navigate to the left **Main Menu** and select **Clusters**.
-
-
-3. Click **+ Add New Cluster** and select **Deploy New Cluster**.
-
-
-4. Select **Cox Edge** from the list of infrastructure providers.
-
-
-5. Fill out the following input fields and click **Next**.
-
-   - Cluster name: The name of the new cluster.
-   - Description: A text value that explains the cluster.
-   - Tags: Assign tags to the cluster.
-   - Cloud Account: Select your Cox Edge account.
-
-
-6. Select a cluster profile that is compatible with Cox Edge. If you need guidance creating a cluster profile, check out the [Creating Cluster Profiles](/cluster-profiles/task-define-profile) guide.
-
-
-
-If you want to use the Kubernetes cluster autoscaler feature and you are using [Longhorn](/integrations/longhorn) as the container storage interface, set the `charts.longhorn.defaultSettings.kubernetesClusterAutoscalerEnabled` parameter to `true`:
-
- - -```yaml -charts: - longhorn: - defaultSettings: - kubernetesClusterAutoscalerEnabled: true -``` - - -
- - -7. Review the cluster profile and all of its manifest files. Click **Next** to continue. - - -8. Fill out the following input fields and select **Next**. - - SSH Keys: Select an SSH key pair or create a new key pair. - - Load Balancer PoP: The location where you want to deploy the cluster compute resources. - - Organization: The Cox Edge organization to target for the deployment. - - Environment: The Cox Edge environment to deploy the compute resources. - - Update worker pools in parallel: Enable this checkbox if you wish to update worker pool nodes in parallel. - -9. Configure the master and worker node pools. The following input fields apply to Cox Edge master and worker node pools. For a description of input fields that are common across target platforms refer to the [Node Pools](https://docs.spectrocloud.com/clusters/cluster-management/node-pool) management page. Click **Next** when you are done. - -
- - #### Master Pool configuration: - - - Cloud Configuration: - - - Deployment Name: The name to assign the Cox Edge deployment. - - PoP: The Cox Edge location to target. - - Instance Type: The compute size. - - Network policies: The network rules to apply to the deployment. Review the list of required network policies in the [Network Rules](/clusters/public-cloud/cox-edge/network-rules) documentation. - -
- - - - Use the network rules specified in the [Network Rules](/clusters/public-cloud/cox-edge/network-rules) documentation. If you fail to add the required network rules, Palette will be unable to deploy the cluster to Cox Edge. - - - - #### Worker Pool configuration: - - - Cloud Configuration: - - Deployment Name: The name to assign the Cox Edge deployment. - - PoP: The Cox Edge location to target. - - Instance Type: The compute size. - - Network policies: The network rules to apply to the deployment. Review the list of required network policies in the [Network Rules](/clusters/public-cloud/cox-edge/network-rules) documentation. - - -10. The settings page is where you can configure patching schedule, security scans, backup settings, set up role-based access control (RBAC), and enable [Palette Virtual Clusters](/devx/palette-virtual-clusters). Review the settings and make changes if needed. Click **Validate**. - - -11. Review the settings summary and click **Finish Configuration** to deploy the cluster. Be aware that provisioning IaaS clusters can take several minutes. - -The cluster details page contains the status and details of the deployment. Use this page to track the deployment progress. - - -## Validate - -You can validate your cluster is up and running by reviewing the cluster details page. Navigate to the left **Main Menu** and click **Clusters**. The **Clusters** page contains a list of all available clusters Palette manages. Select the cluster to review its details page. Ensure the **Cluster Status** field contains the value **Running**. - - -# Delete a Cox Edge IaaS Cluster - -When you delete a Cox Edge cluster, all instances and associated resources created for the cluster are removed. To delete a cluster, use the following steps. - - -1. Ensure you are in the correct project scope. - - -2. Navigate to the left **Main Menu** and click **Clusters**. - - -3. Select the cluster you want to delete. - - -4. Click the **Settings** drop-down menu and select **Delete Cluster**. - - -5. Click on **Delete Cluster** - - -6. Type the name of the cluster and click **OK** - -The cluster status is updated to **Deleting** while cluster resources are being deleted. When all resources are successfully deleted, the cluster status is updated to **Deleted** and the cluster is removed from the list. - -## Force Delete a Cluster - -If a cluster is stuck in the **Deletion** state for a minimum of 15 minutes it becomes eligible for force deletion. You can force delete a cluster from the tenant and project admin scope. -To force delete a cluster follow the same steps outlined in [Delete a Cox Edge IaaS Cluster](#delete-a-cox-edge-iaas-cluster). However, after 15 minutes, a **Force Delete Cluster** option is available in the **Settings** drop-down menu. The **Settings** drop-down menu provides you with an estimated time left before the force deletion becomes available. - -
diff --git a/content/docs/04-clusters/01-public-cloud/05.5-cox-edge/90-network-rules.md b/content/docs/04-clusters/01-public-cloud/05.5-cox-edge/90-network-rules.md deleted file mode 100644 index e06c173a6d..0000000000 --- a/content/docs/04-clusters/01-public-cloud/05.5-cox-edge/90-network-rules.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "Required Network Rules" -metaTitle: "Required Network Rules" -metaDescription: "Cox Edge deployments require the following network rules for a successful Palette deployment." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -To successfully deploy a host cluster to Cox Edge with Palette, you must add the following network rules to each deployment. - - -# Inbound - -The following inbound network rules are required for Palette to deploy and manage a Cox Edge cluster. - -| Port | Protocol | Source | Description | -|------|----------|-----------|---------------------------------------------------------------------------| -| 22 | TCP | 0.0.0.0/0 | To support the secure shell (SSH) protocol. | -| 179 | TCP | 0.0.0.0/0 | Required for the Border Gateway Protocol (BGP). | -| 6443 | TCP | 0.0.0.0/0 | Required for Palette to communicate with the cluster's Kubernetes API server. | -| 4789 | UDP | 0.0.0.0/0 | Required for networking with VXLAN. | \ No newline at end of file diff --git a/content/docs/04-clusters/01-public-cloud/06-tke.md b/content/docs/04-clusters/01-public-cloud/06-tke.md deleted file mode 100644 index 4915f963e6..0000000000 --- a/content/docs/04-clusters/01-public-cloud/06-tke.md +++ /dev/null @@ -1,295 +0,0 @@ ---- -title: "Tencent-TKE" -metaTitle: "Creating TKE clusters in Palette" -metaDescription: "The methods of creating clusters for a speedy deployment on Tencent-TKE" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - - -# Overview - -Palette supports the deployment of tenant workloads with Tencent Kubernetes Engine (TKE). The following are the detailing of the Tencent TKE cluster provisioning through Palette: - -1. Palette enables the effortless deployment and management of containerized applications with fully managed TKE. - - -2. TKE is fully compatible with the native Kubernetes APIs and extends Kubernetes plugins such as CBS and CLB on the Tencent Cloud. - - -3. The Palette-supported TKE architecture is represented diagrammatically as below: - -![tencent-diagram.png](/tencent-diagram.png) - -# Prerequisites - -* A Tencent Cloud account with appropriate [permissions](/clusters/public-cloud/tke#permissionsfortkeclustercrudoperations). - - -* Create a Cloud API **Secret ID** and **Secret Key**. - - -* Create the **Virtual Private Network** and **Subnet** to the region where the workload cluster needs to be deployed. - - -* The [**NAT Gateway**](https://intl.cloud.tencent.com/document/product/457/38369) is to be created to support IP address translation and to enable Internet access to resources in Tencent Cloud. 
- - -* A Route table set to accept external traffic, so that the nodes getting created in the associated subnets will have internet capability. - - -* Create a security group for network security isolation and add Inbound traffic rule that allows the TCP/HTTPS protocol for port 443 from all IPv6 and IPv4 sources through this security group. - - -# Tencent Cloud Account Permissions - -**Last Update**: April 26, 2022 - -```yaml -{ - "version": "2.0", - "statement": [ - { - "effect": "allow", - "action": [ - "as:CreateLaunchConfiguration", - "as:CreateAutoScalingGroup", - "as:DescribeLaunchConfigurations", - "as:DescribeAutoScalingInstances", - "as:DescribeAutoScalingActivities", - "as:DescribeAutoScalingGroups", - "as:ModifyDesiredCapacity", - "as:ModifyAutoScalingGroup", - "as:DescribeAutoScalingGroups", - "as:DescribeAutoScalingGroupLastActivities", - "cam:GetRole", - "cam:GetPolicy", - "cam:DeletePolicyVersion", - "cam:CreatePolicyVersion", - "cam:ListGroupsForConsole", - "cam:ListPolicies", - "cam:ListMaskedSubAccounts", - "cvm:DescribeSecurityGroupLimits", - "cvm:DescribeSecurityGroups", - "cvm:CreateSecurityGroup", - "cvm:DescribeInstances", - "cvm:DescribeInstancesStatus", - "cvm:DescribeSecurityGroupAssociateInstances", - "cvm:DescribeSecurityGroupLimits", - "cvm:DescribeSecurityGroupPolicys", - "cvm:DescribeImages", - "cvm:DescribeCbsStorages", - "cvm:RunInstances", - "cvm:DescribeKeyPairs", - "cvm:DescribeAddresses", - "cvm:ModifySingleSecurityGroupPolicy", - "cvm:CreateSecurityGroupPolicy", - "cvm:DeleteSecurityGroupPolicy", - "clb:DescribeLoadBalancers", - "cloudaudit:DescribeEvents", - "cloudaudit:DescribeEvents", - "ecdn:PurgePathCache", - "ecdn:PurgeUrlsCache", - "ecdn:PushUrlsCache", - "monitor:DescribeDashboardMetricData", - "tke:CreateCluster", - "tke:DescribeClusters", - "tke:DescribeClusterEndpointStatus", - "tke:DescribeClusterEndpointVipStatus", - "tke:DescribeClusterSecurity", - "tke:CreateClusterEndpointVip", - "tke:CreateClusterEndpoint", - "tke:DeleteClusterEndpointVip", - "tke:DeleteClusterEndpoint", - "tke:DeleteCluster", - "tke:DescribeClusterAsGroupOption", - "tke:DescribeClusterInstances", - "tag:DescribeResourceTagsByResourceIds", - "tag:DescribeTagValues", - "tag:TagResources", - "tag:DescribeTagKeys", - "vpc:DescribeSubnetEx", - "vpc:DescribeVpcEx", - "vpc:DescribeVpcLimits", - "vpc:DescribeRouteTable", - "vpc:DescribeNatGateways", - "vpc:DescribeCcns", - "vpc:DescribeCcnAttachedInstances", - "vpc:DescribeLocalGateway", - "vpc:DescribeHaVips", - "vpc:DescribeVpnGw", - "vpc:DescribeDirectConnectGateways", - "vpc:DescribeVpcPeeringConnections", - "vpc:DescribeCustomerGateways", - "vpc:DescribeRoutes", - "vpc:ModifyNatGatewayAttribute", - "vpc:ResetNatGatewayConnection", - "vpc:DescribeAddress", - "vpc:DescribeTemplateLimits", - "vpc:DescribeAddressGroups", - "vpc:DescribeService", - "vpc:DescribeServiceGroups", - "vpc:DescribeNetworkAcls", - "vpc:DescribeNetworkInterfaces" - ], - "resource": [ - "*" - ] - } - ] -} -``` - - -# Create a Tencent Cloud Account - -Create a Tencent Cloud account in Palette from the Tenant Admin or Project Admin scope. To create the cloud account: - -1. Log in to the Palette and from the **Tenant Admin Settings**, select the **Cloud Accounts** tab. - - -2. 
Click **+ Tencent Account** to open the cloud account creation wizard and fill in the following details:
-
-   |**Parameter** | **Description**|
-   |-----------------|----------------|
-   | **Account Name**| A custom name to identify the cloud account on the Palette Console.|
-   | **Optional Description**| An optional description of the cloud account.|
-   | **Secret ID**| The Secret ID of the Tencent cloud account.|
-   | **Secret Key**| The secret key of the Tencent cloud account.|
-
-
-3. Click the **Validate** button to validate the credentials.
-
-
-4. Click the **Confirm** button to complete the cloud account creation wizard.
-
-
-**Note**: The cloud account can also be created during the first step of cluster creation, when you fill in the basic information, by clicking the **+** next to **Cloud Account**.
-
-# Deploy a Tencent Cluster
-
-The following steps need to be performed to provision a new TKE cluster:
-
-1. Provide the basic cluster information such as:
-    * **Name**, **Description**, and **Tags**. Tags on a cluster are propagated to the VMs deployed on the cloud or data center environments.
-    * Select the desired [Tencent cloud account](/clusters/public-cloud/tke#createatencentcloudaccount). The Tencent credentials must be pre-configured in the Project/Tenant Admin settings.
-
-
-   **Note**: The cloud account can be created during the cluster creation by clicking **+** next to the **Cloud Account**.
-
- -2. Select the cluster profile created for Tencent Cloud. The profile definition will be used as the cluster deployment template. - - -3. Review and override pack parameters as desired. By default, parameters for all packs are set with values defined in the cluster profile. - - - -While configuring the Operating System layer of the TKE cluster profile, configure the value of the OS pack file with any one of the following images: - -```yaml -"OsName": "centos7.6.0_x64" -``` -```yaml -"OsName": "centos7.6.0_x64 GPU" -``` -```yaml -"OsName": "ubuntu18.04.1x86_64" -``` -```yaml -"OsName": "ubuntu18.04.1x86_64 GPU" -``` - - - - - -While adding Add-on packs to the Cluster Profile, make sure that Persistent Volume Claim size is >=10 GB and in multiples of 10. - -Example: - -```yaml -master: -persistence: - enabled: true - accessModes: - - ReadWriteOnce - size: 20Gi -``` - - - -4. Provide the Tencent Cloud account and placement information: - - |**Parameter** | **Description**| - |--------------|----------------| - | **Cloud Account**| Select the desired cloud account. - | **Tencent Cloud Accounts** | The Tencent credentials need to be pre-configured in the **Project**/**Tenant Admin** settings. - ||**Note**: The cloud account can be created during this step of
cluster creation by clicking **+** next to the **Cloud Account**. | - | **Region** | Choose the desired Tencent region where you
would like the clusters to be provisioned.
-   | **SSH Key Pair Name**| Choose the desired SSH key pair. You must preconfigure SSH key pairs on TKE for the desired regions. The selected key is inserted into the provisioned VMs.
-   | **VPCID**|The ID of the Virtual Private Cloud (VPC) that the stack is to be launched into. The VPC must be in the specified region. All cluster instances will be launched into this VPC. |
-   |**Cluster Endpoint Access**| Select Public, or Private & Public, based on how you want to establish the communication with the endpoint for the managed Kubernetes API server and your cluster.|
-   |**Public Security Group**|A security group that controls the traffic allowed to reach and leave the resources it is associated with. For example, after you associate a security group with the cluster, it controls the inbound and outbound traffic to the cluster. |
-
-
-Palette currently encourages its users to select the Public cluster endpoint access option. Other options will be supported in the near future.
-
-
-5. Public Access CIDRs - To enable access restrictions.
-
-
-6. Update Worker Pools in parallel - Patch updates to all Worker Pools simultaneously.
-
-
-7. Configure one or more worker node pools. A single worker node will be configured by default. To learn more about the configuration options, review the [Node Pool](/clusters/cluster-management/node-pool) documentation page. Click on **Next** when you are done with the node pool configuration.
-
-
-8. Review the settings and deploy the cluster. The provisioning status, with details of ongoing provisioning tasks, is available to track progress.
-
-# Delete a Tencent Cluster
-
-The deletion of a Tencent cluster results in the removal of all Virtual Machines and associated Storage Disks created for the cluster. The following tasks need to be performed to delete a Tencent cluster:
-
-1. Ensure you are in the correct project scope.
-
-
-2. Navigate to the left **Main Menu** and click on **Clusters**.
-
-
-3. Click on the cluster that you want to remove.
-
-
-4. Click on the **Settings** drop-down menu.
-
-
-5. Click on **Delete Cluster**.
-
-
-6. Type in the name of the cluster and click on **OK**.
-
-The cluster status is updated to **Deleting** while cluster resources are being deleted. Once all resources are successfully deleted, the cluster status is updated to **Deleted** and the cluster is removed from the list of clusters.
-
-## Force Delete a Cluster
-
-In the Tenant Admin and Project Admin scopes, Palette allows you to force the deletion of a cluster that has been stuck in the **Deletion** state for a minimum of **15 minutes**.
-
-1. Log in to the Palette Management Console.
-
-
-2. Navigate to the **Cluster Details** page of the cluster stuck in deletion.
-
-   - If the deletion is stuck for more than 15 minutes, click the **Force Delete Cluster** button from the **Settings** dropdown.
-
-   - If the **Force Delete Cluster** button is not enabled, wait for 15 minutes. The **Settings** dropdown will give the estimated time for the auto-enabling of the **Force Delete** button.
-
-
-
-If any resources remain in the cloud, you should clean them up before initiating a forced delete.
- diff --git a/content/docs/04-clusters/01-public-cloud/80-deploy-k8s-cluster.md b/content/docs/04-clusters/01-public-cloud/80-deploy-k8s-cluster.md deleted file mode 100644 index 424b536fa0..0000000000 --- a/content/docs/04-clusters/01-public-cloud/80-deploy-k8s-cluster.md +++ /dev/null @@ -1,1270 +0,0 @@ ---- -title: "Deploy a Cluster" -metaTitle: "Deploy a Cluster" -metaDescription: "Learn how to deploy a Kubernetes cluster to a public cloud provider with Palette. " -icon: "" -category: ["tutorial"] -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import YouTube from 'shared/components/Video'; - -# Deploy a Cluster - -Palette helps you create and manage Kubernetes clusters in various cloud environments with minimal overhead. - -Palette offers profile-based management for Kubernetes, enabling consistency, repeatability, and operational efficiency across multiple clusters. A [cluster profile](/cluster-profiles) allows you to customize the cluster infrastructure stack, allowing you to choose the desired Operating System (OS), Kubernetes, Container Network Interfaces (CNI), Container Storage Interfaces (CSI). You can further customize the stack with add-on application layers. - -After defining a cluster profile, you can provide the cloud environment details, the control plane, and worker node configurations to deploy a host cluster. - -This tutorial will teach you how to deploy a host cluster with Palette using Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) cloud providers. You can deploy a cluster using either Palette or Terraform. You will learn about *Cluster Mode* and *Cluster Profiles* and how these components enable you to deploy customized applications to Kubernetes with minimal effort. - -# Architecture - -As you navigate the tutorial, refer to this diagram to help you understand how Palette uses a cluster profile as a blueprint for the host cluster you deploy. Palette clusters have the same node pools you may be familiar with: control plane nodes, often called *master nodes*, and *worker nodes* where you will deploy applications. The result is a host cluster that Palette manages. - -![A view of Palette managing the Kubernetes lifecycle](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_application.png) - -
- -# Deploy the Cluster and the Application - -Select the tab for the workflow you want to learn more about. - -
- - - - - -You can create and manage clusters directly from the Palette dashboard. Use the following steps to learn how to deploy a host cluster to multiple cloud providers. - -## Prerequisites - -To complete this tutorial, you will need the following. - -- A public cloud account from one of these providers: - - [AWS](https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account) - - [Azure](https://learn.microsoft.com/en-us/training/modules/create-an-azure-account) - - [GCP](https://cloud.google.com/docs/get-started) - -
- -- Register the cloud account in Palette. The following resources provide additional guidance. - - [Register and Manage AWS Accounts](/clusters/public-cloud/aws/add-aws-accounts) - - [Register and Manage Azure Cloud Accounts](/clusters/public-cloud/azure/azure-cloud) - - [Register and Manage GCP Accounts](/clusters/public-cloud/gcp#creatingagcpcloudaccount) - -
- -- An SSH Key Pair. Use the [Create and Upload an SSH Key](/clusters/cluster-management/ssh-keys) guide to learn how to create an SSH key and upload it to Palette. - - - AWS users must create an AWS Key pair before starting the tutorial. If you need additional guidance, check out the [Create EC2 SSH Key Pair](https://docs.aws.amazon.com/ground-station/latest/ug/create-ec2-ssh-key-pair.html) tutorial. - -## Deploy the Environment - -The following steps will guide you through deploying the cluster infrastructure. You will start by creating a cluster profile that you apply to the host cluster. - -
- - - - -### Create Cluster Profile (AWS) - -[Cluster profiles](https://docs.spectrocloud.com/cluster-profiles) are templates you create with the following core layers and any add-on layers such as security, monitoring, logging, and more. - - - Operating System (OS) - - Kubernetes distribution and version - - Network Container Interface (CNI) - - Storage Container Interface (CSI) - - -You customize profiles by choosing the type of component and version. In this way, profiles offer a reproducible way to create clusters. - -Log in to [Palette](https://console.spectrocloud.com) and navigate to the left **Main Menu**. Select **Profiles** to view the cluster profile page. -You can view the list of available cluster profiles. To create a cluster profile, click the **Add Cluster Profile** button. - -![View of the cluster Profiles page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_profile_list_view.png) - -Follow the wizard to create a new profile. - -In the **Basic Information** section, assign the name **aws-profile**, a brief profile description, select the type as **Full**, and assign the tag **env:aws**. You can leave the version empty if you want to. Just be aware that the version defaults to **1.0.0**. Click on **Next**. - -**Cloud Type** allows you to choose the infrastructure provider with which this cluster profile is associated. Select **AWS** and click on **Next**. - -**Profile Layers**, this is the main configuration step where you specify the packs that compose the profile. There are four required infrastructure packs and several optional add-on packs you can choose from. -Every pack requires you to select the **Pack Type**, **Registry**, and **Pack Name**. - -For this tutorial, use the following packs: - -| Pack Name | Version | Layer | -|--------------------|-----------|--------------------| -| ubuntu-aws LTS | 20.4.x | Operating System | -| Kubernetes | 1.24.x | Kubernetes | -| cni-calico | 3.24.x | Network | -| csi-aws-ebs | 1.16.x | Storage | - - -As you fill out the information for each layer, click on **Next** to proceed to the next layer. - -Click on **Confirm** after you have completed filling out all the core layers. - -![A view of the cluster profile stack](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_parameters.png) - -The review section gives an overview of the cluster profile configuration you selected. Click on **Finish Configuration** to create the cluster profile. - - -You can modify cluster profiles after you create them by adding, removing, or editing the layers. - -
- - -## Create a New Cluster - -Navigate to the left **Main Menu** and select **Cluster**. From the clusters page, click on the **Add New Cluster** button. - -![Palette clusters overview page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png) - -Palette will prompt you to either deploy a new cluster or import an existing one. Click on **Deploy New Cluster** to access the cluster deployment wizard. Select **AWS** and click the **Start AWS Configuration** button. Use the following steps to create a host cluster in AWS. - -
- - -### Basic information - -In the **Basic information** section, insert the general information about the cluster, such as the Cluster name, Description, Tags, and Cloud account. Click on **Next**. - -![Palette clusters basic information](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_basic_info.png) - -
- - -### Cluster Profile - -A list is displayed of available profiles you can choose to deploy to AWS. Select the cluster profile you created earlier and click on **Next**. - -
- - -### Parameters - -The **Parameters** section displays the core and add-on layers in the cluster profile. - -![Palette clusters parameters](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_creation_parameters.png) - -Each layer has a pack manifest file with the deploy configurations. The pack manifest file is in a YAML format. Each pack contains a set of default values. You can change the manifest values if needed. Click on **Next** to proceed. - -
-
-
-### Cluster Configuration
-
-The **Cluster config** section allows you to select the **Region** in which to deploy the host cluster and specify other options such as the **SSH Key Pair** to assign to the cluster. All clusters require you to select an SSH key. After you have selected the **Region** and your **SSH Key Pair Name**, click on **Next**.
-
-### Nodes Configuration
-
-The **Nodes config** section allows you to configure the nodes that make up the control plane (master nodes) and data plane (worker nodes) of the host cluster.
-
-
-Before you proceed to the next section, review the following parameters.

-- **Number of nodes in the pool** - This option sets the number of master or worker nodes in the master or worker pool. For this tutorial, set the count to one for the master pool and two for the worker pool. - - -- **Allow worker capability** - This option allows the master node to also accept workloads. This is useful when spot instances are used as worker nodes. You can check this box if you want to. - - -- **Instance Type** - Select the compute type for the node pool. Each instance type displays the amount of CPU, RAM, and hourly cost of the instance. Select `m4.2xlarge`. - - -- **Availability zones** - Used to specify the availability zones in which the node pool can place nodes. Select an availability zone. - - - -- **Disk size** - Set the disk size to **60 GiB**. - -
- -- **Instance Option** - This option allows you to choose [on-demand instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-on-demand-instances.html) or [spot instance](https://aws.amazon.com/ec2/spot/) for worker nodes. Select **On Demand**. - -
- -![Palette clusters basic information](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png) - -Select **Next** to proceed with the cluster deployment. - -
- - -### Settings - -In the **Settings** section, you can configure advanced options such as when to patch the OS, enable security scans, manage backups, add role-based access control (RBAC) bindings, and more. - -For this tutorial, you can use the default settings. Click on **Validate** to continue. - -
- - -### Review - -The **Review** section allows you to review the cluster configuration prior to deploying the cluster. Review all the settings and click on **Finish Configuration** to deploy the cluster. - -![Configuration overview of newly created AWS cluster](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_profile_cluster_profile_review.png) - - -
- -Navigate to the left **Main Menu** and select **Clusters**. - -![Update the cluster](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_aws_create_cluster.png) - -Click on your cluster to review its details such as deployment status, event logs, cluster profile, monitoring data, and other information about the cluster. - -
- -![A view of the cluster details page](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_details.png) - - -
- - -### Create Cluster Profile (Azure) - -[Cluster profiles](https://docs.spectrocloud.com/cluster-profiles) are templates you create with the following core layers and any add-on layers such as security, monitoring, logging, and more. - - Operating System (OS) - - Kubernetes distribution and version - - Network Container Interface (CNI) - - Storage Container Interface (CSI) - -A cluster profile contains these core and additional add-on layers, such as security, monitoring, logging, etc. - -You customize profiles by choosing the type of component and version. In this way, profiles offer a reproducible way to create clusters. - -Log in to Palette and navigate to the left **Main Menu**. Select **Profiles** to view the cluster profile page. -You can view the list of available cluster profiles. To create a cluster profile, click the **Add Cluster Profile** button. - -![Cluster profiles page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_profile_list_view.png) - -Follow the wizard to create a new profile. - -In the **Basic Information** section, assign the name **azure-profile**, a brief profile description, select the type as **Full**, and assign the tag **env:azure***. You can leave the version empty if you want to. Just be aware that the version defaults to **1.0.0**. Click on **Next**. - -**Cloud Type** allows you to choose the infrastructure provider with which this cluster profile is associated. Select **Azure** and click on **Next**. - -**Profile Layers** is the main configuration step where you specify the packs that compose the profile. You can choose from four required infrastructure packs and several optional add-on packs. -Every pack requires you to select the **Pack Type**, **Registry**, and **Pack Name**. - -For this tutorial, use the following packs: - -| Pack Name | Version | Layer | -|--------------------|--------------------------------------------------|--------------------| -| ubuntu-azure LTS | 20.4.x | Operating System | -| Kubernetes | 1.24.x | Kubernetes | -| cni-calico-azure | 3.24.x | Network | -| Azure Disk | 1.25.x | Storage | - - -As you fill out the information for each layer, click on **Next** to proceed to the next layer. - -Click on **Confirm** after you have completed filling out all the core layers. - -![Azure cluster profile overview page](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_profile_stack.png) - -The review section gives an overview of the cluster profile configuration you selected. Click on **Finish Configuration** to finish creating the cluster profile. - - -You can modify cluster profiles after you create them by adding, removing, or editing the layers. - -
- - -## Create a New Cluster - -Navigate to the left **Main Menu** and select **Clusters**. Click the **Add New Cluster** button. - -![Palette clusters overview page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png) - -Click on **Deploy New Cluster** to access the cluster deployment wizard. Select **Azure** and click the **Start Azure Configuration** button. Use the following steps to create a host cluster in Azure. - -
- - -### Basic information - -In the **Basic information** section, insert the general information about the cluster, such as the Cluster name, Description, Tags, and Cloud account. Click on **Next**. - -![Palette clusters basic information](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_clusters_basic_info.png) - -
- - -### Cluster Profile - -A list is displayed of available profiles you can choose to deploy to Azure. Select the cluster profile you created earlier and click on **Next**. - -### Parameters - -The **Parameters** section displays all the layers and add-on components in the cluster profile. - -![palette clusters basic information](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_parameters.png) - -Each layer has a pack manifest file with the deploy configurations. The pack manifest file is in a YAML format. Each pack contains a set of default values. You can change the manifest values if needed. Click on **Next** to proceed. - -
-
-### Cluster Configuration
-
-
-The **Cluster config** section allows you to select the **Subscription**, **Region**, **Resource Group**, **Storage account**, and **SSH Key** to apply to the host cluster. All clusters require you to assign an SSH key. Refer to the [SSH Keys](/clusters/cluster-management/ssh-keys) guide for information about uploading an SSH key.
-
-
- -When you are done selecting a **Subscription**, **Region**, **Resource Group**, **Storage account** and **SSH Key**, click on **Next**. -
-
-### Nodes Configuration
-
-The **Nodes config** section allows you to configure the nodes that compose the control plane (master nodes) and data plane (worker nodes) of the Kubernetes cluster.
-
-Refer to the [Node Pool](https://docs.spectrocloud.com/clusters/cluster-management/node-pool) guide for a list and description of parameters.
-
-Before you proceed to the next section, review the following parameters.
-
-
-
-- **Number of nodes in the pool** - This option sets the number of master or worker nodes in the master or worker pool. For this tutorial, set the count to one for both the master and worker pools.
-
-- **Allow worker capability** - This option allows the master node to also accept workloads. This is useful when spot instances are used as worker nodes. You can check this box if you want to.
-
-
-- **Instance Type** - Select the compute type for the node pool. Each instance type displays the amount of CPU, RAM, and hourly cost of the instance. Select **Standard_A8_v2**.
-
-
-- **Managed disk** - Used to select the storage class. Select **Standard LRS** and set the disk size to **60**.
-
-
-- **Availability zones** - Used to specify the availability zones in which the node pool can place nodes. Select an availability zone.
-
-![Palette clusters nodes configuration](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png)
-
- - -### Settings - -In the **Settings** section, you can configure advanced options such as when to patch the OS, enable security scans, manage backups, add Role-Based Access Control (RBAC) bindings, and more. - -For this tutorial, you can use the default settings. Click on **Validate** to continue. - -
- - -### Review - -The Review section allows you to review the cluster configuration before deploying the cluster. Review all the settings and click on **Finish Configuration** to deploy the cluster. - -![Configuration overview of newly created Azure cluster](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_profile_review.png) - - -
- -Navigate to the left **Main Menu** and select **Clusters**. - -![Update the cluster](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_azure_create_cluster.png) - -Click on your cluster to review details such as deployment status, event logs, cluster profile, monitoring data, and other information about the cluster. - -
- -![View of the cluster details page](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_azure_create_cluster_details.png) - -
- -
- - -### Create Cluster Profile (GCP) -[Cluster profiles](https://docs.spectrocloud.com/cluster-profiles) are templates you create with the following core layers and any add-on layers such as security, monitoring, logging, and more. - - - Operating System (OS) - - Kubernetes distribution and version - - Network Container Interface (CNI) - - Storage Container Interface (CSI) - - -You customize profiles by choosing the type of component and version. In this way, profiles offer a reproducible way to create clusters. - -Log in to [Palette](https://console.spectrocloud.com) and navigate to the left **Main Menu**. Select **Profiles** to view the cluster profile page. -You can view the list of available cluster profiles. To create a cluster profile, click the **Add Cluster Profile** button. - -![View of the cluster view page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_profile_list_view.png) - -Follow the wizard to create a new profile. - -In the **Basic Information** section, assign the name **gcp-profile**, provide a profile description, select the type as **Full**, and assign the tag **env:gcp**. You can leave the version empty if you want to. Just be aware that the version defaults to 1.0.0. Click on **Next**. - -Cloud Type allows you to choose the infrastructure provider with which this cluster profile is associated. Select **Google Cloud** and click on **Next**. - -Profile Layers, this is the main configuration step where you specify the packs that compose the profile. You can choose from four required infrastructure packs and several optional add-on packs. Every pack requires you to select the Pack Type, Registry, and Pack Name. - - -For this tutorial, use the following packs: - -| Pack Name | Version | Layer | -|--------------------|--------------------------|--------------------| -| ubuntu-gcp LTS | 20.4.x | Operating System | -| Kubernetes | 1.24.x | Kubernetes | -| cni-calico | 3.24.x | Network | -| csi-gcp-driver | 1.7.x | Storage | - - -As you fill out the information for each layer, click on **Next** to proceed to the next layer. - -Click on **Confirm** after you have completed filling out all the core layers. - -![GCP cluster profile view](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_gcp_cluster_profile_stack_view.png) - -The review section gives an overview of the cluster profile configuration you selected. Click on **Finish Configuration** to create the cluster profile. - -You can modify cluster profiles after you create them by adding, removing, or editing the layers. - -
- -Navigate to the left **Main Menu** and select **Cluster**. Click the **Add New Cluster** button. - -![Palette clusters overview page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png) - -Click on **Deploy New Cluster** to access the cluster deployment wizard. Select **Google Cloud** and click the **Start Google Cloud Configuration** button. Use the following steps to create a host cluster in Google Cloud. - -
- - -### Basic information - -In the **Basic information** section, insert the general information about the cluster, such as the **Cluster name**, **Description**, **Tags**, and **Cloud account**. Click on **Next**. - -![Palette clusters basic information](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_basic_info.png) - -
- - -### Cluster Profile - -A list is displayed of available profiles you can choose to deploy to GCP. Select the cluster profile you created earlier and click on **Next**. - -![Palette clusters basic information](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_cluster_gcp_profile.png) - -
- - -### Parameters - -The **Parameters** section displays all the layers and add-on components in the cluster profile. - -![Palette clusters basic information](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_clusters_parameters.png) - -Each layer has a pack manifest file with the deploy configurations. The pack manifest file is in a YAML format. Each pack contains a set of default values. You can change the manifest values if needed. Click on **Next** to proceed. - -
- - -### Cluster Configuration - -The **Cluster config** section allows you to select the **Project**, **Region**, and **SSH Key** to apply to the host cluster. All clusters require you to assign an SSH key. Refer to the [SSH Keys](/clusters/cluster-management/ssh-keys) guide for information about uploading an SSH key. - - -
-
-After selecting a **Project**, **Region**, and **SSH Key**, click on **Next**.
-
-### Nodes Configuration
-
-The **Nodes config** section allows you to configure the nodes that make up the control plane (master nodes) and data plane (worker nodes) of the host cluster.
-
-Before you proceed to the next section, review the following parameters.
-
-Refer to the [Node Pool](/clusters/cluster-management/node-pool) guide for a list and description of parameters.
-
-- **Number of nodes in the pool** - This option sets the number of master or worker nodes in the master or worker pool. For this tutorial, set the count to one for the master pool and two for the worker pool.
-
-- **Allow worker capability** - This option allows the master node to also accept workloads. This is useful when spot instances are used as worker nodes. You can check this box if you want to.
-
-
-- **Instance Type** - Select the compute type for the node pool. Each instance type displays the amount of CPU, RAM, and hourly cost of the instance. Select **n1-standard-4**.
-
-- **Disk size** - Set the disk size to **60**.
-
-
-- **Availability zones** - Used to specify the availability zones in which the node pool can place nodes. Select an availability zone.
-
-![Palette clusters nodes configuration](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png)
-
- -Select **Next** to proceed with the cluster deployment. - - -### Settings - -In the **Settings** section, you can configure advanced options such as when to patch the OS, enable security scans, manage backups, add Role-Based Access Control (RBAC) bindings, and more. - -For this tutorial, you can use the default settings. Click on **Validate** to continue. - -### Review - -The **Review** section allows you to review the cluster configuration before deploying the cluster. Review all the settings and click on **Finish Configuration** to deploy the cluster. - -![Newly created GCP cluster](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_profile_review.png) - -
- -Navigate to the left **Main Menu** and select **Clusters**. - -
- -![Update the cluster](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png) - -Click on your cluster to review details such as deployment status, event logs, cluster profile, monitoring data, and other information about the cluster. - -
- -![View of the cluster details page](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_profile_details.png) - -
-
- - -The cluster deployment process can take 15 to 30 min. The deployment time varies depending on the cloud provider, cluster profile, cluster size, and the node pool configurations provided. You can learn more about the deployment progress by reviewing the event log. Click on the **Events** tab to view the log. - -![Update the cluster](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-event_log.png) - -
-
-While you wait for the cluster deployment process to complete, feel free to check out a video where we discuss the growing pains of using Kubernetes and how Palette can help you address these pain points.
-
- - - - ---- - -## Update Cluster Profile - -In the following steps, you will learn how to update a cluster profile by adding a new layer to it that contains the application. - -
-
-### Add a Manifest
-
-Navigate to the left **Main Menu** and select **Profiles**. Select the cluster profile you created earlier and applied to the host cluster.
-
-Click on **Add Manifest** at the top of the page and fill out the following input fields.

-
-- **Layer name** - The name of the layer. Assign the name **application**.
-
-
-- **Manifests** - Add your manifest by giving it a name and clicking the **New Manifest** button. Assign a name to the internal manifest and click on the blue button. An empty editor will be displayed on the right side of the screen.
-
-![Screenshot of unopened manifest editor](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_manifest_blue_btn.png)
-
- -In the manifest editor, insert the following content. - -
- -```yaml -apiVersion: v1 -kind: Service -metadata: - name: hello-universe-service -spec: - type: LoadBalancer - ports: - - protocol: TCP - port: 8080 - targetPort: 8080 - selector: - app: hello-universe ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: hello-universe-deployment -spec: - replicas: 2 - selector: - matchLabels: - app: hello-universe - template: - metadata: - labels: - app: hello-universe - spec: - containers: - - name: hello-universe - image: ghcr.io/spectrocloud/hello-universe:1.0.12 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 8080 -``` - -The code snippet you added will deploy the [*hello-universe*](https://github.com/spectrocloud/hello-universe) application. You may have noticed that the code snippet you added is a Kubernetes configuration. Manifest files are a method you can use to achieve more granular customization of your Kubernetes cluster. You can add any valid Kubernetes configuration to a manifest file. - -![Screenshot of manifest in the editor](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_manifest.png) - -The manifest defines a replica set for the application to simulate a distributed environment with a web application deployed to Kubernetes. The application is assigned a load balancer. Using a load balancer, you can expose a single access point and distribute the workload to both containers. - -Click on **Confirm & Create** to save your changes. - -
- - -### Deployment - -Navigate to the left **Main Menu** and select **Clusters**. Click on the host cluster you deployed to open its details page. - - -On the top right-hand corner is a blue button **Updates Available**. Click on the button to review the available updates. - -![The cluster details page with a view of pending updates](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_update_available.png) - - -Compare the new changes against the previous cluster profile definition. The only difference is the addition of a manifest that will deploy the Hello Universe application. - - -![Available updates details](/tutorials/deploy-clusters/deploy_app/clusters_public-cloud_deploy-k8s-cluster_update_details_compare.png) - -Click on **Confirm updates** to apply the updates to the host cluster. Depending on the scope of the change this may take a few moments. - -
- - -## Verify the Application - -Navigate to the cluster's details page and verify you are in the **Overview** tab. When the application is deployed and ready for network traffic, indicated in the **Services** field, Palette exposes the service URL. Click on the URL for port **:8080** to access the Hello Universe application. - -![Cluster details page with service URL highlighted](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_service_url.png) - - -
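-
-If you have kubectl installed and have downloaded the cluster's kubeconfig file from the cluster details page, you can optionally confirm the application resources from the command line. This is a minimal check; the kubeconfig path below is only an example, so adjust it to wherever you saved the file.
-
-```shell
-export KUBECONFIG=~/Downloads/kubeconfig-hello-universe.yaml
-kubectl get deployment hello-universe-deployment
-kubectl get service hello-universe-service
-```
-
-The external address reported for `hello-universe-service` should match the URL that Palette displays in the **Services** field.
-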
- - - - -It can take up to three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request. - - - - -
- -![Deployed application landing page with counter displayed](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_app.png) - -
- -Welcome to Hello Universe, a demo application to help you learn more about Palette and its features. Feel free to click on the logo to increase the counter and for a fun image change. - -You have deployed your first application to a cluster managed by Palette. Your first application is a single container application with no upstream dependencies. - - -## Cleanup - -Use the following steps to remove all the resources you created for the tutorial. - -To remove the cluster, navigate to the left **Main Menu** and click on **Clusters**. Select the cluster you want to delete to access its details page. - -Click on **Settings** to expand the menu, and select **Delete Cluster**. - -![Delete cluster](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_delete-cluster-button.png) - -You will be prompted to type in the cluster name to confirm the delete action. Type in the cluster name to proceed with the delete step. The deletion process takes several minutes to complete. - -
- - - -If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for a force delete. To trigger a force delete, navigate to the cluster’s details page, click on **Settings**, then select **Force Delete Cluster**. Palette automatically removes clusters stuck in the cluster deletion phase for over 24 hours. - - - - -
- -Once the cluster is deleted, navigate to the left **Main Menu** and click on **Profiles**. Find the cluster profile you created and click on the **three-dot Menu** to display the **Delete** button. Select **Delete** and confirm the selection to remove the cluster profile. - - -
-
-
-## Terraform
-
-The [Spectro Cloud Terraform](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) provider enables you to create and manage Palette resources in a codified manner by leveraging Infrastructure as Code (IaC). Some notable reasons why you would want to utilize IaC are:
-
-- The ability to automate infrastructure.
-
-- Improved collaboration in making infrastructure changes.
-
-- Self-documentation of infrastructure through code.
-
-- The ability to track all infrastructure in a single source of truth.
-
-If you want to become more familiar with Terraform, we recommend you check out the [Terraform](https://developer.hashicorp.com/terraform/intro) learning resources from HashiCorp.
-
-
-## Prerequisites
-
-To complete this tutorial, you will need the following items.
-
-- Basic knowledge of containers.
-- [Docker Desktop](https://www.docker.com/products/docker-desktop/) or another container management tool.
-- Create a cloud account from one of the following providers.
-  - [AWS](https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account)
-  - [Azure](https://learn.microsoft.com/en-us/training/modules/create-an-azure-account)
-  - [GCP](https://cloud.google.com/docs/get-started)
-- Register the [cloud account with Palette](https://console.spectrocloud.com/auth/signup). Use the following resources for additional guidance.
-  - [Register and Manage AWS Accounts](/clusters/public-cloud/aws/add-aws-accounts)
-  - [Register and Manage Azure Cloud Accounts](/clusters/public-cloud/azure/azure-cloud)
-  - [Register and Manage GCP Accounts](/clusters/public-cloud/gcp#creatingagcpcloudaccount)
-
- -## Set Up Local Environment - -You can clone the tutorials repository locally or follow along by downloading a Docker image that contains the tutorial code and all dependencies. - -
- - - -If you choose to clone the repository instead of using the tutorial container make sure you have Terraform v1.4.0 or greater installed. - - - - -
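-
-If you plan to clone the repository rather than use the tutorial container, you can confirm that your local Terraform installation meets the version requirement with the following command.
-
-```shell
-terraform version
-```
-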
- - - - - - -Ensure Docker Desktop on your local machine is available. Use the following command and ensure you receive an output displaying the version number. - -
- -```bash -docker version -``` - -Download the tutorial image to your local machine. -
-
-```bash
-docker pull ghcr.io/spectrocloud/tutorials:1.0.4
-```
-
-Next, start the container, and open a bash session into it.
-
- -```shell -docker run --name tutorialContainer --interactive --tty ghcr.io/spectrocloud/tutorials:1.0.4 bash -``` - -Navigate to the tutorial code. - -
- -```shell -cd /terraform/iaas-cluster-deployment-tf -``` - - -
- - -Open a terminal window and download the tutorial code from GitHub. - -
-
-```shell
-git clone git@github.com:spectrocloud/tutorials.git
-```
-
-Change the directory to the tutorial folder.
-
- -```shell -cd tutorials/ -``` - -Check out the following git tag. - -
- -```shell -git checkout v1.0.4 -``` - -Change the directory to the tutorial code. - -
- -```shell -cd terraform/iaas-cluster-deployment-tf/ -``` - -
- - -
-
----
-
-## Create an API Key
-
-Before you can get started with the Terraform code, you need a Spectro Cloud API key.
-
-To create an API key, log in to [Palette](https://console.spectrocloud.com), click on the **User Menu**, and select **My API Keys**.
-
-![Image that points to the user drop-down Menu and points to the API key link](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_create_api_key.png)
-
-Next, click on **Add New API Key**. Fill out the required input field, **API Key Name**, and the **Expiration Date**. Click on **Confirm** to create the API key. Copy the key value to your clipboard, as you will use it shortly.
-
- -In your terminal session, issue the following command to export the API key as an environment variable. - -
- -```shell -export SPECTROCLOUD_APIKEY=YourAPIKeyHere -``` - -The [Spectro Cloud Terraform](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) provider requires credentials to interact with the Palette API. -The Spectro Cloud Terraform provider will use the environment variable to authenticate with the Spectro Cloud API endpoint. - - -## Resources Review - -To help you get started with Terraform, the tutorial code is structured to support deploying a cluster to either Azure, GCP, or AWS. Before you deploy a host cluster to your target provider, take a few moments to review the following files in the folder structure. - -
- -- **providers.tf** - This file contains the Terraform providers that are used to support the deployment of the cluster. - - -- **inputs.tf** - This file contains all the Terraform variables for the deployment logic. - - -- **data.tf** - This file contains all the query resources that perform read actions. - - -- **cluster_profiles.tf** - This file contains the cluster profile definitions for each cloud provider. - - -- **cluster.tf** - This file has all the required cluster configurations to deploy a host cluster to one of the cloud providers. - - -- **terraform.tfvars** - Use this file to customize the deployment and target a specific cloud provider. This is the primary file you will modify. - - -- **outputs.tf** - This file contains content that will be output in the terminal session upon a successful Terraform `apply` action. - -The following section allows you to review the core Terraform resources more closely. - -
- -### Provider - -The **provider.tf** file contains the Terraform providers and their respective versions. The tutorial uses two providers - the Spectro Cloud Terraform provider and the TLS Terraform provider. Note how the project name is specified in the `provider "spectrocloud" {}` block. You can change the target project by changing the value specified in the `project_name` parameter. - -
- - -```terraform -terraform { - required_providers { - spectrocloud = { - version = ">= 0.13.1" - source = "spectrocloud/spectrocloud" - } - tls = { - source = "hashicorp/tls" - version = "4.0.4" - } - } -} - -provider "spectrocloud" { - project_name = "Default" -} -``` - -The next file you should become familiar with is the **cluster-profiles.tf** file. - -### Cluster Profile - -The Spectro Cloud Terraform provider has several resources available for use. When creating a cluster profile, use `spectrocloud_cluster_profile`. -This resource can be used to customize all layers of a cluster profile. You can specify all the different packs and versions to use and add a manifest or Helm chart. - - -In the **cluster-profiles.tf** file, the cluster profile resource is declared three times. Each instance of the resource is for a specific cloud provider. Using the AWS cluster profile as an example, note how the **cluster-profiles.tf** file uses `pack {}` blocks to specify each layer of the profile. The order in which you arrange contents of the `pack {}` blocks plays an important role, as each layer maps to the core infrastructure in a cluster profile. - -The first listed `pack {}` block must be the OS, followed by Kubernetes, the container network interface, and the container storage interface. The first `pack {}` block in the list equates to the bottom layer of the cluster profile. Ensure you define the bottom layer of the cluster profile - the OS layer - first in the list of `pack {}` blocks. - -
- -```terraform -resource "spectrocloud_cluster_profile" "aws-profile" { - name = "tf-aws-profile" - description = "A basic cluster profile for AWS" - tags = concat(var.tags, ["env:aws"]) - cloud = "aws" - type = "cluster" - - pack { - name = data.spectrocloud_pack.aws_ubuntu.name - tag = data.spectrocloud_pack.aws_ubuntu.version - uid = data.spectrocloud_pack.aws_ubuntu.id - values = data.spectrocloud_pack.aws_ubuntu.values - } - - pack { - name = data.spectrocloud_pack.aws_k8s.name - tag = data.spectrocloud_pack.aws_k8s.version - uid = data.spectrocloud_pack.aws_k8s.id - values = data.spectrocloud_pack.aws_k8s.values - } - - pack { - name = data.spectrocloud_pack.aws_cni.name - tag = data.spectrocloud_pack.aws_cni.version - uid = data.spectrocloud_pack.aws_cni.id - values = data.spectrocloud_pack.aws_cni.values - } - - pack { - name = data.spectrocloud_pack.aws_csi.name - tag = data.spectrocloud_pack.aws_csi.version - uid = data.spectrocloud_pack.aws_csi.id - values = data.spectrocloud_pack.aws_csi.values - } - - pack { - name = "hello-universe" - type = "manifest" - tag = "1.0.0" - values = "" - manifest { - name = "hello-universe" - content = file("manifests/hello-universe.yaml") - } - } -} -``` - -The last `pack {}` block contains a manifest file with all the Kubernetes configurations for the [Hello Universe](https://github.com/spectrocloud/hello-universe) application. Including the application in the profile ensures the application is installed during cluster deployment. If you wonder what all the data resources are for, head to the next section to review them. - - -### Data Resources - -You may have noticed that each `pack {}` block contains references to a data resource. - -
- - -```terraform - pack { - name = data.spectrocloud_pack.aws_csi.name - tag = data.spectrocloud_pack.aws_csi.version - uid = data.spectrocloud_pack.aws_csi.id - values = data.spectrocloud_pack.aws_csi.values - } -``` -
- -[Data resources](https://developer.hashicorp.com/terraform/language/data-sources) are used to perform read actions in Terraform. The Spectro Cloud Terraform provider exposes several data resources to help you make your Terraform code more dynamic. The data resource used in the cluster profile is `spectrocloud_pack`. This resource enables you to query Palette for information about a specific pack. You can get information about the pack using the data resource such as unique ID, registry ID, available versions, and the pack's YAML values. - -Below is the data resource used to query Palette for information about the Kubernetes pack for version `1.24.10`. - -
- -```terraform -data "spectrocloud_pack" "aws_k8s" { - name = "kubernetes" - version = "1.24.10" -} -``` - -Using the data resource, you avoid manually typing in the parameter values required by the cluster profile's `pack {}` block. - -
- -### Cluster - -The **clusters.tf** file contains the definitions for deploying a host cluster to one of the cloud providers. To create a host cluster, you must use a cluster resource for the cloud provider you are targeting. - -In this tutorial, the following Terraform cluster resources are used. - -
- -| Terraform Resource | Platform | -|---|---| -| [`spectrocloud_cluster_aws`](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_aws) | AWS | -| [`spectrocloud_cluster_azure`](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_azure) | Azure | -| [`spectrocloud_cluster_gcp`](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_gcp) | GCP | - -Using the `spectrocloud_cluster_azure` resource in this tutorial as an example, note how the resource accepts a set of parameters. When deploying a cluster, you can change the same parameters in the Palette user interface (UI). You can learn more about each parameter by reviewing the resource documentation page hosted in the Terraform registry. - -
- -```terraform -resource "spectrocloud_cluster_azure" "cluster" { - name = "azure-cluster" - tags = concat(var.tags, ["env:azure"]) - cloud_account_id = data.spectrocloud_cloudaccount_azure.account[0].id - - cloud_config { - subscription_id = var.azure_subscription_id - resource_group = var.azure_resource_group - region = var.azure-region - ssh_key = tls_private_key.tutorial_ssh_key[0].public_key_openssh - } - - cluster_profile { - id = spectrocloud_cluster_profile.azure-profile[0].id - } - - machine_pool { - control_plane = true - control_plane_as_worker = true - name = "master-pool" - count = var.azure_master_nodes.count - instance_type = var.azure_master_nodes.instance_type - azs = var.azure_master_nodes.azs - is_system_node_pool = var.azure_master_nodes.is_system_node_pool - disk { - size_gb = var.azure_master_nodes.disk_size_gb - type = "Standard_LRS" - } - } - - machine_pool { - name = "worker-basic" - count = var.azure_worker_nodes.count - instance_type = var.azure_worker_nodes.instance_type - azs = var.azure_worker_nodes.azs - is_system_node_pool = var.azure_worker_nodes.is_system_node_pool - } - - timeouts { - create = "30m" - delete = "15m" - } -} -``` -## Deploy Cluster - -To deploy a cluster using Terraform, you must first modify the **terraform.tfvars** file. Open the **terraform.tfvars** file in the editor of your choice, and locate the cloud provider you will use to deploy a host cluster. - -To simplify the process, we added a toggle variable in the Terraform template, that you can use to select the deployment environment. Each cloud provider has a section in the template that contains all the variables you must populate. Variables to populate are identified with `REPLACE_ME`. - -In the example AWS section below, you would change `deploy-aws = false` to `deploy-aws = true` to deploy to AWS. Additionally, you would replace all the variables with a value `REPLACE_ME`. You can also update the values for nodes in the master pool or worker pool. - -
- -```terraform -########################### -# AWS Deployment Settings -############################ -deploy-aws = false # Set to true to deploy to AWS - -aws-cloud-account-name = "REPLACE_ME" -aws-region = "REPLACE_ME" -aws-key-pair-name = "REPLACE_ME" - -aws_master_nodes = { - count = "1" - control_plane = true - instance_type = "m4.2xlarge" - disk_size_gb = "60" - availability_zones = ["REPLACE_ME"] # If you want to deploy to multiple AZs, add them here -} - -aws_worker_nodes = { - count = "1" - control_plane = false - instance_type = "m4.2xlarge" - disk_size_gb = "60" - availability_zones = ["REPLACE_ME"] # If you want to deploy to multiple AZs, add them here -} -``` - -When you are done making the required changes, issue the following command to initialize Terraform. - -
- -```shell -terraform init -``` - -Next, issue the `plan` command to preview the changes. - -
-
-```shell
-terraform plan
-```
-
-
-Output:
-```shell
-Plan: 2 to add, 0 to change, 0 to destroy.
-```
-
-If you change the desired cloud provider's toggle variable to `true`, you will receive an output message that two new resources will be created. The two resources are your cluster profile and the host cluster.
-
-To deploy all the resources, use the `apply` command.
-
-
-```shell
-terraform apply -auto-approve
-```
-
-
-### Verify the Profile
-
-
-To check out the cluster profile creation in Palette, log in to [Palette](https://console.spectrocloud.com), and from the left **Main Menu** click on **Profiles**. Locate the cluster profile with the name pattern `tf-[cloud provider]-profile`. Click on the cluster profile to review its details, such as layers, packs, and versions.
-
-![A view of the cluster profile](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_cluster_profile_view.png)
-
-
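-
-You can also confirm what Terraform created from the terminal. Assuming you enabled a single provider toggle, the state should list one cluster profile resource and one cluster resource; the exact resource addresses depend on which provider you selected.
-
-```shell
-terraform state list
-```
-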
- - -### Verify the Cluster - - -You can also check the cluster creation process by navigating to the left **Main Menu** and selecting **Clusters**. - -![Update the cluster](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_aws_create_cluster.png) - -
- -Select your cluster to review its details page, which contains the status, cluster profile, event logs, and more. - -
- -The cluster deployment may take several minutes depending on the cloud provider, node count, node sizes used, and the cluster profile. You can learn more about the deployment progress by reviewing the event log. Click on the **Events** tab to check the event log. - -![Update the cluster](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-event_log.png) - -
- -While you wait for the cluster deployment process to complete, feel free to check out the following video where we discuss the growing pains of using Kubernetes and how Palette can help you address these pain points. - -
- - - - -
- -## Validate - -When the cluster deploys, you can access the Hello Universe application. -From the cluster's **Overview** page, click on the URL for port **:8080** next to the **hello-universe-service** in the **Services** row. This URL will take you to the application landing page. - -
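-
-If the tutorial's **outputs.tf** file defines any outputs, you can also print them from the terminal after the apply completes. The available output names depend on the version of the tutorial code you checked out.
-
-```shell
-terraform output
-```
-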
- - - - -It can take up to three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request. - - - - -![Deployed application](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_app.png) - -
- -Welcome to Hello Universe, a demo application to help you learn more about Palette and its features. Feel free to click on the logo to increase the counter and for a fun image change. - -You have deployed your first application to a cluster managed by Palette through Terraform. Your first application is a single container application with no upstream dependencies. - - -## Cleanup - -Use the following steps to clean up the resources you created for the tutorial. Use the `destroy` command to remove all the resources you created through Terraform. - -
- -```shell -terraform destroy --auto-approve -``` - -Output: -```shell -Destroy complete! Resources: 2 destroyed. -``` - -
- - - -If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for force delete. To trigger a force delete, navigate to the cluster’s details page and click on **Settings**. Click on **Force Delete Cluster** to delete the cluster. Palette automatically removes clusters stuck in the cluster deletion phase for over 24 hours. - - - - -If you are using the tutorial container and want to exit the container, type `exit` in your terminal session and press the **Enter** key. Next, issue the following command to stop the container. - -
- -```shell -docker stop tutorialContainer && \ -docker rmi --force ghcr.io/spectrocloud/tutorials:1.0.4 -``` - - -
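-
-Optionally, you can also remove the stopped container itself so that it no longer appears in your container list.
-
-```shell
-docker rm tutorialContainer
-```
-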
-
-
-# Wrap-up
-
-In this tutorial, you created a cluster profile, which is a template that contains the core layers required to deploy a host cluster. You then deployed a host cluster onto your preferred cloud service provider. After the cluster was deployed, you updated the profile by adding the Hello Universe application and applied the updates to the host cluster.
-
-Palette ensures consistency across cluster deployments through cluster profiles. Palette also enables you to quickly deploy applications to a Kubernetes environment with little or no prior Kubernetes knowledge. In a matter of minutes, you were able to provision a new Kubernetes cluster and deploy an application.
-
-We encourage you to check out the [Deploy an Application using Palette Dev Engine](/devx/apps/deploy-app) tutorial to learn more about Palette. Palette Dev Engine can help you deploy applications more quickly through the use of [virtual clusters](/glossary-all#palettevirtualcluster). Feel free to check out the reference links below to learn more about Palette.
-
- - -- [Palette Modes](/introduction/palette-modes) - - -- [Cluster Profiles](/cluster-profiles) - - -- [Palette Clusters](/clusters) - - -- [Hello Universe GitHub repository](https://github.com/spectrocloud/hello-universe) - -
diff --git a/content/docs/04-clusters/02-data-center.md b/content/docs/04-clusters/02-data-center.md deleted file mode 100644 index 5011c7e7ea..0000000000 --- a/content/docs/04-clusters/02-data-center.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "Data Center Clusters" -metaTitle: "Creating new clusters on Spectro Cloud" -metaDescription: "The methods of creating clusters for a speedy deployment on any CSP" -icon: "database" -hideToC: false -fullWidth: false ---- - -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import Tooltip from 'shared/components/ui/Tooltip'; - - -# Data Center Clusters - -Palette supports provisioning and end-to-end lifecycle management of Kubernetes workload clusters on various private clouds, bare metal servers, and in self-hosted environments. - -
-
-
-Workload clusters are instantiated from cloud-specific cluster profiles. Cluster profiles are templates created with the pre-configured layers and components needed for cluster deployments. You can use one of the cluster profiles provided out-of-the-box or create a new one.
-
-
-
-# Supported Environments
-
-The following pages provide detailed instructions for setting up new workload clusters in various data center environments:
-
- -- [Canonical MAAS](/clusters/data-center/maas) - - -- [OpenStack](/clusters/data-center/openstack) - - -- [VMware](/clusters/data-center/vmware) - -
- -
diff --git a/content/docs/04-clusters/02-data-center/01-maas.md b/content/docs/04-clusters/02-data-center/01-maas.md deleted file mode 100644 index fee8ba1301..0000000000 --- a/content/docs/04-clusters/02-data-center/01-maas.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "MAAS" -metaTitle: "Configure MAAS and create MAAS clusters in Palette" -metaDescription: "Learn how to configure MAAS and create MAAS clusters in Palette" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -Palette enables seamless integration with Canonical MAAS, allowing you to deploy and manage Kubernetes clusters directly on bare metal servers. Palette achieves this through the Private Cloud Gateway (PCG), establishing a secure connection from the internal network to the internet-accessible Palette instance and effectively bypassing NAT gateways and firewalls. - - -Palette also supports self-hosted deployment of Kubernetes clusters in the MAAS environment, allowing direct access to MAAS through a private network without the need for a PCG. This setup ensures network connectivity and flexibility in managing Kubernetes clusters on bare metal servers, either through a VPN or by directly accessing the Palette instance in a private network. - -
- - - - - -# Resources - -- [MAAS Bare-Metal Architecture](/clusters/data-center/maas/architecture) - - -- [Install and Manage MAAS Gateway](/clusters/data-center/maas/install-manage-maas-pcg) - - -- [Register and Manage MAAS Cloud Accounts](/clusters/data-center/maas/register-manage-maas-cloud-accounts) - - -- [Create and Manage MAAS Cluster](/clusters/data-center/maas/create-manage-maas-clusters) - - -
-
diff --git a/content/docs/04-clusters/02-data-center/01-maas/02-architecture.md b/content/docs/04-clusters/02-data-center/01-maas/02-architecture.md deleted file mode 100644 index c1eae67d0b..0000000000 --- a/content/docs/04-clusters/02-data-center/01-maas/02-architecture.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: "Architecture" -metaTitle: "MAAS Architecture with Palette" -metaDescription: "Learn about the architecture used to support MAAS using Palette" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# MAAS Bare-Metal Kubernetes Architecture - -Canonical MAAS is an open-source tool that lets you discover, commission, deploy and re-deploy operating systems to physical servers. The following are some architectural highlights of bare-metal Kubernetes clusters that Palette deploys using Canonical MAAS. Refer to the PCG deployment options section below to learn more about PCG deployment. - -
- -- Palette integrates with MAAS through Spectro Cloud’s open-source Cloud Native Computing Foundation (CNCF) [Cluster API provider](https://github.com/spectrocloud/cluster-api-provider-maas). -Refer to the table below - - -- Palette provides a cloud-like experience for deploying clusters on bare metal servers. The result is increased performance at minimal cost and operational effort. - - -- A Private Cloud Gateway (PCG) that you install in a MAAS cloud using a local installer facilitates communication between Palette and MAAS. The PCG is necessary in MAAS environments where Palette does not have direct network access to the MAAS server. Since MAAS environments are typically in a private network without a central endpoint, the PCG provides this endpoint and also wraps the MAAS environment into a cloud account that you can target for cluster deployment in Palette. Refer to the section below to learn about the PCG deployment options you have. - - -- When the PCG is installed, it registers itself with a Palette instance and enables secure communication between the SaaS portal and the private cloud environment. The gateway enables installation and end-to-end lifecycle management of Kubernetes clusters in private cloud environments from Palette's SaaS portal. - - -
- - The diagram below illustrates how MAAS works with Palette using a PCG. - - ![Network flow from an architectural perspective of how MAAS works with Palette](/maas_cluster_architecture.png) - -
- -# PCG Deployment Options - -Palette can communicate with MAAS using the following deployment options. - -
- - -- **Private Cloud Gateway** - - -- **System Private Gateway** - -## Private Cloud Gateway - -When a user wants to deploy a new cluster on a bare metal cloud using MAAS with Palette, Palette needs connectivity to MAAS. Often, MAAS is behind a firewall or a Network Address Translation (NAT) gateway, and Palette needs help to reach MAAS directly. - -To address these network challenges, you can deploy a PCG. The PCG will maintain a connection to Palette and directly connect to MAAS. The direct communication channel allows Palette to create clusters using the PCG to facilitate communication with MAAS. The PCG also supports using a proxy server to access the internet if needed. - -Once Palette deploys clusters, the clusters require connectivity to Palette. The clusters communicate with Palette directly via an internet gateway, or if a proxy has been configured on the PCG, the clusters will inherit the proxy configuration. Deployed and active clusters maintain their connectivity with Palette. Any actions taken on these clusters using Palette will not require PCG's participation. This means that if the PCG becomes unavailable, any clusters that are currently deployed will remain operational and still be managed by Palette. - -All Palette deployed clusters will use the PCG cluster during the creation and deletion phase. Once a host cluster is available, the internal Palette agent will communicate with Palette directly. The Palette agent inside each cluster is the originator of all communication, so the network requests are outbound toward Palette. The exception is a host cluster creation or deletion request, where the PCG must be involved because it needs to acquire and release machines provided by MAAS. - -Typically, the PCG is used with Palette SaaS. However, a PCG is also required if you have a self-hosted Palette instance and it does not have direct access to the MAAS environment. You can utilize the System Private Gateway if there is direct network connectivity access with the MAAS environment. Refer to the [System Private Gateway](/clusters/data-center/maas/architecture/#systemprivategateway) section to learn more. - -
- - -## System Private Gateway - -A System Private Gateway can be used if a self-hosted Palette instance can communicate directly with a MAAS installation. A System Private Gateway is a PCG service that is enabled inside the self-hosted Palette instance. - -
- - - -Only self-hosted Palette instances support the option of using the System Private Gateway. Use the default [PCG deployment](/clusters/data-center/maas/architecture/#privatecloudgateway) option if you have NAT gateways or network firewalls between Palette and MAAS. - - - -
-
-When registering a MAAS cloud account with Palette, toggle on **Use System Private Gateway** to enable direct communication between Palette and MAAS. Refer to the [Register and Manage MAAS Cloud Accounts](/clusters/data-center/maas/register-manage-maas-cloud-accounts) guide to learn more.
-
-The following table describes the scenarios in which a PCG or a System Private Gateway can be used.
-
- -| Scenario | Use Private Cloud Gateway | Use System Private Gateway | -|-----------|----|----------------| -| Firewall or NAT between MAAS and a self-hosted Palette instance | ✅ | ❌ | -| Direct connectivity between MAAS and a Palette instance | ✅ | ✅ | - - -
diff --git a/content/docs/04-clusters/02-data-center/01-maas/05-install-manage-maas-pcg.md b/content/docs/04-clusters/02-data-center/01-maas/05-install-manage-maas-pcg.md deleted file mode 100644 index 576cfe8794..0000000000 --- a/content/docs/04-clusters/02-data-center/01-maas/05-install-manage-maas-pcg.md +++ /dev/null @@ -1,698 +0,0 @@ ---- -title: "Install and Manage MAAS Gateway" -metaTitle: "Install and Manage MAAS Private Cloud Gateway" -metaDescription: "Learn how to install and manage the MAAS Private Cloud Gateway in Palette." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -The Private Cloud Gateway (PCG) supports private cloud and data center environments. Its function is similar to that of a reverse proxy. The PCG facilitates connectivity between Palette and a private cloud that exists behind a NAT gateway or firewall. It traverses any NAT gateways or firewalls to establish a permanent connection with Palette. - -The PCG is a Kubernetes cluster that supports Palette in a private network environment. All host clusters deployed through Palette communicate with PCG. - -At a high level, the following occurs during a successful MAAS PCG installation: - -
- -- Use the Palette CLI on a laptop, workstation, or Bastion host. - - -- Provide information to the CLI so that it can connect to both a local MAAS installation and a Palette account. - - -- The installation process uses MAAS to obtain machines and install a PCG on them. - - -- The PCG then facilitates all communication between Palette and MAAS, enabling Palette to create new clusters on machines that MAAS provides. - -You can set up the PCG as a single- or three-node cluster based on your requirements for high availability (HA). - -As the following diagram shows, Palette provides an installer in the form of a Docker container that is temporarily deployed on your laptop, workstation, or jump box. You can use the installer on any Linux x86-64 system with a Docker daemon installed and connectivity to Palette and the MAAS identity endpoint. - - -
- - -![An architecture diagram of MaaS with PCG.](/clusters_maas_install-manage-mass-pcg_diagram-of-mass-with-pcg.png) - - -# Install PCG - -Use the following steps to install a PCG cluster in your MAAS environment. You can use the [Palette CLI](/palette-cli) or the PCG Installer Image to deploy a PCG cluster. Review the prerequisites for each option to help you identify the correct install method. - -
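-
-If you want to confirm up front that the system you plan to run the installer on meets the baseline described above, the following is a minimal, hedged check with no Palette- or MAAS-specific assumptions.
-
-```bash
-# Verify the host is x86-64 and the Docker daemon is running and reachable.
-uname -m                                    # expect: x86_64
-docker info --format '{{.ServerVersion}}'   # prints the daemon version if it is available
-```
-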
- - - - - - - -## Prerequisites - - -- Palette version 4.0.X or greater. - - -- Canonical [MAAS installed](https://maas.io/docs/how-to-install-maas), set up, and available in your environment. - - -- Download the Palette CLI from the [Downloads](/spectro-downloads#palettecli) page and install the CLI. Refer to the [Palette CLI Install](/palette-cli/install-palette-cli) guide to learn more. - - -- A Palette API key. Refer to the [Create API Key](/user-management/user-authentication#apikey) page for guidance. - -
- - - - The installation does not work with Single Sign-On (SSO) credentials. You must use an API key from a local tenant admin account in Palette to deploy the PCG. After the PCG is configured and functioning, this local account is no longer used to keep the PCG connected to Palette, so you can disable the account if desired. - - - -- A Linux environment with a Docker daemon installed and a connection to Palette and the MAAS endpoint. The installation must be invoked on an up-to-date Linux system with an x86-64 architecture. ARM architecture is currently not supported. - - -- PCG IP address requirements:

- - - For a single-node gateway, one IP address must be available in the MAAS subnet for the PCG, or three available IP addresses for a three-node gateway. -
- - - One IP address must be available in the MAAS subnet for the Kubernetes API-server endpoint when deploying a three-node gateway. - - -- Sufficient available IPs within the configured MAAS subnets. - - - -By default, the MAAS Kubernetes pack uses a pod classless inter-domain routing (CIDR) range of 192.168.0.0/16. Ensure that the pod CIDR range for any clusters you deploy after setting up the PCG does not overlap with the network used by the bare metal machines that MAAS manages. - - - -- Each node in the PCG cluster requires a machine from MAAS in a ready state with the following resources: - -
- - - CPU: 4 - - Memory: 8192 MiB - - Storage: 60 GiB - - For production environments, we recommend using three nodes, each with 100 GiB of storage, as nodes can exhaust the 60 GiB storage with prolonged use. If you initially set up the gateway with one node, you can resize it at a later time. - - -- An active [MAAS API key](https://maas.io/docs/api-authentication-reference) can be generated in the MAAS web console under **My Preferences** > **API keys**. The following is an example key: - - ``APn53wz232ZwBMxDp5:MHZIbUp3e4DJTjZEKg:mdEv33WAG536MhNC8mIywNLtjcDTnFAQ`` - - For details, refer to the MAAS document on [how to add an API key](https://maas.io/docs/how-to-manage-user-accounts#heading--api-key). - -
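-
-  If you have shell access to the MAAS region controller, you can also retrieve a user's API key with the MAAS CLI. This is a hedged sketch that assumes a snap-based MAAS installation and an admin user named `admin`.
-
-  ```bash
-  # Print the API key(s) for the "admin" user on the MAAS region controller.
-  sudo maas apikey --username=admin
-  ```
-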
-
-- The DNS server that the PCG installer uses must be able to resolve the DNS names of machines that MAAS deploys so that the installer can connect to them. The default setup is to use the MAAS server as the DNS server for any bare metal servers that it deploys. The default MAAS DNS zone is ``.maas``. You can use ``.maas``, or you can use the MAAS web console to create a new DNS zone. When you deploy the PCG and clusters, you can select the desired DNS zone in which DNS name records should be created.
-
-  In the MAAS subnets configuration, you can specify which DNS servers the servers in the MAAS subnet should use.
-
-
-
-If you configure a different DNS server than the MAAS DNS server, you must create a DNS delegation in the other DNS server so that it forwards DNS requests for zones hosted by MAAS to the MAAS DNS server.
-
-
-
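-
-As a quick, hedged check from the installer host, you can confirm that names in the MAAS DNS zone resolve through the DNS server you intend to use. The hostname, zone, and DNS server IP below are placeholders for your environment.
-
-```bash
-# Resolve the FQDN of a machine that MAAS has already deployed.
-nslookup machine-hostname.maas
-
-# Or query a specific DNS server directly, for example the MAAS region controller.
-dig @10.11.12.13 machine-hostname.maas +short
-```
-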
- -The installation process first requests machines from MAAS and then must connect to them. To connect, the install process attempts to use the fully qualified domain name (FQDN) of the server. If you used ``.maas`` as the default DNS zone, the FQDN would be ``machine-hostname.maas``. - -The diagram shows an example of using an external DNS server for servers that MAAS deploys in addition to a DNS delegation. This ensures all servers in the network can resolve the DNS names of servers deployed by MAAS. Note that it is not required for the DNS records to be accessible from the internet. - - -![Image showing external DNS server machines that MAAS deploys in addition to a DNS delegation](/clusters_maas_maas-dns-setup.png) - - -## Install - -The following steps will guide you on how to install a PCG cluster. -
- -1. In an x86 Linux host, open up a terminal session. - - -2. Use the [Palette CLI](/palette-cli/install-palette-cli) `login` command to authenticate the CLI with Palette. When prompted, enter the information listed in the following table. - -
- - ```shell - palette login - ``` - -
- - |**Parameter** | **Description**| - |:-----------------------------|---------------| - |**Spectro Cloud Console** |Enter the Palette endpoint URL. When using the Palette SaaS service, enter ``https://console.spectrocloud.com``. When using a self-hosted instance of Palette, enter the URL for that instance. | - |**Allow Insecure Connection** |Enabling this option bypasses x509 verification. Enter `y` if you are using a self-hosted Palette instance with self-signed TLS certificates. Otherwise, enter `n`.| - |**Spectro Cloud API Key** |Enter your Palette API Key.| - |**Spectro Cloud Organization** |Enter your Palette Organization name.| - |**Spectro Cloud Project** |Enter your desired project name within the selected Organization.| - - -3. Once you have authenticated successfully, invoke the PCG installer by issuing the following command. When prompted, enter the information listed in each of the following tables. - -
- - ```bash - palette pcg install - ``` - -
-
-    |**Parameter** | **Description**|
-    |:-----------------------------|---------------|
-    |**Cloud Type**| Choose MAAS.|
-    |**Private Cloud Gateway Name** | Enter a custom name for the PCG. Example: `maas-pcg-1`.|
-    |**Share PCG Cloud Account across platform Projects** |Enter `y` if you want the Cloud Account associated with the PCG to be available from all projects within your organization. Enter `n` if you want the Cloud Account to only be available at the tenant admin scope.|
-
-
-
-4. Next, provide environment configurations for the cluster. Refer to the following table for information about each option.
-
-
-    |**Parameter**| **Description**|
-    |:-------------|----------------|
-    |**HTTPS Proxy**|Leave this blank unless you are using an HTTPS Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: ``https://USERNAME:PASSWORD@PROXYIP:PROXYPORT``.|
-    |**HTTP Proxy**|Leave this blank unless you are using an HTTP Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: ``http://USERNAME:PASSWORD@PROXYIP:PROXYPORT``.|
-    |**No Proxy**|The default is blank. You can add a comma-separated list of local network CIDR addresses, hostnames, and domain names that should be excluded from being a proxy. This setting will be propagated to all the nodes to bypass the proxy server. Example if you have a self-hosted environment: ``maas.company.com,10.10.0.0/16``.|
-    |**Proxy CA Certificate Filepath**|The default is blank. You can provide the file path of a CA certificate on the installer host. If provided, this CA certificate will be copied to each host in the PCG cluster during deployment. The provided path will be used on the PCG cluster hosts. Example: `/usr/local/share/ca-certificates/ca.crt`.|
-    |**Pod CIDR**|Enter the CIDR pool that will be used to assign IP addresses to pods in the PCG cluster. The pod IP addresses should be unique and not overlap with any machine IPs in the environment.|
-    |**Service IP Range**|Enter the IP address range that will be used to assign IP addresses to services in the PCG cluster. The service IP addresses should be unique and not overlap with any machine IPs in the environment.|
-
- - -5. After the environment options, the next set of prompts is for configuring the PCG cluster for the MAAS environment. The following table contains information about each prompt. - -
- - |**Parameter**| **Description**| - |-------------|----------------| - | **MAAS API Endpoint** |Enter the MAAS API endpoint. This can be a domain or IP address. Example: `http://10.11.12.13:5240/MAAS`.| - | **MAAS API Key** |Enter an active MAAS API key to use for authentication.| - - - -6. Next, select the appropriate option for each of the following items to define which machines should be selected on the MAAS server for deployment as a PCG. - -
-
-    |**Parameter**| **Description**|
-    |-------------|----------------|
-    | **Domain** | Select the MAAS domain. |
-    | **Patch OS on boot** | This parameter indicates whether or not to patch the OS of the PCG hosts on the first boot.|
-    | **Reboot nodes once OS patch is applied** | This parameter indicates whether or not to reboot PCG nodes after OS patches are applied.|
-    | **Availability Zone** | Select the availability zones for the PCG cluster. |
-    | **Resource Pool** | Select the MAAS resource pool. |
-    | **Cluster Size** | The number of nodes that will make up the cluster. Available options are **1** or **3**. Use three nodes for a High Availability (HA) cluster. |
-
-
-
-  Ensure the MAAS server has one or more machines in the **Ready** state for the chosen availability zone and resource pool combination.
-
-
-
-
-7. A new PCG configuration file is generated and its location is displayed on the console. You will receive an output similar to the following.
-
-
-    ```bash hideClipboard
-    ==== PCG config saved ====
-    Location: /home/spectro/.palette/pcg/pcg-20230706150945/pcg.yaml
-    ```
-
-
-
-  The ``CloudAccount.apiKey`` and ``Mgmt.apiKey`` values in the **pcg.yaml** are encrypted and cannot be manually updated. To change these values, restart the installation process using the `palette pcg install` command.
-
-
-
-
-The Palette CLI will now provision a PCG cluster in your MAAS environment.
-If the deployment fails due to misconfiguration, update the PCG configuration file and restart the install process. Refer to the Edit and Redeploy PCG section below. For additional assistance, visit our [Customer Support](https://spectrocloud.atlassian.net/servicedesk/customer/portals) portal.
-
-## Validate
-
-Once installed, the PCG registers itself with Palette. To verify the PCG is registered, use the following steps.
-
-
-1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin.
-
-
-2. Navigate to the left **Main Menu** and select **Tenant Settings**.
-
-
-3. From the **Tenant Settings Menu**, click on **Private Cloud Gateways**. Verify your PCG cluster is available from the list of PCG clusters displayed.
-
-
-4. When you install the PCG, a cloud account is auto-created. To verify the cloud account is created, go to **Tenant Settings > Cloud Accounts** and locate **MAAS** in the table. Verify your MAAS account is listed.
-
-
-
-## Edit and Redeploy PCG
-
-To change the PCG install values, restart the installation process using the `palette pcg install` command. Use the following steps to redeploy the PCG or restart the install process.
-
-
-1. Make any necessary changes to the PCG configuration file that the CLI created during the installation. Use a text editor, such as Vi or Nano, to update the PCG install configuration file.
-
-
-    ```shell hideClipboard
-    ==== Create PCG reference config ====
-    ==== PCG config saved ====
-    Location: /Users/demo/.palette/pcg/pcg-20230717114807/pcg.yaml
-    ```
-
-    ```bash hideClipboard
-    vi /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml
-    ```
-
-
-
-2. To redeploy the PCG, use the `install` command with the flag `--config-file`. Provide the file path to the PCG configuration file that was generated and displayed in the output.
-
- - ```bash hideClipboard - palette pcg install --config-file /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml - ``` - - -
- -
- - - -## Prerequisites - -- Palette version 3.4.X or older. - - - -- Canonical [MAAS installed](https://maas.io/docs/how-to-install-maas), set up, and available in your environment. - - -- A Linux environment with a Docker daemon installed and a connection to Palette and the MAAS endpoint. The installer must be invoked on an up-to-date Linux system with an x86-64 architecture. ARM architecture is currently not supported. - - -- PCG IP address requirements:

-  - For a single-node gateway, one IP address must be available in the MAAS subnet for the PCG, or three available IP addresses for a three-node gateway.
-
- - - One IP address must be available in the MAAS subnet for the Kubernetes api-server endpoint when deploying a three-node gateway. - - -- Sufficient available IPs within the configured MAAS subnets. - - - -By default, the MAAS Kubernetes pack uses a pod classless inter-domain routing (CIDR) range of 192.168.0.0/16. Ensure that the pod CIDR range for any clusters you deploy after setting up the PCG does not overlap with the network used by the bare metal machines that MAAS manages. - - - -- Each node in the PCG cluster requires a machine from MAAS in a ready state with the following resources: - -
-
-  - CPU: 4
-  - Memory: 8192 MiB
-  - Storage: 60 GiB
-
-  For production environments, we recommend using three nodes, each with 100 GiB of storage, as nodes can exhaust the 60 GiB of storage with prolonged use. If you initially set up the gateway with one node, you can resize it at a later time.
-
-
-- An active [MAAS API key](https://maas.io/docs/api-authentication-reference), which you can generate in the MAAS web console under **My Preferences** > **API keys**. The following is an example key:
-
-  ``APn53wz232ZwBMxDp5:MHZIbUp3e4DJTjZEKg:mdEv33WAG536MhNC8mIywNLtjcDTnFAQ``
-
-  For details, refer to the MAAS document on [how to add an API key](https://maas.io/docs/how-to-manage-user-accounts#heading--api-key).
-
-
-- The DNS server that the PCG installer uses must be able to resolve the DNS names of machines that MAAS deploys so that the installer can connect to them. The default setup is to use the MAAS server as the DNS server for any bare metal servers that it deploys. The default MAAS DNS zone is ``.maas``. You can use ``.maas``, or you can use the MAAS web console to create a new DNS zone. When you deploy the PCG and clusters, you can select the desired DNS zone in which DNS name records should be created.
-
-  In the MAAS subnets configuration, you can specify which DNS servers the servers in the MAAS subnet should use.
-
-
-
-If you configure a different DNS server than the MAAS DNS server, you must create a DNS delegation in the other DNS server so that it forwards DNS requests for zones hosted by MAAS to the MAAS DNS server.
-
-
-
- -The installer first requests machines from MAAS and then must connect to them. To connect, the installer attempts to use the fully qualified domain name (FQDN) of the server. If you used ``.maas`` as the default DNS zone, the FQDN would be ``machine-hostname.maas``. - -The diagram shows an example of using an external DNS server for servers that MAAS deploys in addition to a DNS delegation. This ensures all servers in the network can resolve the DNS names of servers deployed by MAAS. Note that it is not required for the DNS records to be accessible from the internet. - - -![Image showing external DNS server machines that MAAS deploys in addition to a DNS delegation](/clusters_maas_maas-dns-setup.png) - - -## Understand the Gateway Installation Process - -The following steps outline the overall process to install the PCG. - -For detailed steps, refer to the **Install PCG** section below, which describes a single-step installation that creates the PCG configuration file and installs the PCG. - -If you have already installed the PCG and are experiencing issues that you want to fix by editing the PCG configuration file directly, refer to the **Edit PCG Config** section below. - -
- -1. You obtain a pairing code in Palette that you will use later. - - -2. Use the Docker image to start the installation on the installer host. - - -3. The installer prompts you for information, including the pairing code you obtained in step **1**. - - -4. The installer generates the PCG configuration file from information you provide in step **3**. - -
- - The installer needs access to your Palette account and to your MAAS environment. Additionally, one (no HA) or three (HA) machines must be in ready state and have internet access in MAAS. If you select one machine in step 3, then you need one in MAAS. Likewise, if you select three machines in step 3, you need three in MAAS. -
-
-5. The installer provisions the machines in MAAS and uses the configuration file to build a new cluster that hosts the PCG application.
-
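-
-Optionally, if the installer host has limited bandwidth, you can stage the installer image ahead of time. The tag below matches the snippet used later in this guide; adjust it if your environment uses a different version.
-
-```bash
-# Pre-pull the PCG installer image so the install step does not wait on the download.
-docker pull gcr.io/spectro-images-public/release/spectro-installer:v1.0.12
-```
-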
- - - - -## Install the PCG - -The following steps will guide you to install the PCG. -
-
-1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin.
-
-
-2. If you have Single Sign-On (SSO) or social sign-on enabled, you will need to use or create a local non-SSO tenant admin account in Palette and use the credentials for that account in step **7**.
-
-
-
-The installer does not work with SSO or social sign-on credentials. You must use a username and password from a local tenant admin account in Palette to deploy the PCG. After the PCG is configured and functioning, this local account is no longer used to keep the PCG connected to Palette, so you can disable the account if desired.
-
-
-
-
-3. Navigate to the **Main Menu** and select **Tenant Settings > Private Cloud Gateway**.
-
-
-4. Click the **Create Private Cloud Gateway** button and select **MAAS**. Private Gateway installation instructions are displayed.
-
-
-5. Note the pairing code displayed in the Instructions section of the page. You will input this code when you use the installer. This pairing code is valid for 24 hours.
-
-
-6. To invoke the installer, copy the following code snippet to your terminal.
-
- - ```bash - docker run -it --rm \ - --net=host \ - --volume /var/run/docker.sock:/var/run/docker.sock \ - --volume /tmp:/opt/spectrocloud \ - gcr.io/spectro-images-public/release/spectro-installer:v1.0.12 - ``` - -7. When prompted, enter the pairing code and information listed in each of the following tables. The installer will generate the gateway configuration file. -
- - -#### Palette Parameters - -|**Parameter** | **Description**| -|:-----------------------------|---------------| -|**Install Type**| Choose **Private Cloud Gateway**.
You can change your selection with the up or down keys.| -|**Cloud Type**| Choose MAAS.| -|**Name** | Enter a custom name for the PCG. Example: ``maas-pcg-1``.| -|**Endpoint** |Enter the Palette endpoint URL. When using the Palette SaaS service, enter ``https://console.spectrocloud.com``. When using a dedicated instance of Palette, enter the URL for that instance. | -|**Username** |Enter your Palette username. This is your sign-in email address. Example: ``user1@company.com``. | -|**Password** |Enter your Palette Password. This is your sign-in password.| -|**Pairing Code** |Enter the pairing code you noted from the instructions page in step **5**. | - -
- -#### Environment Configuration - - -|**Parameter**| **Description**| -|:-------------|----------------| -|**HTTPS Proxy (--https_proxy)**| Leave this blank unless you are using an HTTPS Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: ``https://USERNAME:PASSWORD@PROXYIP:PROXYPORT``.| -| **HTTP Proxy(--http_proxy)**| Leave this blank unless you are using an HTTP Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: ``http://USERNAME:PASSWORD@PROXYIP:PROXYPORT``.| -| **No Proxy(--no_proxy)**| The default is blank. You can add a comma-separated list of local network CIDR addresses, hostnames, and domain names that should be excluded from being a proxy. This setting will be propagated to all the nodes to bypass the proxy server. Example if you have a self-hosted environment: ``maas.company.com,10.10.0.0/16``.| -| **Pod CIDR (--pod_cidr)**|Enter the CIDR pool that will be used to assign IP addresses to pods in the PCG cluster. The pod IP addresses should be unique and not overlap with any machine IPs in the environment.| -| **Service IP Range (--svc_ip_range)**|Enter the IP address range that will be used to assign IP addresses to services in the PCG cluster. The service IP addresses should be unique and not overlap with any machine IPs in the environment.| - -
- - -#### MAAS Account Information - -|**Parameter**| **Description**| -|-------------|----------------| -| **API Endpoint** |Enter the MAAS API endpoint (syntax is important). This can be a domain or IP address. Example: ``http://10.11.12.13:5240/MAAS``.| -| **API Key** |Enter an active MAAS API key to use for authentication.| - -
-
-8. When the installer prompts you, select the appropriate option for each of the following items to define which machines should be selected on the MAAS server for deployment as a PCG:
-
-    - Domain
-    - Availability Zone
-    - Resource Pool
-    - One node (no HA) or three nodes (HA)
-
- - - - Ensure the MAAS server has one or more machines in the **Ready** state for the chosen availability zone - and resource pool combination. - - - -When you have entered all the configuration values, the installer saves the gateway configuration file to disk and prints its location before proceeding with the installation. For example: - -``/tmp/install-user-defined-MaaS-Gateway_Name-20210805155034/pcg.yaml`` - -
- - - -The **/opt/spectrocloud** folder is volume mapped to the installer's **/tmp** folder. - - - -The installer then requests available bare metal machines in your MAAS environment on which to install the gateway. The ``password`` and ``API key`` values in the ``pcg.yaml`` are encrypted and cannot be manually updated. To change these values, copy the code snippet in step **6** to rerun the installer. - -If the deployment fails due to misconfiguration, update the gateway configuration file and rerun the installer. Refer to the **Edit PCG Config** tab above. - -If you need assistance, please visit our [Customer Support](https://spectrocloud.atlassian.net/servicedesk/customer/portals) portal. - -
- -## Validate - -Once installed, the gateway registers itself with Palette. To verify the gateway is registered, navigate to **Tenant Settings > Private Cloud Gateways** and ensure the gateway is listed on the **Manage Private Cloud Gateways** page. - -When you install the gateway, a cloud account is auto-created. To verify the cloud account is created, go to **Tenant Settings > Cloud Accounts** and locate **MAAS** in the table. Verify your MAAS account is listed. - - - - -## Edit PCG Configuration File - -Use the following steps if you want to edit the PCG configuration file directly. - -
-
-1. Copy the ``pcg.yaml`` file out of ``/tmp/install-user-defined-MaaS-Gateway_Name-20210805155034/pcg.yaml`` and into ``/tmp`` as follows.
-
-
-```bash
-cp /tmp/install-user-defined-MaaS-Gateway_Name-20210805155034/pcg.yaml /tmp
-```
-
-
-2. Make the necessary changes to the configuration file.
-
-
-3. Before you redeploy the gateway, do the following:
-
- - - Ensure the pairing code in the configuration file is the same as the pairing code displayed in the installation instructions in Palette. To verify the pairing code, click the **Create Private Cloud Gateway** button and select **MAAS**. Note the pairing code and verify it is the same code in the configuration file. - -
- - - If the codes do not match, modify the code in the configuration file so it matches the code displayed in Palette. - -
- - - -Issues can occur with the PCG installation if the pairing code in Palette changes during the time it takes to modify the configuration file. Ensure pairing codes in Palette and the configuration file match before you redeploy the gateway. - -If you stop the installation or it fails due to mismatched pairing codes, the gateway might display as **Pending (unnamed)** on the **Private Cloud Gateways** page. If this happens, delete the gateway and ensure pairing codes in Palette and the configuration file match before redeploying the gateway. - - - -
-
-4. To redeploy the gateway, copy the following code snippet to your terminal and provide the gateway configuration file as input.
-
-
-```bash
-docker run -it --rm \
---net=host \
--v /var/run/docker.sock:/var/run/docker.sock \
--v /tmp:/opt/spectrocloud \
-gcr.io/spectro-images-public/release/spectro-installer:v1.0.12 \
--s true \
--c /opt/spectrocloud/pcg.yaml
-```
-
-The installer requests available bare metal machines in your MAAS environment on which to install the gateway.
-
-If you need assistance, please visit our [Customer Support](https://spectrocloud.atlassian.net/servicedesk/customer/portals) portal.
-
-
- -
-
-
-
-
-
-# Update and Manage the PCG
-
-Palette maintains the Operating System (OS) image and all configurations for the PCG. Periodically, the OS images, configurations, and other components need to be updated to resolve security or functionality issues. Palette releases updates when required, and informs you with an update notification when you click on the gateway in the **Manage Private Cloud Gateways** page.
-
-Review the changes in the update notification, and apply the update when you are ready.
-
-Updating the cloud gateway does not result in any downtime for the tenant clusters. During the update process, new cluster provisioning is unavailable. New cluster requests are queued and processed when the gateway update is complete.
-
- -# Delete the MAAS Gateway - -Follow these steps to delete a MAAS gateway. -
- -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Navigate to the **Main menu** and select **Tenant Settings > Private Cloud Gateways**. - - -3. Click the **three-dot Menu** for the gateway instance you want to delete and choose **Delete**. - - Palette checks for running tenant clusters associated with the gateway instance and displays an error message if it detects any. -
- -4. If there are running clusters, delete them and retry deleting the gateway instance. - -
- -# Resize the MAAS Gateway - -You can set up a PCG as a single-node (no HA) or three-node (HA) cluster. You can set up a PCG initially with one node and resize it to three nodes at a later time. - -
- - - -For production environments, we recommend setting up three nodes. - - - -## Prerequisites - -- Each PCG node requires the following: - - - 4 CPUs - - 8192 MiB memory - - 60 GiB storage - -Follow these steps to resize a single-node gateway to three nodes. - -
- -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Navigate to the **Main Menu** and select **Tenant Settings > Private Cloud Gateways**. - - -3. Click the **three-dot Menu** for the gateway instance you want to resize and choose **Set number of nodes**. - - -4. Change the number of nodes to 3. - -Two new nodes will be added to the PCG cluster. - -
-
-
-
-
-Ensure the MAAS server has two additional machines in the **Ready** state in the same Availability Zone and Resource Pool combination.
-
-
-
-
-## Validate
-
-You can validate that your PCG has been resized by navigating to the **Private Cloud Gateways** page. Select the resized gateway instance and click the **Nodes** tab. You will see two additional nodes being deployed along with their health status. Three nodes in total will be listed.
-
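-
-If you prefer to confirm capacity from the MAAS side, the following hedged sketch lists machines that are currently in the **Ready** state, along with their zone and resource pool. The `admin` profile name and the use of `jq` are assumptions about your setup.
-
-```bash
-# List Ready machines with their availability zone and resource pool.
-maas admin machines read | jq -r '.[] | select(.status_name == "Ready") | [.hostname, .zone.name, .pool.name] | @tsv'
-```
-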
- - -# Next Steps - -You can now create tenant clusters in the auto-created cloud account. To get started, check out [Create and Manage MAAS Clusters](/clusters/data-center/maas/create-manage-maas-clusters). - -You can also create additional cloud accounts if you need them. Refer to [Register and Manage MAAS Cloud Accounts](/clusters/data-center/maas/register-manage-maas-cloud-accounts). - - -
-
-
-# Resources
-
-  - [Install MAAS](https://maas.io/)
-
-
-  - [MAAS Fresh Install](https://maas.io/docs/how-to-install-maas)
-
-
-  - [Manage MAAS User Accounts](https://maas.io/docs/how-to-manage-user-accounts#heading--api-key)
-
-
diff --git a/content/docs/04-clusters/02-data-center/01-maas/10-register-manage-maas-cloud-accounts.md b/content/docs/04-clusters/02-data-center/01-maas/10-register-manage-maas-cloud-accounts.md
deleted file mode 100644
index 82fd161844..0000000000
--- a/content/docs/04-clusters/02-data-center/01-maas/10-register-manage-maas-cloud-accounts.md
+++ /dev/null
@@ -1,93 +0,0 @@
----
-title: "Register and Manage MAAS Cloud Accounts"
-metaTitle: "Register and Manage MAAS Cloud Accounts"
-metaDescription: "Learn how to register and manage your MAAS cloud accounts in Palette."
-hideToC: false
-fullWidth: false
----
-
-import Tabs from 'shared/components/ui/Tabs';
-import WarningBox from 'shared/components/WarningBox';
-import InfoBox from 'shared/components/InfoBox';
-import PointsOfInterest from 'shared/components/common/PointOfInterest';
-
-# Overview
-
-When you install the Private Cloud Gateway (PCG), a cloud account is auto-created in every project in Palette. You can use this cloud account to create clusters at either the tenant or the project level. If desired, you can create additional cloud accounts that reference specific PCGs.
-
-# Prerequisites
-
-- An installed PCG if you do not have a direct connection to the MAAS environment. Review [Install and Manage MAAS Gateway](/clusters/data-center/maas/install-manage-maas-pcg) for guidance.
-
-  If you are self-hosting Palette and have a direct connection to the MAAS environment, you can select **Use System Private Gateway**. To learn more about when you would use Palette's PCG or the System Private Gateway, refer to the [Architecture](/clusters/data-center/maas/architecture) page.
-
-
-
-- An active [MAAS API key](https://maas.io/docs/api-authentication-reference), which you can generate in the MAAS web console under **My Preferences** > **API keys**. The following is an example key:
-
-  ``APn53wz232ZwBMxDp5:MHZIbUp3e4DJTjZEKg:mdEv33WAG536MhNC8mIywNLtjcDTnFAQ``
-
-  For details, refer to the MAAS document on [how to add an API key](https://maas.io/docs/how-to-manage-user-accounts#heading--api-key).
-
-
-# Register a MAAS Cloud Account
-
-Follow these steps to create additional MAAS cloud accounts.
-
-
-1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin.
-
-
-2. Navigate to the **Main Menu** and select **Tenant Settings > Cloud Accounts**.
-
-
-3. Locate **MAAS** on the **Cloud Accounts** page and click **Add MAAS Account**.
-
-
-4. In the next window that displays, enter values for the properties listed in the following table.
-
-  In a self-hosted environment where Palette has direct network access to MAAS, you can register a MAAS cloud account without installing the PCG. Note the **Use System Private Gateway** setting listed in the table. Refer to the System Private Gateway section of the [Architecture](/clusters/data-center/maas/architecture) page to learn more about the System Private Gateway and how it compares to deploying a PCG.
-
- - - -For the self-hosted Palette instance, MAAS is reachable on port 5240. - - - -
-
-| Property | Description |
-|-----------|-------------|
-| Account Name | Custom name for the cloud account. |
-| Use System Private Gateway | This setting is for self-hosted environments that do not require a PCG. Toggle this option to bypass installing the PCG.|
-| Select Private Cloud Gateway | Select your MAAS cloud gateway from the **drop-down Menu**. |
-| API Endpoint | API endpoint of the gateway. |
-| API Key | The MAAS API key. |
-
-5. Click **Confirm** to register your MAAS cloud account.
-
-
-# Validate
-
-You can validate that your MAAS cloud account is registered by reviewing the **Cloud Accounts** page. Ensure your account is listed under **MAAS**.
-
-# Next Steps
-
-Deploy a Kubernetes cluster to one of your MAAS accounts. Check out [Create and Manage MAAS Clusters](/clusters/data-center/maas/create-manage-maas-clusters) for guidance.
-
-# References
-
-- [How to add an API key](https://maas.io/docs/how-to-manage-user-accounts#heading--api-key)
-
- -
-
-
-
-
diff --git a/content/docs/04-clusters/02-data-center/01-maas/15-create-manage-maas-clusters.md b/content/docs/04-clusters/02-data-center/01-maas/15-create-manage-maas-clusters.md
deleted file mode 100644
index 5c52d1b52b..0000000000
--- a/content/docs/04-clusters/02-data-center/01-maas/15-create-manage-maas-clusters.md
+++ /dev/null
@@ -1,154 +0,0 @@
----
-title: "Create and Manage MAAS Clusters"
-metaTitle: "Create and Manage MAAS Clusters"
-metaDescription: "Learn how to create and manage MAAS clusters in Palette."
-hideToC: false
-fullWidth: false
----
-
-import Tabs from 'shared/components/ui/Tabs';
-import WarningBox from 'shared/components/WarningBox';
-import InfoBox from 'shared/components/InfoBox';
-import PointsOfInterest from 'shared/components/common/PointOfInterest';
-
-# Overview
-
-Palette supports creating and managing Kubernetes clusters deployed to a MAAS account. This section guides you on how to create a Kubernetes cluster in MAAS that is managed by Palette.
-
-# Prerequisites
-
-- An installed PCG if you do not have a direct connection to the MAAS environment. Review [Install and Manage MAAS Gateway](/clusters/data-center/maas/install-manage-maas-pcg) for guidance.
-
-  If you are self-hosting Palette and have a direct connection to the MAAS environment, you can select **Use System Private Gateway**. To learn more about when you would use Palette's PCG or the System Private Gateway, refer to the [Architecture](/clusters/data-center/maas/architecture#deploywithprivatecloudgateway(pcg)andsystemprivategateway) page.
-
-
-- A MAAS account registered in Palette. Refer to the [Register and Manage MAAS Cloud Accounts](/clusters/data-center/maas/register-manage-maas-cloud-accounts) guide if you need to register a MAAS account in Palette.
-
-
-- A cluster profile for the MAAS environment. Review [Cluster Profiles](/cluster-profiles) for more information.
-
-
-- Verify that the required Operating System (OS) images you use in your cluster profiles are downloaded and available in your MAAS environment. Review the [How to use standard images](https://maas.io/docs/how-to-use-standard-images) guide for guidance on downloading OS images for MAAS.
-
-
-# Deploy a MAAS Cluster
-
-To deploy a new MAAS cluster:
-
- -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Navigate to the **Main Menu** and click **Clusters**. Then click the **Add New Cluster** button. - - -3. Click **Deploy New Cluster** on the Create a New Cluster page. - - -4. Select **MAAS** and click the **Start MAAS Configuration** button. - - -5. Provide basic cluster information: **Cluster name**, **Description**, and **Tags**. - - -6. Select your MAAS cloud account from the **drop-down Menu** and click **Next**. - - -7. Select the cluster profile for your MAAS cluster. - - -8. Review and override pack parameters as desired and click **Next**. By default, parameters for all packs are set with values defined in the cluster profile. - - -9. Select a domain from the **Domain drop-down Menu** and click **Next**. - - -10. Configure the master and worker node pools. The following input fields apply to MAAS master and worker node pools. For a description of input fields that are common across target platforms refer to the [Node Pools](https://docs.spectrocloud.com/clusters/cluster-management/node-pool) management page. Click **Next** when you are done. - -
- - #### Master Pool configuration - - - Cloud configuration: - - - Resource Pool: The MAAS resource pool from which to select available servers for deployment. Filter available servers to only those that have at least the amount of CPU and Memory selected. - -
- - #### Worker Pool configuration - - - Cloud configuration: - - - Resource Pool: The MAAS resource pool from which to select available servers for deployment. Filter available servers to only those that have at least the amount of CPU and Memory selected. - - -11. You can configure the following cluster management features now if needed, or you can do it later: - - - Manage machines - - Schedule scans - - Schedule backups - - Role-based access control (RBAC) - - Location - - -12. Review settings and deploy the cluster. - - -## Validate - -You can validate your cluster is available by reviewing the cluster details page. Navigate to the left **Main Menu** and click **Clusters**. The **Clusters** page lists all available clusters that Palette manages. Select the cluster to review its details page. Ensure the **Cluster Status** field contains the value **Running**. - -
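-
-As an optional, hedged follow-up to the UI validation, you can confirm node health from the command line with the kubeconfig file downloaded from the cluster details page. The file path below is a placeholder.
-
-```bash
-# Verify that all cluster nodes have registered and report a Ready status.
-kubectl get nodes --kubeconfig ~/Downloads/maas-cluster.kubeconfig
-```
-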
- -# Delete a MAAS Cluster - -When you delete a MAAS cluster, all machines and associated storage disks that were created for the cluster are removed. - -Follow these steps to delete a MAAS cluster. - -
-
-1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin.
-
-
-2. Navigate to the **Main Menu** and click **Clusters**.
-
-
-3. Select the cluster you want to delete.
-
-
-4. Click the **Settings drop-down Menu**, and choose **Delete**.
-
-The cluster status is updated to **Deleting** while cluster resources are being deleted. When all resources are successfully deleted, the cluster status is updated to **Deleted** and the cluster is removed from the list. The delete operation releases the machines back to MAAS in the **Ready** state. All the artifacts related to the Kubernetes distribution are removed.
-
-
-
-# Upgrade a MAAS Cluster
-
-Upgrade a MAAS cluster to enhance the performance and functionality of the cluster. To learn more about managing a MAAS cluster, refer to [Manage Clusters](https://docs.spectrocloud.com/clusters/cluster-management/cluster-updates).
-
-To protect your data, we recommend you create a backup of your MAAS cluster before proceeding with any upgrades or infrastructure changes. Review the instructions provided in the [Backup and Restore](https://docs.spectrocloud.com/clusters/cluster-management/backup-restore) guide.
-
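-
-Before upgrading, you may also want to confirm that the OS images your cluster profile expects are synced in MAAS, as the note below explains. A hedged sketch with the MAAS CLI follows; the `admin` profile name and the use of `jq` are assumptions about your setup.
-
-```bash
-# List the boot images (OS releases and architectures) currently synced to MAAS.
-maas admin boot-resources read | jq -r '.[] | [.name, .architecture] | @tsv'
-```
-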
-
-
-
-Ensure that the Operating System (OS) images selected for your cluster are downloaded and available in your MAAS environment to eliminate errors in Palette. You can refer to the [How to use standard images](https://maas.io/docs/how-to-customise-images) guide for instructions on downloading OS images compatible with your MAAS environment.
-
-
-
-
-
-
-
-# Next Steps
-
-Now that you’ve deployed a MAAS cluster, you can start developing and deploying applications to your cluster. We recommend you review the Day-2 operations and become familiar with the cluster management tasks. Check out the [Manage Clusters](/clusters/cluster-management) documentation to learn more about Day-2 responsibilities.
-
-
- -
diff --git a/content/docs/04-clusters/02-data-center/02-openstack.md b/content/docs/04-clusters/02-data-center/02-openstack.md
deleted file mode 100644
index 952440658e..0000000000
--- a/content/docs/04-clusters/02-data-center/02-openstack.md
+++ /dev/null
@@ -1,879 +0,0 @@
----
-title: "OpenStack"
-metaTitle: "Creating new clusters on Spectro Cloud"
-metaDescription: "The methods of creating clusters for a speedy deployment on any CSP"
-hideToC: false
-fullWidth: false
----
-
-import Tabs from 'shared/components/ui/Tabs';
-import WarningBox from 'shared/components/WarningBox';
-import InfoBox from 'shared/components/InfoBox';
-import PointsOfInterest from 'shared/components/common/PointOfInterest';
-
-
-# Overview
-
-Following are some highlights of OpenStack clusters provisioned by Palette:
-
-1. Palette enables the ability to use OpenStack as an application platform for engineering teams.
-
-
-
-2. To facilitate communication between Palette and the OpenStack controllers installed in the private data center, a Private Cloud Gateway (PCG) must be set up within the environment.
-
-
-3. Private Cloud Gateway (PCG) is Palette's self-hosted component to support isolated private cloud or data center environments. Once installed, the PCG registers itself with Palette's SaaS portal and enables secure communication between the SaaS portal and the private cloud environment. The PCG enables installation and end-to-end lifecycle management of Kubernetes clusters in private cloud environments from Palette's SaaS portal.
-
- -![openstack_cluster_architecture.png](/openstack_cluster_architecture.png) - -# Prerequisites - -The following prerequisites must be met before deploying a Kubernetes clusters in OpenStack: - -1. OpenStack Victoria (recommended). - - -2. NTP configured on all Hosts. - - -3. Shared Storage between OpenStack hosts. - - -4. You must have an active OpenStack account with access to all the projects that you would like to provision clusters into. The account should have all the permissions listed below in the "OpenStack Cloud Account Permissions" section. - - -5. You should have an Infrastructure cluster profile created in Palette for OpenStack. - - -6. Install a Private Cloud Gateway for OpenStack as described in the **Installing Private Cloud Gateway - OpenStack** section below. Installing the Private Cloud Gateway will automatically register a cloud account for OpenStack in Palette. You can register your additional OpenStack cloud accounts in Palette as described in the **Creating a OpenStack Cloud account** section below. - - -7. Egress access to the internet (direct or via proxy): - * For proxy: HTTP_PROXY, HTTPS_PROXY (both required) - * Outgoing internet connection on port 443 to api.spectrocloud.com - - -8. DNS to resolve public internet names (e.g.: api.spectrocloud.com). - - -9. Sufficient IPs for application workload services (e.g.: Load Balancer services). - - -10. Per workload cluster IP requirements: - * One (1) per cluster node - * One (1) Kubernetes control-plane VIP - - -# OpenStack Cloud Account Permissions - - - - - - - -### Cinder Service - -**Last Update**: June 28, 2021 - -``` json -"volume:attachment_update": "rule:admin_or_owner" -"volume:attachment_delete": "rule:admin_or_owner" -"volume:attachment_complete": "rule:admin_or_owner" -"volume:multiattach_bootable_volume": "rule:admin_or_owner" -"message:get_all": "rule:admin_or_owner" -"message:get": "rule:admin_or_owner" -"message:delete": "rule:admin_or_owner" -"volume:get_snapshot_metadata": "rule:admin_or_owner" -"volume:update_snapshot_metadata": "rule:admin_or_owner" -"volume:delete_snapshot_metadata": "rule:admin_or_owner" -"volume:get_all_snapshots": "rule:admin_or_owner" -"volume_extension:extended_snapshot_attributes": "rule:admin_or_owner" -"volume:create_snapshot": "rule:admin_or_owner" -"volume:get_snapshot": "rule:admin_or_owner" -"volume:update_snapshot": "rule:admin_or_owner" -"volume:delete_snapshot": "rule:admin_or_owner" -"backup:get_all": "rule:admin_or_owner" -"backup:get": "rule:admin_or_owner" -"backup:update": "rule:admin_or_owner" -"backup:delete": "rule:admin_or_owner" -"backup:restore": "rule:admin_or_owner" -"group:get_all": "rule:admin_or_owner" -"group:get": "rule:admin_or_owner" -"group:update": "rule:admin_or_owner" -"group:get_all_group_snapshots": "rule:admin_or_owner" -"group:get_group_snapshot": "rule:admin_or_owner" -"group:delete_group_snapshot": "rule:admin_or_owner" -"group:update_group_snapshot": "rule:admin_or_owner" -"group:reset_group_snapshot_status": "rule:admin_or_owner" -"group:delete": "rule:admin_or_owner" -"group:enable_replication": "rule:admin_or_owner" -"group:disable_replication": "rule:admin_or_owner" -"group:failover_replication": "rule:admin_or_owner" -"group:list_replication_targets": "rule:admin_or_owner" -"volume_extension:quotas:show": "rule:admin_or_owner" -"limits_extension:used_limits": "rule:admin_or_owner" -"volume_extension:volume_type_access": "rule:admin_or_owner" -"volume:extend": "rule:admin_or_owner" -"volume:extend_attached_volume": 
"rule:admin_or_owner" -"volume:revert_to_snapshot": "rule:admin_or_owner" -"volume:retype": "rule:admin_or_owner" -"volume:update_readonly_flag": "rule:admin_or_owner" -"volume_extension:volume_actions:upload_image": "rule:admin_or_owner" -"volume_extension:volume_actions:initialize_connection": "rule:admin_or_owner" -"volume_extension:volume_actions:terminate_connection": "rule:admin_or_owner" -"volume_extension:volume_actions:roll_detaching": "rule:admin_or_owner" -"volume_extension:volume_actions:reserve": "rule:admin_or_owner" -"volume_extension:volume_actions:unreserve": "rule:admin_or_owner" -"volume_extension:volume_actions:begin_detaching": "rule:admin_or_owner" -"volume_extension:volume_actions:attach": "rule:admin_or_owner" -"volume_extension:volume_actions:detach": "rule:admin_or_owner" -"volume:get_all_transfers": "rule:admin_or_owner" -"volume:create_transfer": "rule:admin_or_owner" -"volume:get_transfer": "rule:admin_or_owner" -"volume:delete_transfer": "rule:admin_or_owner" -"volume:get_volume_metadata": "rule:admin_or_owner" -"volume:create_volume_metadata": "rule:admin_or_owner" -"volume:update_volume_metadata": "rule:admin_or_owner" -"volume:delete_volume_metadata": "rule:admin_or_owner" -"volume_extension:volume_image_metadata": "rule:admin_or_owner" -"volume:get": "rule:admin_or_owner" -"volume:get_all": "rule:admin_or_owner" -"volume:update": "rule:admin_or_owner" -"volume:delete": "rule:admin_or_owner" -"volume_extension:volume_tenant_attribute": "rule:admin_or_owner" -"volume_extension:volume_encryption_metadata": "rule:admin_or_owner" -"volume:multiattach": "rule:admin_or_owner" - -``` - - - - - - - -### Neutron Service - -**Last Update**: June 28, 2021 - -``` json - "create_subnet": "rule:admin_or_network_owner", - "get_subnet": "rule:admin_or_owner or rule:shared", - "update_subnet": "rule:admin_or_network_owner", - "delete_subnet": "rule:admin_or_network_owner", - "get_subnetpool": "rule:admin_or_owner or rule:shared_subnetpools", - "update_subnetpool": "rule:admin_or_owner", - "delete_subnetpool": "rule:admin_or_owner", - "get_address_scope": "rule:admin_or_owner or rule:shared_address_scopes", - "update_address_scope": "rule:admin_or_owner", - "delete_address_scope": "rule:admin_or_owner", - "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc", - "update_network": "rule:admin_or_owner", - "delete_network": "rule:admin_or_owner", - "network_device": "field:port:device_owner=~^network:", - "create_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner", - "create_port:mac_address": "rule:context_is_advsvc or rule:admin_or_network_owner", - "create_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner or rule:shared", - "create_port:fixed_ips:ip_address": "rule:context_is_advsvc or rule:admin_or_network_owner", - "create_port:fixed_ips:subnet_id": "rule:context_is_advsvc or rule:admin_or_network_owner or rule:shared", - "create_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", - "create_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", - "create_port:allowed_address_pairs": "rule:admin_or_network_owner", - "create_port:allowed_address_pairs:mac_address": "rule:admin_or_network_owner", - "create_port:allowed_address_pairs:ip_address": "rule:admin_or_network_owner", - "get_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner", - "update_port": "rule:admin_or_owner or 
rule:context_is_advsvc", - "update_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner", - "update_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner or rule:shared", - "update_port:fixed_ips:ip_address": "rule:context_is_advsvc or rule:admin_or_network_owner", - "update_port:fixed_ips:subnet_id": "rule:context_is_advsvc or rule:admin_or_network_owner or rule:shared", - "update_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", - "update_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", - "update_port:allowed_address_pairs": "rule:admin_or_network_owner", - "update_port:allowed_address_pairs:mac_address": "rule:admin_or_network_owner", - "update_port:allowed_address_pairs:ip_address": "rule:admin_or_network_owner", - "delete_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner", - "create_router:external_gateway_info": "rule:admin_or_owner", - "create_router:external_gateway_info:network_id": "rule:admin_or_owner", - "get_router": "rule:admin_or_owner", - "update_router": "rule:admin_or_owner", - "update_router:external_gateway_info": "rule:admin_or_owner", - "update_router:external_gateway_info:network_id": "rule:admin_or_owner", - "delete_router": "rule:admin_or_owner", - "add_router_interface": "rule:admin_or_owner", - "remove_router_interface": "rule:admin_or_owner", - "update_floatingip": "rule:admin_or_owner", - "delete_floatingip": "rule:admin_or_owner", - "get_floatingip": "rule:admin_or_owner", - "update_rbac_policy": "rule:admin_or_owner", - "update_rbac_policy:target_tenant": "rule:restrict_wildcard and rule:admin_or_owner", - "get_rbac_policy": "rule:admin_or_owner", - "delete_rbac_policy": "rule:admin_or_owner", - "get_auto_allocated_topology": "rule:admin_or_owner", - "get_trunk": "rule:admin_or_owner", - "delete_trunk": "rule:admin_or_owner", - "add_subports": "rule:admin_or_owner", - "remove_subports": "rule:admin_or_owner", - "get_security_groups": "rule:admin_or_owner", - "get_security_group": "rule:admin_or_owner", - "create_security_group": "rule:admin_or_owner", - "update_security_group": "rule:admin_or_owner", - "delete_security_group": "rule:admin_or_owner", - "get_security_group_rules": "rule:admin_or_owner", - "get_security_group_rule": "rule:admin_owner_or_sg_owner", - "create_security_group_rule": "rule:admin_or_owner", - "delete_security_group_rule": "rule:admin_or_owner", - -``` - - - - - - -### Glance Service - -**Last Update**: June 28, 2021 - -``` json - "add_image": "role:admin or role:member", - "delete_image": "role:admin or role:member", - "get_image": "role:admin or role:member", - "get_images": "role:admin or role:member", - "publicize_image": "role:admin or role:member", - "download_image": "role:admin or role:member", - "upload_image": "role:admin or role:member", - "get_image_location": "role:admin or role:member", - "set_image_location": "role:admin or role:member", -``` - - - - - - - -### Nova Compute Service - -**Last Update**: June 28, 2021 - -``` json - "os_compute_api:os-admin-password": "rule:admin_or_owner", - "os_compute_api:os-attach-interfaces": "rule:admin_or_owner", - "os_compute_api:os-attach-interfaces:create": "rule:admin_or_owner", - "os_compute_api:os-attach-interfaces:delete": "rule:admin_or_owner", - "os_compute_api:os-availability-zone:list": "rule:admin_or_owner", - "os_compute_api:os-config-drive": "rule:admin_or_owner", - "os_compute_api:os-console-output": 
"rule:admin_or_owner", - "os_compute_api:os-consoles:create": "rule:admin_or_owner", - "os_compute_api:os-consoles:show": "rule:admin_or_owner", - "os_compute_api:os-consoles:delete": "rule:admin_or_owner", - "os_compute_api:os-consoles:index": "rule:admin_or_owner", - "os_compute_api:os-create-backup": "rule:admin_or_owner", - "os_compute_api:os-deferred-delete": "rule:admin_or_owner", - "os_compute_api:os-extended-availability-zone": "rule:admin_or_owner", - "os_compute_api:os-extended-status": "rule:admin_or_owner", - "os_compute_api:os-extended-volumes": "rule:admin_or_owner", - "os_compute_api:extensions": "rule:admin_or_owner", - "os_compute_api:os-flavor-access": "rule:admin_or_owner", - "os_compute_api:os-flavor-extra-specs:show": "rule:admin_or_owner", - "os_compute_api:os-flavor-extra-specs:index": "rule:admin_or_owner", - "os_compute_api:os-flavor-rxtx": "rule:admin_or_owner", - "os_compute_api:flavors": "rule:admin_or_owner", - "os_compute_api:os-floating-ip-dns": "rule:admin_or_owner", - "os_compute_api:os-floating-ip-pools": "rule:admin_or_owner", - "os_compute_api:os-floating-ips": "rule:admin_or_owner", - "os_compute_api:os-fping": "rule:admin_or_owner", - "os_compute_api:image-size": "rule:admin_or_owner", - "os_compute_api:os-instance-actions": "rule:admin_or_owner", - "os_compute_api:ips:show": "rule:admin_or_owner", - "os_compute_api:ips:index": "rule:admin_or_owner", - "os_compute_api:os-keypairs": "rule:admin_or_owner", - "os_compute_api:limits": "rule:admin_or_owner", - "os_compute_api:os-lock-server:lock": "rule:admin_or_owner", - "os_compute_api:os-lock-server:unlock": "rule:admin_or_owner", - "os_compute_api:os-multinic": "rule:admin_or_owner", - "os_compute_api:os-networks:view": "rule:admin_or_owner", - "os_compute_api:os-pause-server:pause": "rule:admin_or_owner", - "os_compute_api:os-pause-server:unpause": "rule:admin_or_owner", - "os_compute_api:os-quota-sets:show": "rule:admin_or_owner", - "os_compute_api:os-quota-sets:detail": "rule:admin_or_owner", - "os_compute_api:os-remote-consoles": "rule:admin_or_owner", - "os_compute_api:os-rescue": "rule:admin_or_owner", - "os_compute_api:os-security-groups": "rule:admin_or_owner", - "os_compute_api:os-server-groups": "rule:admin_or_owner", - "os_compute_api:server-metadata:index": "rule:admin_or_owner", - "os_compute_api:server-metadata:show": "rule:admin_or_owner", - "os_compute_api:server-metadata:create": "rule:admin_or_owner", - "os_compute_api:server-metadata:update_all": "rule:admin_or_owner", - "os_compute_api:server-metadata:update": "rule:admin_or_owner", - "os_compute_api:server-metadata:delete": "rule:admin_or_owner", - "os_compute_api:os-server-password": "rule:admin_or_owner", - "os_compute_api:os-server-tags:delete_all": "rule:admin_or_owner", - "os_compute_api:os-server-tags:index": "rule:admin_or_owner", - "os_compute_api:os-server-tags:update_all": "rule:admin_or_owner", - "os_compute_api:os-server-tags:delete": "rule:admin_or_owner", - "os_compute_api:os-server-tags:update": "rule:admin_or_owner", - "os_compute_api:os-server-tags:show": "rule:admin_or_owner", - "os_compute_api:os-server-usage": "rule:admin_or_owner", - "os_compute_api:servers:index": "rule:admin_or_owner", - "os_compute_api:servers:detail": "rule:admin_or_owner", - "os_compute_api:servers:show": "rule:admin_or_owner", - "os_compute_api:servers:create": "rule:admin_or_owner", - "os_compute_api:servers:create:attach_volume": "rule:admin_or_owner", - "os_compute_api:servers:create:attach_network": "rule:admin_or_owner", - 
"os_compute_api:servers:delete": "rule:admin_or_owner", - "os_compute_api:servers:update": "rule:admin_or_owner", - "os_compute_api:servers:confirm_resize": "rule:admin_or_owner", - "os_compute_api:servers:revert_resize": "rule:admin_or_owner", - "os_compute_api:servers:reboot": "rule:admin_or_owner", - "os_compute_api:servers:resize": "rule:admin_or_owner", - "os_compute_api:servers:rebuild": "rule:admin_or_owner", - "os_compute_api:servers:create_image": "rule:admin_or_owner", - "os_compute_api:servers:create_image:allow_volume_backed": "rule:admin_or_owner", - "os_compute_api:servers:start": "rule:admin_or_owner", - "os_compute_api:servers:stop": "rule:admin_or_owner", - "os_compute_api:servers:trigger_crash_dump": "rule:admin_or_owner", - "os_compute_api:os-shelve:shelve": "rule:admin_or_owner", - "os_compute_api:os-shelve:unshelve": "rule:admin_or_owner", - "os_compute_api:os-simple-tenant-usage:show": "rule:admin_or_owner", - "os_compute_api:os-suspend-server:resume": "rule:admin_or_owner", - "os_compute_api:os-suspend-server:suspend": "rule:admin_or_owner", - "os_compute_api:os-tenant-networks": "rule:admin_or_owner", - "os_compute_api:os-virtual-interfaces": "rule:admin_or_owner", - "os_compute_api:os-volumes": "rule:admin_or_owner", - "os_compute_api:os-volumes-attachments:index": "rule:admin_or_owner", - "os_compute_api:os-volumes-attachments:create": "rule:admin_or_owner", - "os_compute_api:os-volumes-attachments:show": "rule:admin_or_owner", - "os_compute_api:os-volumes-attachments:delete": "rule:admin_or_owner" - - -``` - - - - - - - - -# Installing Private Cloud Gateway - OpenStack - - -Use the following steps to install a PCG cluster in your OpenStack environment. You can use the [Palette CLI](/palette-cli) or the PCG Installer Image to deploy a PCG cluster. Review the prerequisites for each option to help you identify the correct installation method. - - -
-
-
-
-## Prerequisites
-
-The following prerequisites are required to install an OpenStack PCG:
-
-- Palette version 4.0.X or greater.
-
-
-- A Palette API key. Refer to the [Create API Key](/user-management/user-authentication#apikey) page for guidance.
-
-
-- The Palette CLI installed on a Linux x86-64 host that has the Docker daemon installed and connectivity to both the Palette console and the OpenStack controller. Download the CLI from the [Downloads](/spectro-downloads#palettecli) page and refer to the [Palette CLI Install](/palette-cli/install-palette-cli) guide to learn more.
-
-
-- Private cloud gateway IP requirements:
-    - One IP address for a single-node PCG or three IP addresses for a three-node PCG.
-    - One IP address for the Kubernetes control plane.
-    - One additional Kubernetes control plane IP address for rolling upgrades.
-
-
-
-## Install PCG
-
-
-1. On an x86 Linux host, open a terminal session.
-
-
-
-2. Use the Palette CLI `login` command to authenticate the CLI with Palette. When prompted, enter the information listed in the following table.
-
-<br />
- - ```shell - palette login - ``` - -
- - |**Parameter** | **Description**| - |:-----------------------------|---------------| - |**Spectro Cloud Console** |Enter the Palette endpoint URL. When using the Palette SaaS service, enter ``https://console.spectrocloud.com``. When using a self-hosted instance of Palette, enter the URL for that instance. | - |**Allow Insecure Connection** |Enabling this option bypasses x509 verification. Enter `y` if you are using a self-hosted Palette instance with self-signed TLS certificates. Otherwise, enter `n`.| - |**Spectro Cloud API Key** |Enter your Palette API Key.| - |**Spectro Cloud Organization** |Enter your Palette Organization name.| - |**Spectro Cloud Project** |Enter your desired project name within the selected Organization.| - - -3. Once you have authenticated successfully, invoke the PCG installer by issuing the following command. When prompted, enter the information listed in each of the following tables. - -
- - ```bash - palette pcg install - ``` - -
- - |**Parameter** | **Description**| - |:-----------------------------|---------------| - |**Cloud Type**| Choose OpenStack.| - |**Private Cloud Gateway Name** | Enter a custom name for the PCG. Example: `openstack-pcg-1`.| - |**Share PCG Cloud Account across platform Projects** |Enter `y` if you want the Cloud Account associated with the PCG to be available from all projects within your organization. Enter `n` if you want the Cloud Account to only be available at the tenant admin scope.| - - -4. Next, provide environment configurations for the cluster. Refer to the following table for information about each option. - -
- - |**Parameter**| **Description**| - |:-------------|----------------| - |**HTTPS Proxy**|Leave this blank unless you are using an HTTPS Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: `https://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| - |**HTTP Proxy**|Leave this blank unless you are using an HTTP Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: `http://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| - |**No Proxy**|The default is blank. You can add a comma-separated list of local network CIDR addresses, hostnames, and domain names that should be excluded from being a proxy. This setting will be propagated to all the nodes to bypass the proxy server. Example if you have a self-hosted environment: `my.company.com,10.10.0.0/16`.| - |**Proxy CA Certificate Filepath**|The default is blank. You can provide the file path of a CA certificate on the installer host. If provided, this CA certificate will be copied to each host in the PCG cluster during deployment. The provided path will be used on the PCG cluster hosts. Example: `/usr/local/share/ca-certificates/ca.crt`.| - |**Pod CIDR**|Enter the CIDR pool that will be used to assign IP addresses to pods in the PCG cluster. The pod IP addresses should be unique and not overlap with any machine IPs in the environment.| - |**Service IP Range**|Enter the IP address range that will be used to assign IP addresses to services in the PCG cluster. The service IP addresses should be unique and not overlap with any machine IPs in the environment.| - - - -5. After the environment options, the next set of prompts is for configuring the PCG cluster for the OpenStack environment. The following table contains information about each prompt. - -
- - |**Parameter** | **Description**| - |-----------------------------------------|----------------| - |**OpenStack Identity Endpoint** | OpenStack Identity endpoint. Domain or IP address.
Example: `https://openstack.mycompany.com/identity`.| - |**OpenStack Account Username** | OpenStack account username.| - |**OpenStack Account Password** | OpenStack account password.| - |**Allow Insecure Connection** |Enabling this option bypasses x509 verification. Enter `y` if you are using an OpenStack instance with self-signed TLS certificates. Otherwise, enter `n`.| - |**CA Certificate** |This is only required when using TLS, in which case you would provide a base64-encoded CA certificate for your OpenStack instance. | - -6. Next, fill out additional OpenStack configurations. - -
-
-  |**Parameter** | **Description**|
-  |-----------------------------------------|----------------|
-  | **Default Domain** | OpenStack Domain. Example: `Default`.|
-  | **Default Region** | OpenStack Region. Example: `RegionOne`.|
-  | **Default Project** | OpenStack Project. Example: `dev`.|
-  | **Placement Type** | Placement can be static or dynamic. For static placement, VMs are placed into existing networks. For dynamic placement, a new network is created.|
-  | **Network** | Select an existing network. This is only required for static placement.|
-  | **Subnet** | Select an existing subnet. This is only required for static placement.|
-  | **DNS Server(s)** | Enter a comma-separated list of DNS server IPs. This is only required for dynamic placement.|
-  | **Node CIDR** | Enter a node CIDR. This is only required for dynamic placement. Example: `10.55.0.0/24`.|
-  | **SSH Public Key** | Provide the public OpenSSH key for the PCG cluster. Use this key when establishing an SSH connection with the PCG cluster. This prompt opens the operating system's default text editor, which is typically Vi in Linux environments.|
-  | **Patch OS on boot** | This parameter indicates whether or not to patch the OS of the PCG hosts on the first boot.|
-  | **Reboot nodes once OS patch is applied** | This parameter indicates whether or not to reboot PCG nodes after OS patches are complete. This only applies if the **Patch OS on boot** parameter is enabled.|
-
-
-7. Configure the OpenStack PCG Machine by answering the following prompts.
-
-<br />
- - |**Parameter** | **Description**| - |-----------------------------------------|----------------| - | **Availability Zone** | Select the availability zone. | - | **PCG Cluster Size** | Select the node size of the PCG cluster. You can choose between **1** node or **3** nodes for High Availability (HA). | - - -8. A new PCG configuration file is generated and its location is displayed on the console. You will receive an output similar to the following. - -
- - ```bash hideClipboard - ==== PCG config saved ==== - Location: :/home/spectro/.palette/pcg/pcg-20230706150945/pcg.yaml - ``` - - - - The `CloudAccount.apiKey` and `Mgmt.apiKey` values in the **pcg.yaml** are encrypted and cannot be manually updated. To change these values, restart the installation process using the `palette pcg install` command. - - - -
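-
-Each run of the installer saves its configuration in a new timestamped folder under **.palette/pcg** in your home directory. If you have run the installer more than once and want to confirm which configuration was generated last, you can list the folders by modification time. This is a generic shell sketch, not a Palette CLI feature; the path pattern is taken from the example output above.
-
-```bash hideClipboard
-# List PCG config folders, newest first, and show the most recent one
-ls -td ~/.palette/pcg/pcg-*/ | head -1
-```
-
-<br />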
- -The Palette CLI will now provision a PCG cluster in your OpenStack environment. -If the deployment fails due to misconfiguration, update the PCG configuration file and rerun the installer. Refer to the [Edit and Redeploy PCG](/clusters/data-center/openstack#editandredeploypcg) section below. For additional assistance, visit our [Customer Support](https://spectrocloud.atlassian.net/servicedesk/customer/portals) portal. - - - -## Validate - -Once installed, the PCG registers itself with Palette. To verify the PCG is registered, use the following steps. - - -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Navigate to the left **Main Menu** and select **Tenant Settings** - - -3. From the **Tenant Settings Menu** click on **Private Cloud Gateways**. Verify your PCG cluster is available from the list of PCG clusters displayed. - - -4. When you install the PCG, a cloud account is auto-created. To verify the cloud account is created, go to **Tenant Settings > Cloud Accounts** and locate **OpenStack** in the table. Verify your OpenStack account is listed. - - - -## Edit and Redeploy PCG - -To change the PCG install values, restart the installation process using the `palette pcg install` command. Use the following steps to redeploy the PCG or restart the install process. - -
-
-1. If needed, make changes to the PCG configuration file that the CLI created during installation. Use a text editor, such as Vi or Nano, to update the PCG install configuration file.
-
-<br />
- - ```shell hideClipboard - ==== Create PCG reference config ==== - ==== PCG config saved ==== - Location: /Users/demo/.palette/pcg/pcg-20230717114807/pcg.yaml - ``` - - ```bash hideClipboard - vi /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml - ``` - - - -2. To redeploy the PCG, use the `install` command with the flag `--config-file`. Provide the file path to the generated PCG config file that was generated and displayed in the output. - -
- - ```bash hideClipboard - palette pcg install --config-file /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml - ``` - -
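-
-If you edit the generated configuration file, consider keeping a backup copy so you can review exactly what you changed before redeploying. This is a generic shell sketch and not part of the Palette CLI; the path shown is the example location from the output above.
-
-```bash hideClipboard
-# Back up the generated config before editing, then compare your changes
-cp /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml.bak
-diff -u /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml.bak /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml
-```
-
-<br />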
- - - - - - - `video: title: "openstack-pcg-creation": /pcg-creation-video/openstack.mp4` - - - -## Prerequisites - -The following system requirements are required to deploy a PCG cluster. - -- Palette version 3.4.X or older. - -- A Linux environment with a Docker daemon installed and a connection to Palette and the OpenStack environment. The installer must be invoked on an up-to-date Linux system with an x86-64 architecture. ARM architecture is currently not supported. - - -- Private Cloud Gateway IP requirements: - * One IP address for a single-node PCG or three IP addresses for a three-node PCG cluster. - * One IP address for the Kubernetes control plane. - - -## Generate pairing code - -Navigate to the Private Cloud Gateway page under Administration and Create a new OpenStack gateway. Copy the pairing code displayed on the page. This will be used in subsequent steps. - -## Generate gateway config - -Invoke the gateway installer in interactive mode to generate the gateway configuration file. Follow the prompts to provide the Palette Management, OpenStack cloud account, Environment and Placement information as requested. - -```bash -docker run -it --rm \ - --net=host \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v /tmp:/opt/spectrocloud \ - gcr.io/spectro-images-public/release/spectro-installer:1.0.12 \ - -o true -``` - -#### Enter Palette Management Information: - -|**Parameter**| **Description**| -|----------------------------------------|:----------------| -|**Palette Console** | Management Console endpoint e.g. https://console.spectrocloud.com| -|**Palette Username** | Login email address
e.g. user1@company.com| -|**Palette Password** | Login password| -|**Private Cloud Gateway pairing code**| The unique authentication code
generated in the previous step.| - -#### Enter Environment Configuration: - -| **Parameter** | **Description** | - |------------------------------------|----------------| - |**HTTPS Proxy(--https_proxy)**|The endpoint for the HTTPS proxy server. This setting will be
propagated to all the nodes launched in the proxy network.
e.g., http://USERNAME:PASSWORD@PROXYIP:PROXYPORT| - |**HTTP Proxy(--http_proxy)**|The endpoint for the HTTP proxy server. This setting will be
propagated to all the nodes launched in the proxy network.
e.g., http://USERNAME:PASSWORD@PROXYIP:PROXYPORT| - |**No Proxy(--no_proxy)** |A comma-separated list of local network CIDRs, hostnames,
domain names that should be excluded from proxying.
This setting will be propagated to all the nodes to bypass the proxy server.
e.g., maas.company.com,10.10.0.0/16| - |**Pod CIDR (--pod_cidr)**|The CIDR pool is used to assign IP addresses to pods in the cluster.
This setting will be used to assign IP
addresses to pods in Kubernetes clusters.
The pod IP addresses should be unique and
should not overlap with any <br />
Virtual Machine IPs in the environment.| - |**Service IP Range (--svc_ip_range)**|The IP address range that will be assigned to <br />
services created on Kubernetes. This setting will be used
to assign IP addresses to services in Kubernetes clusters.
The service IP addresses should be unique and not
overlap with any virtual machine IPs in the environment.| - -#### Enter OpenStack Account Information: - -|**Parameter** | **Description**| -|-----------------------------------------|----------------| -|**OpenStack Identity Endpoint** | OpenStack Identity endpoint. Domain or IP address.
e.g. https://openstack.mycompany.com/identity| -|**OpenStack Account Username** | OpenStack account username| -|**OpenStack Account Password** | OpenStack account password| -|**Default Domain** | Default OpenStack domain. e.g. Default| -|**Default Region** | Default OpenStack region. e.g. RegionOne| -|**Default Project** | Default OpenStack project. e.g. dev| - - -#### Enter OpenStack cluster configuration for the Private Cloud Gateway: - -1. Verify the following parameters: - * Default Domain - * Default Region - * Default Project - - -2. Enter the values for: - -|**Parameter** | **Description**| -|-----------------------------------------|----------------| - | **SSH Key** | Select a key.| - | **Placement option as Static or Dynamic** | For static placement, VMs are placed into existing
networks whereas, for dynamic placement, new network is created.| - | **Network** | Select an existing network. | - | **Sub Network** | Select a sub network.| - -#### Enter OpenStack Machine configuration for the Private Cloud Gateway: - -* Select the availability zone -* Choose flavor -* Number of nodes: Choose between **1** and **3** - -After this step, a new gateway configuration file is generated and its location is displayed on the console. -e.g.: Config created:/opt/spectrocloud//install-pcg-ar-dev-os-gw-02-aug-01-20210802062349/pcg.yaml - - -## Copy configuration file to known location: - -Copy the pcg.yaml file to a known location for easy access and updates. - - -```bash -cp /tmp/install-pcg-xxx/pcg.yaml /tmp -``` - - -## Deploy Private Cloud Gateway - -Invoke the gateway installer in *silent mode*, providing the gateway config file as input to deploy the gateway. New VM(s) will be launched in your OpenStack environment and a gateway will be installed on those VM(s). If deployment fails due to misconfiguration, update the gateway configuration file and rerun the command. - -```bash -docker run -it --rm \ - --net=host \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v /tmp:/opt/spectrocloud \ - gcr.io/spectro-images-public/release/spectro-installer:1.0.12 \ - -s true \ - -c //opt/spectrocloud/pcg.yaml -``` - -
- -
- - - - - -## Upgrade PCG -Palette maintains the OS image and all configurations for the PCG. Periodically, the OS images, configurations, or other components need to be upgraded to resolve security or functionality issues. Palette releases such upgrades when required and in an upgrade notification on the PCG. - -Administrators should review the changes and apply them at a suitable time. Upgrading a PCG does not result in any downtime for the tenant clusters. During the upgrade process, the provisioning of new clusters might be temporarily unavailable. New cluster requests are queued while the PCG is being upgraded and are processed as soon as the PCG upgrade is complete. - -## Delete the PCG -The following steps need to be performed to delete a PCG: - -1. As a tenant admin, navigate to the Private Cloud Gateway page under settings. - - -2. Invoke the **Delete** action on the cloud gateway instance that needs to be deleted. - - -3. The system performs a validation to ensure that there are no running tenant clusters associated with the gateway instance being deleted. If such instances are found, the system presents an error. Delete relevant running tenant clusters and retry the deletion of the cloud gateway. - - -4. Delete the gateway. - - -The delete gateway operation deletes the gateway instance registered in the management console, however the gateway infrastructure such as Load Balancers, VMs, Networks (if dynamic provision was chosen), etc. need to be deleted on the OpenStack console - - -## Resize the PCG -You can set up the PCG as a single-node or three-node cluster for high availability (HA). For production environments, we recommend three nodes. A PCG can be initially set up with one node and resized to three nodes later. Use the following steps to resize a single-node PCG cluster to a three-node PCG cluster. - -1. As a tenant administrator, navigate to the Private Cloud Gateway page under settings. - - -2. Invoke the resize action for the relevant cloud gateway instance. - - -3. Update the size from 1 to 3. - - -4. The gateway upgrade begins shortly after the update. Two new nodes are created, and the gateway is upgraded to a 3-node cluster. - - -# Creating an OpenStack Cloud Account - -A default cloud account is automatically created when the private cloud gateway is configured. This cloud account can be used to create tenant clusters. Additional cloud accounts may be created if desired within the same gateway. - -1. To create an OpenStack cloud account, proceed to project settings and select 'create cloud account' under OpenStack. - - -2. Fill the following values to the cloud account creation wizard. - - |**Property**|**Description** | - |:---------------|:-----------------------| - | **Account Name** | Custom name for the cloud account | - | **Private cloud gateway**| Reference to a running cloud gateway | - | **Username** | OpenStack Username | - | **Password**| OpenStack Password | - | **Identity Endpoint** | Identity Endpoint of the gateway | - | **CA Certificate** | Digital certificate of authority | - | **Parent Region** | OpenStack Region to be used | - | **Default Domain** | Default OpenStack domain | - | **Default Project** | Default OpenStack project | - - -# Deploying an OpenStack Cluster - -`video: title: "openstack-cluster-creation": ./cluster-creation-videos/openstack.mp4` - -The following steps need to be performed to provision a new OpenStack cluster: - -1. Provide basic cluster information like Name, Description, and Tags. 
Tags are currently not propagated to the VMs deployed on the cloud/data center environments. - - -2. Select a Cluster Profile created for the OpenStack environment. The profile definition will be used as the cluster construction template. - - -3. Review and override Pack Parameters as desired. By default, Parameters for all packs are set with values defined in the Cluster Profile. - - -4. Provide an OpenStack Cloud account and placement information. - - * **Cloud Account** - Select the desired cloud account. OpenStack cloud accounts with credentials need to be preconfigured in project settings. An account is auto-created as part of the cloud gateway setup and is available for provisioning of tenant clusters if permitted by the administrator. - * Domain - * Region - * Project - * SSH Key - * Placement - * If the user choice of placement is Static then: - * Network - * Subnet - * If the user choice of placement is NOT Static then: - * Subnet CIDR - * DNS Name Server - -5. Configure the master and worker node pools. Fill out the input fields in the **Add node pool** page. The following table contains an explanation of the available input parameters. - -### Master Pool - -|**Parameter** | **Description**| -|------------------|---------------| -|**Name** |A descriptive name for the node pool.| -|**Size** |Number of VMs to be provisioned for the node pool. For the master pool, this number can be 1, 3, or 5.| -|**Allow worker capability**|Select this option for allowing workloads to be provisioned on master nodes.| -|**[Labels](/clusters/cluster-management/taints#overviewonlabels)**| Add a label to apply placement constraints on a pod, such as a node eligible for receiving the workload. -|**[Taints](/clusters/cluster-management/taints#overviewontaints)**|To set toleration to pods and allow (but do not require) the pods to schedule onto nodes with matching taints.| -|**Instance type** |Select the compute instance type to be used for all nodes in the node pool.| -|**Availability Zones**| Choose one or more availability zones. Palette provides fault tolerance to guard against hardware failures, network failures, etc., by provisioning nodes across availability zones if multiple zones are selected.| -|**Disk Size**|Give the required storage size| - -### Worker Pool - -|**Parameter** | **Description**| -|------------------|---------------| -|**Name** |A descriptive name for the node pool.| -|**Enable Autoscaler**|You can enable the autoscaler, by toggling the **Enable Autoscaler** button. Autoscaler scales up and down resources between the defined minimum and the maximum number of nodes to optimize resource utilization.| -||Set the scaling limit by setting the **Minimum Size** and **Maximum Size**, as per the workload the number of nods will scale up from minimum set value to maximum set value and the scale down from maximum set value to minimum set value| -|**Size** |Number of VMs to be provisioned for the node pool.| -|**Rolling Update**| Rolling update has two available options. Review the [Update Parameter](#update-parameter-table) table below for more details. -|**[Labels](/clusters/cluster-management/taints#overviewonlabels)**|Add a label to apply placement constraints on a pod, such as a node eligible for receiving the workload. 
-|**[Taints](/clusters/cluster-management/taints#overviewontaints)**|To set toleration to pods and allow (but do not require) the pods to schedule onto nodes with matching taints.| -|**Instance type** |Select the compute instance type to be used for all nodes in the node pool.| -|**Availability Zones**| Choose one or more availability zones. Palette provides fault tolerance to guard against hardware failures, network failures, etc., by provisioning nodes across availability zones if multiple zones are selected.| -|**Disk Size**|Provide the required storage size - - - -6. Configure the cluster policies/features. - * Manage Machines - * Scan Policies - * Backup Policies - - -7. Click to get details on [cluster management feature](/clusters/cluster-management/#cluster-updates). - - -8. Review settings and deploy the cluster. Provisioning status with details of ongoing provisioning tasks is available to track progress. - -## Deleting an OpenStack Cluster - -The deletion of an OpenStack cluster results in the removal of all Virtual machines and associated storage disks created for the cluster. The following tasks need to be performed to delete an OpenStack cluster: - -1. Select the cluster to be deleted from the **Cluster** **View** page and navigate to the **Cluster Overview** page. - - -2. Invoke a delete action available on the page: **Cluster** > **Settings** > **Cluster** **Settings** > **Delete** **Cluster**. - - -3. Click **Confirm** to delete. - - -The Cluster Status is updated to **Deleting** while cluster resources are being deleted. Provisioning status is updated with the ongoing progress of the delete operation. Once all resources are successfully deleted, the cluster status changes to **Deleted** and is removed from the list of clusters. - - - -Delete action is only available for clusters that are fully provisioned. For clusters that are still in the process of being provisioned, the 'Abort' action is available to stop provisioning and delete all resources. - - - -# Force Delete a Cluster - -A cluster stuck in the **Deletion** state can be force deleted by the user through the User Interface. The user can go for a force deletion of the cluster, only if it is stuck in a deletion state for a minimum of **15 minutes**. Palette enables cluster force delete from the Tenant Admin and Project Admin scope. - -## To force delete a cluster: - -1. Log in to the Palette Management Console. - - -2. Navigate to the **Cluster Details** page of the cluster stuck in deletion. - - - If the deletion is stuck for more than 15 minutes, click the **Force Delete Cluster** button from the **Settings** dropdown. - - - If the **Force Delete Cluster** button is not enabled, wait for 15 minutes. The **Settings** dropdown will give the estimated time for the auto-enabling of the **Force Delete** button. - - -If there are any cloud resources still on the cloud, the user should cleanup those resources before going for the force deletion. - diff --git a/content/docs/04-clusters/02-data-center/03-vmware.md b/content/docs/04-clusters/02-data-center/03-vmware.md deleted file mode 100644 index fe15df0213..0000000000 --- a/content/docs/04-clusters/02-data-center/03-vmware.md +++ /dev/null @@ -1,1186 +0,0 @@ ---- -title: "VMware" -metaTitle: "Create VMware clusters in Palette" -metaDescription: "Learn how to configure VMware to create VMware clusters in Palette." 
-hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - - -# Overview - -The following are some architectural highlights of Kubernetes clusters provisioned by Palette on VMware: - -
-
-- Kubernetes nodes can be distributed across multiple compute clusters, which serve as distinct fault domains.
-
-
-- Support for static IP as well as DHCP. If you are using DHCP, Dynamic DNS is required.
-
-
-- IP pool management for assigning blocks of IPs dedicated to clusters or projects.
-
-
-- A Private Cloud Gateway (PCG) that you set up within the environment facilitates communications between the Palette management platform and vCenter installed in the private data center.
-
-<br />
-
-  The PCG is Palette's on-prem component that enables support for isolated, private cloud, or data center environments. When the PCG is installed on-prem, it registers itself with Palette's SaaS portal and enables secure communications between the SaaS portal and the private cloud environment.
-
-![vmware_arch_oct_2020.png](/vmware_arch_oct_2020.png)
-
-# Prerequisites
-
-The following prerequisites must be met before deploying Kubernetes clusters in VMware:
-
-<br />
- -- vSphere version 7.0 or above. vSphere 6.7 is supported but we do not recommend it, as it reached end of general support in 2022. - -
- - Palette supports port groups as follows. Opaque networks in vCenter Server are *not* supported. - - - Virtual machine port groups on vSphere standard switch - - Distributed port groups on vSphere distributed switch - - NSX-T distributed virtual port group - - -- A Resource Pool configured across the hosts onto which the workload clusters will be provisioned. Every host in the Resource Pool will need access to shared storage, such as vSAN, to be able to make use of high-availability (HA) control planes. - - -- Network Time Protocol (NTP) configured on each ESXi host. - - -- An active vCenter account with all the permissions listed in [VMware Privileges](/clusters/data-center/vmware#vmwareprivileges). - - -- Installed PCG for VMware. Installing the PCG will automatically register a cloud account for VMware in Palette. You can register your additional VMware cloud accounts in Palette as described in the [Create VMware Cloud Account](/clusters/data-center/vmware#createavmwarecloudaccount) section. - - -- A subnet with egress access to the internet (direct or via proxy): - - - For proxy: HTTP_PROXY, HTTPS_PROXY (both required). - - Outgoing internet connection on port 443 to api.spectrocloud.com. - - -- PCG IP requirements are: - - - One node with one IP address or three nodes for HA with three IP addresses. - - One Kubernetes control-plane (VIP). - - One Kubernetes control-plane (extra). - - -- IPs for application workload services, such as LoadBalancer service. - - -- A DNS to resolve public internet names, such as `api.spectrocloud.com`. - - -- Shared Storage between vSphere hosts. - - -- A cluster profile created in Palette for VMWare. - - -- Zone tagging for dynamic storage allocation for persistent storage. - - - - -The following naming conventions apply to vSphere Region and Zone tags: - -
- -- Valid tags consist of alphanumeric characters. - - -- Tags must start and end with an alphanumeric character. - - -- The regex used for validation is `(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?` - -Some example Tags are: `MyValue`, `my_value`, and `12345`. - -
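-
-If you want to check a tag value against the validation regex above before creating it in vSphere, you can do so with standard shell tooling. This is a generic sketch, not a Palette requirement; it simply anchors the documented pattern and tests a candidate value.
-
-```bash hideClipboard
-# Prints "valid" if the candidate tag value matches the documented pattern
-TAG="my_value"
-echo "$TAG" | grep -Eq '^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$' && echo "valid" || echo "invalid"
-```
-
-<br />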
- -## Zone Tagging - -Zone tagging is required for dynamic storage allocation across fault domains when you provision workloads that require persistent storage. This is required for Palette installation and useful for workloads deployed in tenant clusters that require persistent storage. Use unique vSphere tags on data centers (k8s-region) and compute clusters (k8s-zone) to create distinct zones in your environment. Tag values must be unique. - - For example, assume your vCenter environment includes three compute clusters (cluster-1, cluster-2, and cluster-3) that are part of data center dc-1. You can tag them as follows: - -| **vSphere Object** | **Tag Category** | **Tag Value** | -| :------------- | :---------- | :----------- | -| dc-1 | k8s-region | region1 | -| cluster-1 | k8s-zone | az1 | -| cluster-2 | k8s-zone | az2 | -| cluster-3 | k8s-zone | az3 | - - -
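-
-Tags and tag categories can be created in the vSphere Client or with tooling of your choice. As an illustration only, the sketch below uses the open source `govc` CLI to create the categories and tags from the example table above; verify the exact subcommands and flags against your `govc` version before using it.
-
-```bash hideClipboard
-# Assumes govc is installed and GOVC_URL, GOVC_USERNAME, and GOVC_PASSWORD are set
-# Create the tag categories
-govc tags.category.create k8s-region
-govc tags.category.create k8s-zone
-
-# Create the tags from the example table
-govc tags.create -c k8s-region region1
-govc tags.create -c k8s-zone az1
-
-# Attach the tags to the data center and a compute cluster
-govc tags.attach -c k8s-region region1 /dc-1
-govc tags.attach -c k8s-zone az1 /dc-1/host/cluster-1
-```
-
-<br />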
- -# VMware Privileges - -The vSphere user account that deploys Palette must have the minimum root-level vSphere privileges listed in the table below. The **Administrator** role provides superuser access to all vSphere objects. For users without the **Administrator** role, one or more custom roles can be created based on tasks the user will perform. -Permissions and privileges vary depending on the vSphere version you are using. - -Select the tab for your vSphere version. - -
- - - -If the network is a Distributed Port Group under a vSphere Distributed Switch (VDS), ReadOnly access to the VDS without “Propagate to children” is required. - - - -
- - - - - -
- -## Root-Level Role Privileges - -Root-level role privileges listed in the table are applied only to root objects and data center objects. - - - -**vSphere Object** |**Privileges**| -|---------------|----------| -|**Cns**|Searchable| -|**Datastore**|Browse datastore -|**Host**|Configuration -||* Storage partition configuration -|**vSphere** **Tagging**|Create vSphere Tag| -||Edit vSphere Tag| -|**Network**|Assign network| -|**Sessions**|Validate session| -|**VM Storage Policies**|View VM storage policies| -|**Storage views**|View| - -
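-
-If you prefer to script role creation instead of using the vSphere Client, the sketch below shows how a root-level role could be created with the open source `govc` CLI. The role name is illustrative, and the privilege IDs are a best-effort mapping of the rows above to vSphere privilege identifiers; confirm them in your environment (for example, with `govc role.ls Admin`) before relying on this.
-
-```bash hideClipboard
-# Create a root-level role with the privileges listed above (IDs are assumptions; verify before use)
-govc role.create spectro-root-role \
-  Cns.Searchable \
-  Datastore.Browse \
-  Host.Config.Storage \
-  InventoryService.Tagging.CreateTag \
-  InventoryService.Tagging.EditTag \
-  Network.Assign \
-  Sessions.ValidateSession \
-  StorageProfile.View \
-  StorageViews.View
-```
-
-<br />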
- -## Spectro Role Privileges - - -The Spectro role privileges listed in the table must be applied to the spectro-template folder, hosts, clusters, virtual machines, templates, datastore, and network objects. - -
- - - -Palette downloads images and Open Virtual Appliance (OVA) files to the spectro-templates folder and clones images from it to create nodes. - - - - -|**vSphere Object** |**Privileges**| -|---------------|----------| -|**spectro-templates** |Read only| -|**Cns**|Searchable -|**Datastore**|Allocate space| -||Browse datastore| -||Low-level file operations| -||Remove file| -||Update virtual machine files| -||Update virtual machine metadata| -|**Folder**|Create folder| -||Delete folder| -||Move folder| -||Rename folder| -|**Host**|Local operations| -||Reconfigure virtual machine| -|**vSphere Tagging**|Assign or Unassign vSphere Tag| -||Create vSphere Tag| -||Delete vSphere Tag| -||Edit vSphere Tag| -|**Network**|Assign network| -|**Resource**|Apply recommendation| -||Assign virtual machine to resource pool| -||Migrate powered off virtual machine| -||Migrate powered on virtual machine| -||Query vMotion| -|**Sessions**|Validate session| -|**VM Storage Policies**|View VM storage policies| -|**Storage views**|Configure service| -||View| -|**Tasks**|Create task| -||Update task| -|**vApp**|Export| -||Import| -||View OVF environment| -||vApp application configuration| -||vApp instance configuration| -|**Virtual machines**|**Change Configuration**| -||* Acquire disk lease| -||* Add existing disk| -||* Add new disk| -||* Add or remove device| -||* Advanced configuration| -||* Change CPU count| -||* Change Memory| -||* Change Settings| -||* Change Swapfile placement| -||* Change resource| -||* Configure Host USB device| -||* Configure Raw device| -||* Configure managedBy| -||* Display connection settings| -||* Extend virtual disk| -||* Modify device settings| -||* Query Fault Tolerance compatibility| -||* Query unowned files| -||* Reload from path| -||* Remove disk| -||* Rename| -||* Reset guest information| -||* Set annotation| -||* Toggle disk change tracking| -||* Toggle fork parent| -||* Upgrade virtual machine compatibility| -||**Edit Inventory**| -||* Create from existing| -||* Create new| -||* Move| -||* Register| -||* Remove| -||* Unregister| -||**Guest operations**| -||* Guest operation alias modification| -||* Guest operation alias query| -||* Guest operation modifications| -||* Guest operation program execution| -||* Guest operation queries| -||**Interaction**| -||* Console interaction| -||* Power off| -||* Power on| -||**Provisioning**| -||* Allow disk access| -||* Allow file access| -||* Allow read-only disk access| -||* Allow virtual machine download| -||* Allow virtual machine files upload| -||* Clone template| -||* Clone virtual machine| -||* Create template from virtual machine| -||* Customize guest| -||* Deploy template| -||* Mark as template| -||* Mark as virtual machine| -||* Modify customization specification| -||* Promote disks| -||* Read customization specifications| -||**Service configuration**| -||* Allow notifications| -||* Allow polling of global event notifications| -||* Manage service configurations| -||* Modify service configuration| -||* Query service configurations| -||* Read service configuration| -||**Snapshot management**| -||* Create snapshot| -||* Remove snapshot| -||* Rename snapshot| -||* Revert to snapshot| -||**vSphere Replication**| -||* Configure replication| -||* Manage replication| -||* Monitor replication| -|**vSAN**|Cluster| -||ShallowRekey| - - - -
- - -
- -## Root-Level Role Privileges - -Root-level role privileges listed in the table are applied only to root object and data center objects. - -**vSphere Object** |**Privileges**| -|---------------|----------| -|**Cns**|Searchable| -|**Datastore**|Browse datastore -|**Host**|Configuration -||* Storage partition configuration -|**vSphere** **Tagging**|Create vSphere Tag| -||Edit vSphere Tag| -|**Network**|Assign network| -|**Sessions**|Validate session| -|**Profile-driven storage**|Profile-driven storage view| -|**Storage views**|View| - -
- -## Spectro Role Privileges - - -The Spectro role privileges listed in the table must be applied to the spectro-template folder, hosts, clusters, virtual machines, templates, datastore, and network objects. - -
- - - -Palette downloads images and Open Virtual Appliance (OVA) files to the spectro-templates folder and clones images from it to create nodes. - - - -|**vSphere Object** |**Privileges**| -|---------------|----------| -|**spectro-templates** |Read only| -|**Cns**|Searchable -|**Datastore**|Allocate space| -||Browse datastore| -||Low level file operations| -||Remove file| -||Update virtual machine files| -||Update virtual machine metadata| -|**Folder**|Create folder| -||Delete folder| -||Move folder| -||Rename folder| -|**Host**|Local operations| -||Reconfigure virtual machine| -|**vSphere Tagging**|Assign or Unassign vSphere Tag| -||Create vSphere Tag| -||Delete vSphere Tag| -||Edit vSphere Tag| -|**Network**|Assign network| -|**Resource**|Apply recommendation| -||Assign virtual machine to resource pool| -||Migrate powered off virtual machine| -||Migrate powered on virtual machine| -||Query vMotion| -|**Sessions**|Validate session| -|**Profile-driven storage**|Profile-driven storage view| -|**Storage views**|Configure service| -||View| -|**Tasks**|Create task| -||Update task| -|**vApp**|Export| -||Import| -||View OVF environment| -||vApp application configuration| -||vApp instance configuration| -|**Virtual machines**|**Change Configuration**| -||* Acquire disk lease| -||* Add existing disk| -||* Add new disk| -||* Add or remove device| -||* Advanced configuration| -||* Change CPU count| -||* Change Memory| -||* Change Settings| -||* Change Swapfile placement| -||* Change resource| -||* Configure Host USB device| -||* Configure Raw device| -||* Configure managedBy| -||* Display connection settings| -||* Extend virtual disk| -||* Modify device settings| -||* Query Fault Tolerance compatibility| -||* Query unowned files| -||* Reload from path| -||* Remove disk| -||* Rename| -||* Reset guest information| -||* Set annotation| -||* Toggle disk change tracking| -||* Toggle fork parent| -||* Upgrade virtual machine compatibility| -||**Edit Inventory**| -||* Create from existing| -||* Create new| -||* Move| -||* Register| -||* Remove| -||* Unregister| -||**Guest operations**| -||* Guest operation alias modification| -||* Guest operation alias query| -||* Guest operation modifications| -||* Guest operation program execution| -||* Guest operation queries| -||**Interaction**| -||* Console interaction| -||* Power off| -||* Power on| -||**Provisioning**| -||* Allow disk access| -||* Allow file access| -||* Allow read-only disk access| -||* Allow virtual machine download| -||* Allow virtual machine files upload| -||* Clone template| -||* Clone virtual machine| -||* Create template from virtual machine| -||* Customize guest| -||* Deploy template| -||* Mark as template| -||* Mark as virtual machine| -||* Modify customization specification| -||* Promote disks| -||* Read customization specifications| -||**Service configuration**| -||* Allow notifications| -||* Allow polling of global event notifications| -||* Manage service configurations| -||* Modify service configuration| -||* Query service configurations| -||* Read service configuration| -||**Snapshot management**| -||* Create snapshot| -||* Remove snapshot| -||* Rename snapshot| -||* Revert to snapshot| -||**vSphere Replication**| -||* Configure replication| -||* Manage replication| -||* Monitor replication| -|**vSAN**|Cluster| -||ShallowRekey| - - -
- - -
- -## Root-Level Role Privileges - - -Root-level role privileges listed in the table are applied only to root object and data center objects. - - -**vSphere Object** |**Privileges**| -|---------------|----------| -|**Cns**|Searchable| -|**Datastore**|Browse datastore -|**Host**|Configuration -||* Storage partition configuration -|**vSphere** **Tagging**|Create vSphere Tag| -||Edit vSphere Tag| -|**Network**|Assign network| -|**Sessions**|Validate session| -|**Profile-driven storage**|Profile-driven storage view| -|**Storage views**|View| - -
- -## Spectro Role Privileges - -The Spectro role privileges listed in the table must be applied to the spectro-template folder, hosts, clusters, virtual machines, templates, datastore, and network objects. - -
- - - -Palette downloads images and Open Virtual Appliance (OVA) files to the spectro-templates folder and clones images from it to create nodes. - - - -|**vSphere Object** |**Privileges**| -|---------------|----------| -|**spectro-templates** |Read only| -|**Cns**|Searchable -|**Datastore**|Allocate space| -||Browse datastore| -||Low level file operations| -||Remove file| -||Update virtual machine files| -||Update virtual machine metadata| -|**Folder**|Create folder| -||Delete folder| -||Move folder| -||Rename folder| -|**Host**|Local operations| -||Reconfigure virtual machine| -|**vSphere Tagging**|Assign or Unassign vSphere Tag| -||Create vSphere Tag| -||Delete vSphere Tag| -||Edit vSphere Tag| -|**Network**|Assign network| -|**Resource**|Apply recommendation| -||Assign virtual machine to resource pool| -||Migrate powered off virtual machine| -||Migrate powered on virtual machine| -||Query vMotion| -|**Sessions**|Validate session| -|**Profile-driven storage**|Profile-driven storage view| -|**Storage views**|Configure service| -||View| -|**Tasks**|Create task| -||Update task| -|**vApp**|Export| -||Import| -||View OVF environment| -||vApp application configuration| -||vApp instance configuration| -|**Virtual machines**|**Change Configuration**| -||* Acquire disk lease| -||* Add existing disk| -||* Add new disk| -||* Add or remove device| -||* Advanced configuration| -||* Change CPU count| -||* Change Memory| -||* Change Settings| -||* Change Swapfile placement| -||* Change resource| -||* Configure Host USB device| -||* Configure Raw device| -||* Configure managedBy| -||* Display connection settings| -||* Extend virtual disk| -||* Modify device settings| -||* Query Fault Tolerance compatibility| -||* Query unowned files| -||* Reload from path| -||* Remove disk| -||* Rename| -||* Reset guest information| -||* Set annotation| -||* Toggle disk change tracking| -||* Toggle fork parent| -||* Upgrade virtual machine compatibility| -||**Edit Inventory**| -||* Create from existing| -||* Create new| -||* Move| -||* Register| -||* Remove| -||* Unregister| -||**Guest operations**| -||* Guest operation alias modification| -||* Guest operation alias query| -||* Guest operation modifications| -||* Guest operation program execution| -||* Guest operation queries| -||**Interaction**| -||* Console interaction| -||* Power off| -||* Power on| -||**Provisioning**| -||* Allow disk access| -||* Allow file access| -||* Allow read-only disk access| -||* Allow virtual machine download| -||* Allow virtual machine files upload| -||* Clone template| -||* Clone virtual machine| -||* Create template from virtual machine| -||* Customize guest| -||* Deploy template| -||* Mark as template| -||* Mark as virtual machine| -||* Modify customization specification| -||* Promote disks| -||* Read customization specifications| -||**Service configuration**| -||* Allow notifications| -||* Allow polling of global event notifications| -||* Manage service configurations| -||* Modify service configuration| -||* Query service configurations| -||* Read service configuration| -||**Snapshot management**| -||* Create snapshot| -||* Remove snapshot| -||* Rename snapshot| -||* Revert to snapshot| -||**vSphere Replication**| -||* Configure replication| -||* Manage replication| -||* Monitor replication| -|**vSAN**|Cluster| -||ShallowRekey| - - -
- -
- - ---- - -# Create VMware Cloud Gateway - -You can use two different PCG installation methods for VMware vSphere. You can use the Palette CLI, or you can use an OVA/OVF template. Review the prerequisites for each option to help you identify the correct installation method. - -
- - - - - - -## Prerequisites - - -- Palette version 4.0.X or greater. - - -- A Palette API key. Refer to the [Create API Key](/user-management/user-authentication#apikey) page for guidance. - - -- Download the Palette CLI from the [Downloads](/spectro-downloads#palettecli) page and install the CLI. Refer to the [Palette CLI Install](/palette-cli/install-palette-cli) guide to learn more. - -- You can set up the PCG as a single or three-node cluster based on your requirements for high availability (HA). The minimum PCG resource requirements are the following. - -
-
-  - Single-node cluster: 2 vCPU, 4 GB memory, 60 GB storage.
-
-  - High-Availability (HA) three-node cluster: 6 vCPU, 12 GB memory, 70 GB storage.
-
-
-- Sufficient available IP addresses within the configured vSphere subnets.
-
-
-
-Self-hosted Palette installations provide a system PCG out-of-the-box and typically do not require a separate, user-installed PCG. However, you can create additional PCGs as needed to support provisioning into remote data centers that do not have a direct incoming connection from the management console.
-
-
-
-## Install PCG
-
-1. On an x86 Linux host, open a terminal session.
-
-
-2. Use the Palette CLI `login` command to authenticate the CLI with Palette. When prompted, enter the information listed in the following table.
-
-<br />
- - ```shell - palette login - ``` - -
- - |**Parameter** | **Description**| - |:-----------------------------|---------------| - |**Spectro Cloud Console** |Enter the Palette endpoint URL. When using the Palette SaaS service, enter `https://console.spectrocloud.com`. When using a self-hosted instance of Palette, enter the URL for that instance. | - |**Allow Insecure Connection** |Enabling this option bypasses x509 verification. Enter `y` if you are using a self-hosted Palette instance with self-signed TLS certificates. Otherwise, enter `n`.| - |**Spectro Cloud API Key** |Enter your Palette API Key.| - |**Spectro Cloud Organization** |Enter your Palette Organization name.| - |**Spectro Cloud Project** |Enter your desired project name within the selected Organization.| - - - -3. Once you have authenticated successfully, invoke the PCG installer by issuing the following command. When prompted, enter the information listed in each of the following tables. - -
- - ```bash - palette pcg install - ``` - -
-
-  |**Parameter** | **Description**|
-  |:-----------------------------|---------------|
-  |**Cloud Type**| Choose VMware.|
-  |**Private Cloud Gateway Name** | Enter a custom name for the PCG. Example: ``vmware-pcg-1``.|
-  |**Share PCG Cloud Account across platform Projects** |Enter `y` if you want the Cloud Account associated with the PCG to be available from all projects within your organization. Enter `n` if you want the Cloud Account to only be available at the tenant admin scope.|
-
-
-4. Next, provide environment configurations for the cluster. Refer to the following table for information about each option.
-
-<br />
- - |**Parameter**| **Description**| - |:-------------|----------------| - |**HTTPS Proxy**|Leave this blank unless you are using an HTTPS Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: `https://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| - |**HTTP Proxy**|Leave this blank unless you are using an HTTP Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: `http://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| - |**No Proxy**|The default is blank. You can add a comma-separated list of local network CIDR addresses, hostnames, and domain names that should be excluded from being a proxy. This setting will be propagated to all the nodes to bypass the proxy server. Example if you have a self-hosted environment: `my.company.com,10.10.0.0/16`.| - |**Proxy CA Certificate Filepath**|The default is blank. You can provide the file path of a CA certificate on the installer host. If provided, this CA certificate will be copied to each host in the PCG cluster during deployment. The provided path will be used on the PCG cluster hosts. Example: `/usr/local/share/ca-certificates/ca.crt`.| - |**Pod CIDR**|Enter the CIDR pool that will be used to assign IP addresses to pods in the PCG cluster. The pod IP addresses should be unique and not overlap with any machine IPs in the environment.| - |**Service IP Range**|Enter the IP address range that will be used to assign IP addresses to services in the PCG cluster. The service IP addresses should be unique and not overlap with any machine IPs in the environment.| - - - - -5. After the environment options, the next set of prompts is for configuring the PCG cluster for the VMware environment. The following table contains information about each prompt. - -
- - |**Parameter** | **Description**| - |-----------------------------------------|----------------| - |**vSphere Endpoint** | vSphere endpoint: FQDN or IP address, without the HTTP scheme `https://` or `http://`.
Example: `vcenter.mycompany.com`| - |**vSphere Username** | vSphere account username.| - |**vSphere Password** | vSphere account password.| - |**Allow Insecure Connection** |Enabling this option bypasses x509 verification. Enter `y` if using a vSphere instance with self-signed TLS certificates. Otherwise, enter `n`.| - - -6. Next, fill out VMware account configurations. Specify values for the following properties. - -
- - |**Parameter** | **Description**| - |-----------------------------------------|----------------| - | **Datacenter** | The data center to target.| - | **Folder**| The folder to target.| - | **Fault Domains**| Specify any fault domains you would like to use.| - | **Cluster**| The compute cluster to use for the PCG deployment.| - | **Network**| The network the PCG cluster will use. | - | **Resource Pool** | The resource pool to target when deploying the PCG cluster.| - | **Storage Type**| Select the datastore and VM Storage policy to apply to the PCG cluster. | - | **NTP Servers**| Specify the IP address for any Network Time Protocol (NTP) servers the PCG cluster can reference.| - | **SSH Public Keys**| Provide the public OpenSSH key for the PCG cluster. Use this key when establishing an SSH connection with the PCG cluster. This prompt will result in the default text editor for the Operating System to open. Vi is the more common text editor used in Linux environments. | - | **Cluster Size** | The number of nodes that will make up the cluster. Available options are **1** or **3** . Use three nodes for a High Availability (HA) cluster. | - - - -7. Specify IP Pool configuration. You have the option to select a static placement or use Dynamic Domain Name Service (DDNS). With static placement, an IP pool is created and the VMs are assigned IP addresses from the selected pool. With DDNS, VMs are assigned IP addresses via DNS. Review the following tables to learn more about each parameter. - -
- - ##### Static Placement Configuration - |**Parameter** | **Description**| - |-----------------------------------------|----------------| - | **IP Start range** | Enter the first address in the PCG IP pool range.| - | **IP End range** | Enter the last address in the PCG IP pool range.| - | **Network Prefix** | Enter the network prefix for the IP pool range. Valid values are network CIDR subnet masks from the range `0 - 32`. Example: `18`.| - | **Gateway IP Address** | Enter the IP address of the static IP gateway.| - | **Name servers** | Comma-separated list of DNS name server IP addresses.| - | **Name server search suffixes (optional)** | Comma-separated list of DNS search domains.| - - ##### DDNS Placement Configuration - |**Parameter** | **Description**| - |-----------------------------------------|----------------| - | **Search domains** | Comma-separated list of DNS search domains.| - - -8. Specify the cluster boot configuration. - -
- - |**Parameter** | **Description**| - |-----------------------------------------|----------------| - | **Patch OS on boot** | This parameter indicates whether or not to patch the OS of the PCG hosts on the first boot.| - | **Reboot nodes once OS patch is applied** | This parameter indicates whether or not to reboot PCG nodes after OS patches are complete. This only applies if the **Patch OS on boot** parameter is enabled.| - - - - -9. Enter the vSphere Machine configuration for the Private Cloud Gateway. - -
-
-  |**Parameter** | **Description**|
-  |-----------------------------------------|----------------|
-  | **CPU** | The number of CPUs in the Virtual Machine. |
-  | **Memory** | The amount of memory to allocate to the Virtual Machine.|
-  | **Storage** | The amount of storage to allocate to the Virtual Machine. |
-
-10. A new PCG configuration file is generated and its location is displayed on the console. You will receive an output similar to the following.
-
-<br />
- - ```bash hideClipboard - ==== PCG config saved ==== - Location: :/home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml - ``` - - - - The ``CloudAccount.apiKey`` and ``Mgmt.apiKey`` values in the **pcg.yaml** are encrypted and cannot be manually updated. To change these values, restart the installation process using the `palette pcg install` command. - - - -The Palette CLI will now provision a PCG cluster in your VMware environment. -If the deployment fails due to misconfiguration, update the PCG configuration file and restart the installer. Refer to the [Edit and Redeploy PCG](/clusters/data-center/vmware#editandredeploypcg) section below. For additional assistance, visit our [Customer Support](https://spectrocloud.atlassian.net/servicedesk/customer/portals) portal. - - -## Validate - -Once installed, the PCG registers itself with Palette. To verify the PCG is registered, use the following steps. - - -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Navigate to the left **Main Menu** and select **Tenant Settings** - - -3. From the **Tenant Settings Menu** click on **Private Cloud Gateways**. Verify your PCG cluster is available from the list of PCG clusters displayed. - - -## Edit and Redeploy PCG - -To change the PCG install values, restart the installation process using the `palette pcg install` command. Use the following steps to redeploy the PCG or restart the install process. - -
-
-1. If needed, make changes to the PCG configuration file that the CLI created during installation. Use a text editor, such as vi or nano, to update the PCG install configuration file.
-
-<br />
- - ```shell hideClipboard - ==== Create PCG reference config ==== - ==== PCG config saved ==== - Location: /Users/demo/.palette/pcg/pcg-20230717114807/pcg.yaml - ``` - - ```bash hideClipboard - vi /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml - ``` - - - -2. To redeploy the PCG, use the `install` command with the flag `--config-file`. Provide the file path to the generated PCG config file that was generated and displayed in the output. - -
- - ```bash hideClipboard - palette pcg install --config-file /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml - ``` - - -
- - - - - -The following points give an overview of what you will do to set up the PCG: - -
- - - Initiate the installation from the tenant portal. - - - - Deploy the gateway installer VM in VMware vSphere. - - - - Launch the cloud gateway from the tenant portal. - - - -Self-hosted Palette installations provide a system gateway out-of-the-box and typically do not require a PCG. However, you can create additional gateways as needed to support provisioning into remote data centers that do not have a direct incoming connection from the management console. - - - - -`video: title: "vsphere-pcg-creation": /pcg-creation-video/vmware.mp4` - -## Prerequisites - - -- Palette version 3.4.X or older. - - -- You can set up the PCG as a single- or three-node cluster based on your requirements for high availability (HA). The minimum PCG resource requirements are the following. - - Single-node cluster: 2 vCPU, 4 GB memory, 60 GB storage. - - - High-Availability (HA) three-node cluster: 6 vCPU, 12 GB memory, 70 GB storage. - -## Install PCG - -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Navigate to the left **Main Menu** and select **Tenant Settings** > **Private Cloud Gateway**. - - -3. Click the **Create Private Cloud Gateway** button and select **VMware**. Private Gateway installation instructions are displayed. - - -4. Copy the gateway-installer link. Alternatively, you can download the OVA and upload it to an accessible location and import it as a local file. - - -## vSphere - Deploy Gateway Installer - -1. Deploy a new OVF template by providing the link to the installer OVA as the URL. - - -2. Proceed through the OVF deployment wizard, selecting the desired Name, Placement, Compute, Storage, and Network options. - - -3. At the **Customize Template** step, specify Palette properties as follows: - -
-
-| **Parameter** | **Value** | **Description** |
-|---|---|---|
-|**Installer Name** | Desired Palette Gateway Name. | The name will be used to identify the gateway instance. Typical environments may only require a single gateway to be deployed. However, multiple gateways may be required to manage clusters across multiple vCenters. We recommend choosing a name that readily identifies the environment for which this gateway instance is being configured.|
-| **Console endpoint** | URL to Palette management platform portal. | Default: https://console.spectrocloud.com |
-|**Pairing Code** | PIN displayed on the Palette management platform portal's 'Create a new gateway' dialogue. | |
-| **SSH Public Key** | Optional key for troubleshooting purposes. | We recommend having an SSH key, as it enables SSH access to the VM as the 'ubuntu' user. |
-| **Pod CIDR** | Optional IP range exclusive to pods. | This range must not overlap with your network CIDR. |
-| **Service cluster IP range** | Optional IP range in the CIDR format exclusive to the service clusters. | This range also must not overlap with either the pod CIDR or your network CIDR. |
-
-
-Proxy environments require additional property settings. The proxy properties may or may not share the same value, but all three properties are required.
-
-
-| **Parameter** | **Value** | **Remarks** |
-|---|---|---|
-|HTTP PROXY | Endpoint for the HTTP proxy server. | This setting will be propagated to all the nodes launched in the proxy network. For example: `http://USERNAME:PASSWORD@PROXYIP:PROXYPORT` |
-| HTTPS PROXY | Endpoint for the HTTPS proxy server. | This setting will be propagated to all the nodes launched in the proxy network. For example: `http://USERNAME:PASSWORD@PROXYIP:PROXYPORT` |
-| NO Proxy | A comma-separated list of vCenter server, local network CIDR, hostnames, and domain names that should be excluded from proxying. | This setting will be propagated to all the nodes to bypass the proxy server. For example: `vcenter.company.com`, `.company.org`, and `10.10.0.0/16` |
-| Certificate | The base64-encoded value of the proxy server's certificate OR the base64-encoded root and issuing certificate authority (CA) certificates used to sign the proxy server's certificate. | Depending on how the certificate is encoded, an additional `=` character may appear at the end of the value. You can use this command to properly encode the certificate: `base64 -w0 | sed "s/=$//"`. |
-
-4. Complete the OVF deployment wizard and wait for the OVA to be imported and the Virtual Machine (VM) to be deployed.
-
-
-5. Power on the VM.
-
-
-## Tenant Portal - Launch Cloud Gateway
-
-1. Close the **Create New Gateway** installation instructions and navigate to the Private Cloud Gateway page under **Tenant Settings** if you have navigated away or logged out.
-
-
-2. Wait for a gateway widget to display on the page and for the **Configure** option to become available. The IP address of the installer VM will be displayed on the gateway widget. This may take a few minutes after the VM is powered on. Failure of the installer to register with Palette within 10 minutes of powering on the Virtual Machine on vSphere might indicate an error. Follow the steps in [Troubleshooting](/clusters/data-center/vmware#troubleshooting) to identify and resolve the issue.
-
-
-3. Click on the **Configure** button to invoke the Palette Configuration dialogue. Provide vCenter credentials and proceed to the next configuration step.
-
-
-4. 
Choose the desired values for the Data Center, Compute Cluster, Datastore, Network, Resource pool, and Folder. Optionally, provide one or more SSH Keys or NTP server addresses. - -
- - Virtual machine port groups and distributed port groups are listed with their names. NSX-T distributed virtual port groups that exist in vSphere will be listed with their name and segment IDs. - - -5. Choose the IP Allocation Scheme - Static IP or DHCP. Selecting static IP enables the option to create an IP pool. To create an IP pool, provide an IP range or a subnet. The IP addresses from the IP pool will be assigned to the gateway cluster. By default, the IP pool is available for use by other tenant clusters. You can prevent this by toggling on the **Restrict to a single cluster** option. - - - - -6. Click on **Confirm** to initiate gateway cluster provisioning. Cluster status should change to **Provisioning** and eventually to **Running**, when the gateway cluster is fully provisioned. This process can take about 10 minutes. - - You can click on the Cloud Gateway widget in the UI to view a detailed provisioning sequence on the **Cluster Details** page. If gateway cluster provisioning results in errors or gets stuck, you can view the details on the **Summary** tab or the **Events** tab of the **Cluster Details** page. - - In certain cases where provisioning of the gateway cluster is stuck or failed due to invalid configuration, you can reset the process from the Cloud Gateway widget. - - -7. When the Gateway transitions to the **Running** state, it is fully provisioned and ready to bootstrap tenant cluster requests. - - -8. Power off the installer OVA that you initially imported at the start of this installation process. - - - -A Gateway cluster installation automatically creates a cloud account using the credentials entered at the time the gateway cluster is deployed. You can use this account to provision clusters across all tenant projects. - - - -
- -
- - -
-
----
-
-
-
-# Upgrade PCG
-
-Palette maintains the OS image and all configurations for the cloud gateway. Periodically, the OS images, configurations, or other components need to be upgraded to resolve security or functionality issues. Palette releases such upgrades when required and notifies administrators through an upgrade notification on the gateway.
-
-Administrators should review the changes and apply them at a suitable time. Upgrading a cloud gateway does not result in any downtime for the Tenant Clusters. During the upgrade process, the provisioning of new clusters might be temporarily unavailable. New cluster requests are queued while the gateway is being upgraded and are processed as soon as the gateway upgrade is complete.
-
- -### Delete a VMware Cloud Gateway - -The following steps need to be performed to delete a cloud gateway: - -1. As a Tenant Administrator, navigate to the **Private Cloud Gateway** page under **Settings**. - - -2. Invoke the **Delete** action on the PCG instance you want to delete. - - -3. The system performs a validation to ensure there are no running tenant clusters associated with the PCG instance being deleted. If such instances are found, an error is displayed. Delete any running tenant clusters and retry deleting the PCG. - - -4. Delete the Gateway Virtual Machines from vSphere. - -
- -### Resize PCG -You can set up the PCG as a single-node cluster or as a three-node cluster for high availability (HA). For production environments, we recommend three nodes. A PCG can be initially set up with one node and resized to three nodes later. Use the following steps to resize a single-node PCG cluster to a three-node PCG cluster. - -1. As a Tenant Administrator, navigate to the **Private Cloud Gateway** page under **Settings**. - - -2. Invoke the resize action for the relevant cloud gateway instance. - - -3. Update the size from one (1) to three (3). - - -4. The gateway upgrade begins shortly after the update. Two new nodes are created on vSphere and the gateway is upgraded to a 3-node cluster. - - -Scaling a 3-node cluster down to a 1-node cluster is not permitted.

A load balancer instance is launched even for a 1-node gateway to support future expansion. -
- -# IP Address Management - -Palette supports both DHCP and Static IP-based allocation strategies for the VMs that are launched during cluster creation. IP Pools can be defined using a range or a subnet. Administrators can define one or more IP pools linked to a PCG. - -Clusters created using a PCG can select from the IP pools linked to the corresponding PCG. By default, IP Pools are shared across multiple clusters but can optionally be restricted to a cluster. - -The following is a description of various IP Pool properties: - -| **Property** | **Description** | -|---|---| -| **Name** | Descriptive name for the IP Pool. This name will be displayed for IP Pool selection when static IP is chosen as the IP allocation strategy | -| **Network Type** | Select **Range** to provide a start and an end IP address. IPs within this range will become part of this pool. Alternately select 'Subnet' to provide the IP range in CIDR format.| -| **Start** | First IP address for a range based IP Pool E.g. 10.10.183.1| -| **End** | Last IP address for a range based IP Pool. E.g. 10.10.183.100 | -| **Subnet** | CIDR to allocate a set of IP addresses for a subnet based IP Pool. E.g. 10.10.183.64/26 | -| **Subnet Prefix** | Network subnet prefix. e.g. /18| -| **Gateway** | Network Gateway E.g. 10.128.1.1 | -| **Name server addresses** | A comma-separated list of name servers. e.g., 8.8.8.8 | -| **Restrict to a Single Cluster** | Select this option to reserve the pool for the first cluster that uses this pool. By default, IP pools can be shared across clusters.| - -# Create a VMware Cloud Account - - -Configuring the private cloud gateway is a prerequisite task. A default cloud account is created when the private cloud gateway is configured. This cloud account can be used to create a cluster. - - - -Enterprise version users should choose the Use System Gateway option. - - -In addition to the default cloud account already associated with the private cloud gateway, new user cloud accounts can be created for the different vSphere users. - -| **Property** | **Description** | -|---|---| -|**Account Name** | Custom name for the cloud account | -| **Private cloud gateway** | Reference to a running cloud gateway| -| **vCenter Server** | IP or FQDN of the vCenter server| -| **Username** | vCenter username| -| **Password** | vCenter password| - -# Deploy a VMware Cluster - -`video: title: "vmware-cluster-creation": ./cluster-creation-videos/vmware.mp4` - -Use the following steps to provision a new VMware cluster. - -
- -1. Provide the basic cluster information like Name, Description, and Tags. Tags are currently not propagated to the Virtual Machines (VMs) deployed on the cloud/data center environments. - - -2. Select a Cluster Profile created for the VMware environment. The profile definition will be used as the cluster construction template. - - -3. Review and override Pack Parameters as desired. By default, parameters for all Packs are set with values defined in the Cluster Profile. - - -4. Provide a vSphere Cloud account and placement information. - - |**Parameter** | **Description**| - |-----------------------------------------|----------------| - | **Cloud Account** | Select the desired cloud account.
VMware cloud accounts with credentials need to be preconfigured
in the Project Settings section. An account is auto-created as
part of the cloud gateway setup and is available for
provisioning of Tenant Clusters if permitted by the administrator.|
- | **Datacenter** |The vSphere data center where the cluster nodes will be launched.|
- | **Deployment Folder** | The vSphere VM Folder where the cluster nodes will be launched.|
- | **Image Template Folder** | The vSphere folder to which the Spectro templates are imported.|
- | **SSH Keys (Optional)** | Public key to configure remote SSH access to the nodes (User: spectro).|
- | **NTP Server (Optional)** | Set up time synchronization for all the running nodes.|
- | **IP Allocation strategy** | DHCP or Static IP|
-
-5. Configure the master and worker node pools. Fill out the input fields on the **Add node pool** page. The following table contains an explanation of the available input parameters.
-
-### Master Pool
-
-|**Parameter** | **Description**|
-|------------------|---------------|
-|**Name** |A descriptive name for the node pool.|
-|**Size** |Number of VMs to be provisioned for the node pool. For the master pool, this number can be 1, 3, or 5.|
-|**Allow worker capability**|Select this option to allow workloads to be provisioned on master nodes.|
-|**[Labels](/clusters/cluster-management/taints#overviewonlabels)**| Add a label to apply placement constraints on a pod, such as a node eligible for receiving the workload.|
-|**[Taints](/clusters/cluster-management/taints#overviewontaints)**|To set toleration to pods and allow (but do not require) the pods to schedule onto nodes with matching taints.|
-|**Instance type** |Select the compute instance type to be used for all nodes in the node pool.|
-|**Availability Zones**| Choose one or more availability zones. Palette provides fault tolerance to guard against hardware failures, network failures, etc., by provisioning nodes across availability zones if multiple zones are selected.|
-|**Disk Size**|Provide the required storage size.|
-
-### Worker Pool
-
-|**Parameter** | **Description**|
-|------------------|---------------|
-|**Name** |A descriptive name for the node pool.|
-|**Enable Autoscaler**|You can enable the autoscaler by toggling the **Enable Autoscaler** button. Autoscaler scales resources up and down between the defined minimum and maximum number of nodes to optimize resource utilization.|
-||Set the scaling limit with the **Minimum Size** and **Maximum Size** fields. Based on the workload, the number of nodes scales up to the maximum value and down to the minimum value.|
-|**Size** |Number of VMs to be provisioned for the node pool.|
-|**Rolling Update**| Rolling update has two available options. Review the [Update Parameter](#update-parameter-table) table below for more details.|
-|**[Labels](/clusters/cluster-management/taints#overviewonlabels)**|Add a label to apply placement constraints on a pod, such as a node eligible for receiving the workload.|
-|**[Taints](/clusters/cluster-management/taints#overviewontaints)**|To set toleration to pods and allow (but do not require) the pods to schedule onto nodes with matching taints.|
-|**Instance type** |Select the compute instance type to be used for all nodes in the node pool.|
-|**Availability Zones**| Choose one or more availability zones. Palette provides fault tolerance to guard against hardware failures, network failures, etc., by provisioning nodes across availability zones if multiple zones are selected.|
-|**Disk Size**|Provide the required storage size.|
-
-6. Review settings and deploy the cluster. 
Provisioning status with details of ongoing provisioning tasks is available to track progress. - - -New worker pools may be added if it is desired to customize certain worker nodes to run specialized workloads. As an example, the default worker pool may be configured with 4 CPUs, 8 GB of memory for general-purpose workloads, and another worker pool with 8 CPUs, 16 GB of memory for advanced workloads that demand larger resources. - - -# Delete a VMware Cluster - -The deletion of a VMware cluster results in the removal of all Virtual machines and associated storage disks created for the cluster. The following tasks need to be performed to delete a VMware cluster: - - -1. Select the cluster to be deleted from the **Cluster** **View** page and navigate to the **Cluster Overview** page. - - -2. Invoke the delete action available on the page: **Cluster** > **Settings** > **Cluster** **Settings** > **Delete** **Cluster**. - - -3. Click **Confirm** to delete. - - -The Cluster Status is updated to **Deleting** while the Cluster Resources are being deleted. Provisioning status is updated with the ongoing progress of the delete operation. Once all resources are successfully deleted, the Cluster Status changes to **Deleted** and is removed from the list of Clusters. - - -The Delete action is only available for Clusters that are fully provisioned. For Clusters that are still in the process of being provisioned, Abort action is available to stop provisioning and delete all resources. - - -# Force Delete a Cluster - -A cluster stuck in the **Deletion** state can be force deleted by the user through the User Interface. The user can go for a force deletion of the cluster, only if it is stuck in a deletion state for a minimum of **15 minutes**. Palette enables cluster force delete from the Tenant Admin and Project Admin scope. - -## To force delete a cluster: - -1. Log in to the Palette Management Console. - - -2. Navigate to the **Cluster Details** page of the cluster stuck in deletion mode. - - - If the deletion status is stuck for more than 15 minutes, click the **Force Delete Cluster** button from the **Settings** dropdown. - - - If the **Force Delete Cluster** button is not enabled, wait for 15 minutes. The **Settings** dropdown will give the estimated time for the auto-enabling of the **Force Delete** button. - - - -If there are any cloud resources still on the cloud, the user should cleanup those resources before going for the force deletion. - diff --git a/content/docs/04-clusters/03-edge.md b/content/docs/04-clusters/03-edge.md deleted file mode 100644 index d096bb0334..0000000000 --- a/content/docs/04-clusters/03-edge.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -title: "Edge" -metaTitle: "Creating new clusters on Spectro Cloud" -metaDescription: "The methods of creating clusters for a speedy deployment on any CSP" -hideToC: false -icon: "hdd" -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -Edge clusters are Kubernetes clusters set up on Edge hosts installed in isolated locations like grocery stores and restaurants versus a data center or cloud environment. These Edge hosts can be bare metal machines or virtual machines and are managed by operators at remote sites. - -Palette provisions workload clusters on Edge hosts from the Palette management console. 
Palette also provides end-to-end cluster management through scaling, upgrades, and reconfiguration operations. - - -Edge computing brings computing and data storage closer to the source, reducing latency and bandwidth issues that result from central computing and improving overall application performance. Industries such as retail, restaurants, manufacturing, oil and gas, cruise ships, healthcare, and 5G telecommunication providers typically have use cases that require content data and processing to be closer to their applications. - - -
- -![A drawing of Edge architecture with humans interacting](/clusters_edge_edge-arch-drawing.png) - -
- - - -The following are some highlights of the comprehensive Palette Edge Solution: - -
- -* Centralized Full Stack Management - - -* Low touch, plug-and-play setup - - -* Support for AMD64 and ARM64 architectures - - -* Immutable update for Kubernetes and operating system (OS) with zero downtime - - -* Distro-agnostic Kubernetes and OS - - -* Secured remote troubleshooting - - -* Scalable from tens to thousands of locations - - -* Support for pre-provisioned and on-site device registration - - - - -Palette's Edge solution is designed for sites that typically have one or more small devices, such as [Intel NUC](https://www.intel.com/content/www/us/en/products/docs/boards-kits/nuc/what-is-nuc-article.html). An instance of Palette optimized for edge computing is installed in the device along with the operating system and Kubernetes. - - -
- - - -Edge is built on top of the open-source project [Kairos](https://kairos.io), which provides a tamper-proof immutable operating system with zero downtime rolling upgrade. - - - -Palette manages the installation and all the Day-2 activities, such as scaling, upgrades, and reconfiguration. - -
- - - -Edge is still in active development and is subject to change. Review the Palette [release notes](/release-notes) for updates and changes. - - - - - -
- - - -# Get Started With Edge - - -To start with Edge, review the [architecture](/clusters/edge/architecture) and the [lifecycle](/clusters/edge/edge-native-lifecycle) resource to gain a high-level understanding of the Edge components and installation process. Next, become familiar with the [EdgeForge workflow](/clusters/edge/edgeforge-workflow). EdgeForge is the workflow you will use to customize the Edge host installation to match your environment and organizational needs - this includes creating the Edge artifacts for Edge hosts. The last step of the Edge deployment lifecycle is the deployment step. Review the [Deployment](/clusters/edge/site-deployment) guide to understand what it takes to deploy an Edge host. - -
- - -# Resources - -- [Edge Native Architecture](/clusters/edge/architecture) - - -- [Deployment Lifecycle](/clusters/edge/edge-native-lifecycle) - - -- [Install Configuration](/clusters/edge/edge-configuration) - - -- [EdgeForge Workflow](/clusters/edge/edgeforge-workflow) - - -- [Site Deployment](/clusters/edge/site-deployment) - -
diff --git a/content/docs/04-clusters/03-edge/01-architecture.md b/content/docs/04-clusters/03-edge/01-architecture.md
deleted file mode 100644
index dcc4ea4a22..0000000000
--- a/content/docs/04-clusters/03-edge/01-architecture.md
+++ /dev/null
@@ -1,136 +0,0 @@
----
-title: "Architecture"
-metaTitle: "Palette Edge Architecture"
-metaDescription: "Learn about Palette Edge and the architecture used to support edge clusters."
-hideToC: false
-fullWidth: false
----
-
-import Tabs from 'shared/components/ui/Tabs';
-import WarningBox from 'shared/components/WarningBox';
-import InfoBox from 'shared/components/InfoBox';
-import PointsOfInterest from 'shared/components/common/PointOfInterest';
-
-# Overview
-
-The following are architectural highlights of Palette-provisioned Edge native clusters.
-
- -* Kubernetes is natively installed on the host. - - - -* Support for AMD64 and ARM64 architectures. - - -* Support for bare metal and virtualized edge devices. - - -* Customizable site properties such as network proxies and certificates. - - -* Configurable Kubernetes API servers to work with virtual IP address (VIP) or Dynamic DNS. - - -* Edge supports adding multiple devices to the site to form a multi-node Kubernetes cluster. - - -* Operating system (OS) images are derived from immutable container-based OS images provided by the [Kairos](http://kairos.io) open-source project. - - -* The installation is bootstrapped using a relatively small distribution-agnostic *Stylus* installer image. The operating system and Kubernetes version are derived from cluster profile settings associated with the edge site and dynamically downloaded and installed. - - -* Palette Edge Distribution supports use cases that require customizing OS packages, device drivers, and more. - -![Architecture diagram of Edge](/native-edge.png "#title=An architecture diagram of Palette and all of the components.") - - -# Minimum Device Requirements - -The following minimum device requirements must be met to deploy an Edge host successfully. - -* 2 CPU - - -* 8 GB Memory - - -* 60 GB Storage - - -If Trusted Platform Module (TPM) is used, it must be TPM 2.0 or greater. - - -# Supported Architectures - -Palette supports AMD64 and ARM64 (beta) architectures for Edge installations. However, we cannot guarantee that all hardware and software configurations will work due to the various options available in the market. We recommend that you test your hardware configuration before deploying to production. - -
- - - - ARM64 support is a preview feature and requires Palette version 4.0.0 or later. - - - - -# Palette Edge Distribution - -Palette provides the following distributions for edge installations. - -|Name|OS |Kubernetes Distro|CNIs|CSIs| -|----|---|----------|----|----| -|Palette Optimized K3s |openSUSE, Ubuntu |K3s |Calico, Flannel|Rook Ceph| -|Palette Optimized RKE2|openSUSE, Ubuntu |RKE2|Calico, Flannel|Rook Ceph| -|[Palette eXtended Kubernetes Edge (PXK-E)](/glossary-all#paletteextendedkubernetesedge(pxk-e))|openSUSE, Ubuntu|CNCF|Calico, Flannel|Rook Ceph| - - -# Supported Configurations - -Palette offers complete flexibility in deploying clusters at edge sites with various aspects you can customize. The table below describes these aspects and the available options. - -| **Parameter** | **Choices** | -|-|-| -| Cluster Mode | - Connected: The site has internet connectivity and the installation is initiated via Palette Management Console
- Air-Gapped: The site does not have internet connectivity. Installation is initiated via the Palette CLI.| -| OS | - Ubuntu
- OpenSUSE
- Bring your own OS (BYOOS) | -| K8s Flavor | - Palette eXtended K8s for Edge FIPS (PXK-E)
- Palette eXtended K8s for Edge (PXK-E)
- Palette Optimized K3s
- Palette Optimized RKE2 | -| K8s Version |- 1.24.x
- 1.25.x
- 1.26.x | -| FIPS Mode |- True: Enforce usage of FIPS packs and other required FIPS configuration to meet FIPS compliance
- False | -| Edge Host Registration Mode | - Manual: A unique Edge host ID is manually entered into the Palette Management Console
- Auto: Edge hosts automatically register with Palette through the use of a registration token supplied in the user data
- QR Code: Scan a QR code that takes you to a web application that registers the Edge host with Palette. This method is considered advanced with the benefit of simplifying the Edge host registration without needing a tenant token or a manual entry. | -| Edge Host Type - Installer Format | Create an ISO image that contains all your dependencies and custom configurations. | - -
- - -# Kubernetes Defaults - -The Kubernetes Packs for Edge Native deployments disable a few items by default to allow users to install those items independently or to avoid duplication. The following items are disabled by default. - -* Traefik - -* SERVICE-lb - -* local-path provisioner - -* Flannel - -**Example Scenario:** - -For the Palette optimized K3s pack, the default network component flannel is disabled to allow the user to independently use any container network interface pack such as Flannel or others, as part of the network layer of a cluster profile. - -The component metrics server is disabled to avoid duplicating it because Palette installs the metrics server by default. - -``` -cluster: - config: - # disable the built in cni - flannel-backend: none - no-flannel: true - disable-network-policy: true - Disable: - - metrics-server -``` - -
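
As a point of reference, the sketch below shows how the remaining built-in components listed above could be turned off in the same `cluster.config` block. This is an illustration only: the lowercase `disable` key and the component names `traefik`, `servicelb`, and `local-storage` follow upstream K3s conventions rather than the Palette pack's documented defaults, so verify them against the pack values before relying on them.

```yaml
cluster:
  config:
    # Keep the built-in CNI disabled so a CNI pack provides the network layer.
    flannel-backend: none
    no-flannel: true
    disable-network-policy: true
    # Upstream K3s names for the other components disabled by default.
    disable:
      - traefik        # built-in ingress controller
      - servicelb      # built-in service load balancer
      - local-storage  # local-path provisioner
      - metrics-server # Palette installs its own metrics server
```
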
diff --git a/content/docs/04-clusters/03-edge/01.5-edge-native-lifecycle.md b/content/docs/04-clusters/03-edge/01.5-edge-native-lifecycle.md deleted file mode 100644 index 813c6cc00f..0000000000 --- a/content/docs/04-clusters/03-edge/01.5-edge-native-lifecycle.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "Lifecycle" -metaTitle: "Edge Deployment Lifecycle" -metaDescription: "Learn about the Edge Deployment Lifecycle" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Edge Deployment Lifecycle - -The typical end-to-end lifecycle of deploying clusters at edge locations involves several distinct phases in which different organizations or teams need to perform specific tasks. - -
- - ![A flow of the lifecycle, starting with model, staging, install, and finally register. Once all phases are complete the cluster provision occurs.](/native-edge-deployment-lifecycle.png) - -* **Modeling** - App owners build and test the applications in test environments and model application profiles in Palette for a cluster installation. - - -* **Staging** - IT/Ops teams prepare an Edge host installer variant from Palette's base installer. In this phase, available customizations that are common to all edge locations are applied to the base installer. This includes specifying or overriding properties such as Palette endpoints, app URL for QR code-based registration, default OS users, and default network settings. The installer variant is then exported to all the site locations. - - -* **Installation** - Site operators provision one or more Edge hosts at edge locations using the image prepared in the staging phase. In this phase, the site operator applies site-specific properties such as static IP address, network proxy, and certificate. - - -* **Registration** - Edge hosts need to be registered with Palette or through registration tokens. Each cluster requires a cluster profile. Clusters are configured with infrastructure and add-on profiles that application architects applied in the modeling phase. - -IT Ops teams can perform this step in two different ways: - - * Deploy an application without having to manage a server to automate edge device registration. We provide a sample application you can customize to fit your needs. - - * Register Edge hosts and configure clusters using the Palette UI, API or Terraform. - -The Palette Edge Management agent inside the Edge host waits for the configuration to be available in Palette. Once registration and configuration are complete, the agent installs the Kubernetes cluster. The agent reads the Kubernetes distribution, version, and configuration properties from the cluster profile. Additional add-ons, if any, are deployed after the Kubernetes installation. You can install a single or multi-node cluster using this process. You can scale up your cluster at a later time after deployment. - -If the edge location configuration is known and predictable, then the IT/Ops team can combine staging, installation, and registration into one step and ship the fully configured Edge hosts to the edge location. The site operator at the edge location only needs to hook up the power and network cables without further device configuration. The Edge cluster will be ready to be centrally managed for future upgrades. - -# Next Steps - -Now that you have an understanding of the deployment lifecycle, start the deployment of your Edge host by reviewing the [Site Deployment](/clusters/edge/site-deployment) instructions. - - -
\ No newline at end of file diff --git a/content/docs/04-clusters/03-edge/02-edge-configuration.md b/content/docs/04-clusters/03-edge/02-edge-configuration.md deleted file mode 100644 index 9ed5c97032..0000000000 --- a/content/docs/04-clusters/03-edge/02-edge-configuration.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: "Install Configuration" -metaTitle: "Install Configuration" -metaDescription: "Learn about the possible Palette Edge install configurations available." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -The Edge Installer is responsible for preparing the Edge host to be ready for workloads. The Edge Installer supports the ability to specify a user data configuration file. You can use this configuration file to customize the installation and ensure your Edge host has all the required dependencies and settings to work properly in your environment. - -To better understand the Edge installation process, review the order of operations. - -
- -### Order of Operations: - -1. Boot device with Edge Installer. - - -2. Edge Installer gets copied to disk. - - -3. Device powers off or reboots based on the user data configuration. - - -4. Upon boot up or reboot, cloud-init stages that are specified in the Edge Installer configuration file take effect. - - -5. Edge Host Registers with Palette. - - -6. Device pairs with Palette. - - -7. Edge Installer identifies cloud-init stages as specified in the OS pack. - - -8. Operating System (OS) is installed on the device. - - -9. Device reboots. - - -10. OS cloud-init stages are applied in the proper order. - - -11. Edge Host is ready for use. - - -![The boot order sequence, listing 9 steps that flow in a sequential order.](/clusters_edge_cloud-init_boot-order-squence.png) - - -The Edge installation process accepts two types of configurations that you can use to customize the installation: Edge Installer Configuration and Edge OS Configuration. - - - -# Edge Installer Configuration - -The Edge installation process expects you to specify installation parameters. You can supply the install parameters in multiple stages. You can provide common installation configurations for all your sites during the manufacturing or staging phases. - -You can also specify additional location-specific configurations at the site during the installation. The install configurations provided in various stages are merged to create the edge host's final configuration. - -# Edge OS Configuration - -The Edge installation process supports the ability for you to customize your operating system (OS) through the usage of cloud-init stages. You can supply Edge configurations during the edge host installation with the Edge Installer and at the Operating System (OS) layer by customizing the OS pack. Once the edge host installation process is complete, the OS stages take effect during the boot-up process. - -
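
To make the two configuration types concrete, the following is a minimal sketch of a user data file that combines installer settings in the `stylus` block with an OS customization supplied as a cloud-init stage. The endpoint, token, and password values are placeholders for illustration only.

```yaml
#cloud-config
stylus:
  site:
    # Edge Installer configuration: how the host reaches and registers with Palette.
    paletteEndpoint: api.spectrocloud.com
    edgeHostToken: <your-registration-token>

# Cloud-init stage used to customize the OS; it takes effect during boot up.
stages:
  initramfs:
    - users:
        kairos:
          passwd: kairos
```

Site-specific values, such as static network settings or proxy details, can then be supplied in a supplementary user data file during the site installation and are merged into the edge host's final configuration.
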
- - - -To effectively use the Edge Installer, we recommend you review the Edge [installer configuration](/clusters/edge/edge-configuration/installer-reference) page so you gain an overview of all the available parameters. - - - - -# Resources - -- [Edge OS Configuration: Cloud-Init Stages](/clusters/edge/edge-configuration/cloud-init) - -- [Edge Install Configuration](/clusters/edge/edge-configuration/installer-reference) diff --git a/content/docs/04-clusters/03-edge/02-edge-configuration/30-cloud-init.md b/content/docs/04-clusters/03-edge/02-edge-configuration/30-cloud-init.md deleted file mode 100644 index d8aa52b51d..0000000000 --- a/content/docs/04-clusters/03-edge/02-edge-configuration/30-cloud-init.md +++ /dev/null @@ -1,305 +0,0 @@ ---- -title: "Cloud Init Stages" -metaTitle: "Edge Install Cloud Init Stages" -metaDescription: "Learn how to use cloud-init stages when installing an Edge device with Palette." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Cloud-Init Stages - -The installation process supports all the cloud-init stages exposed by [Kairos](https://kairos.io/docs/architecture/cloud-init/). Kairos is an open-source project that is used to create immutable images, Kairos is a container layer that enables you to specify dependencies and create resources before locking down the image. - -The following diagram displays the available cloud-init stages you can use to customize the device installation. - -![A diagram that displays all the cloud-init stages supported. The stages are listed in the markdown table below.](/clusters_edge_cloud-init_cloud-init-stages-supported.png) - -You can read more about Kairos and cloud-init by reviewing [Kairo's cloud-init](https://kairos.io/docs/architecture/cloud-init/) resource. For your convenience, all the supported cloud-init stages are listed below. - - -| Stage | Description -|-------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------| -| `rootfs` | This is the earliest stage, running before switching to root. It happens right after the root is mounted in /sysroot and before applying the immutable rootfs configuration. This stage is executed over initrd root, no chroot is applied. | | -| `initramfs` | This is still an early stage, running before switching to root. 
Here you can apply changes to the booting setup of Elemental. Despite executing this before switching to root, this invocation is chrooted into the target root after the immutable rootfs is set up and ready. | | -| `boot` | This stage executes after initramfs has switched to root and during the systemd boot-up process. | | -| `fs` | This stage is executed when fs is mounted and is guaranteed to have access to the state and persistent partitions ( COS_STATE and COS_PERSISTENT respectively). | | -| `network` | This stage executes when the network is available | | -| `reconcile` | This stage executes 5m after boot up and every 60m. | | -| `after-install` | This stage executes after the installation of the OS ends. | | -| `after-install-chroot` | This stage executes after the installation of the OS ends. | | -| `after-upgrade` | This stage executes after the OS upgrade ends. | | -| `after-upgrade-chroot` | This stage executes after the OS upgrade ends (chroot call). | | -| `after-reset` | This stage executes after the OS resets. | | -| `after-reset-chroot` | This stage executes after the OS resets (chroot call). | | -| `before-install` | This stage executes before installation. | | -| `before-upgrade` | This stage executes before the upgrade. | | -| `before-reset` | This stage executes before reset. | | - - - - - Each stage has a before and after hook you can use to achieve more granular customization. For example, you can use `network.after` to verify network connectivity. - - - - -# Where to Apply Cloud-Init Stages - -You may ask yourself where to use cloud-init stages, as both the Edge Installer and the OS pack support the usage of cloud-init stages. Use the following statements to help you decide. -
- - -* If you need to apply a set of configurations to a specific site, then use the Edge Installer user data configuration file and its cloud-init stages to provide site settings to that specific site. - - -* If you have common configurations across a fleet of Edge host devices, customize the OS pack and use the cloud-init stages to apply those configurations. - -# Example Use Cases - - -To help you become familiar with the cloud-init stages and better understand how to use them to achieve your goals, check out the following use cases. - -
- - - -Remember that the following code snippets are only intended to help you understand how cloud-init can be used to customize the edge host. -You can use countless combinations of the Edge Installer and OS cloud-init stages to achieve the desired customization. Check out the Kairos [stages](https://kairos.io/docs/reference/configuration/#stages) resource to learn more about other key terms, options, and advanced examples. - - - -
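
Before moving on to the grouped examples, here is one more small sketch that uses the `network.after` hook mentioned earlier to log whether the Palette endpoint is reachable once networking is up. The stage name and the use of `curl` are assumptions for this illustration — substitute any check that fits your environment.

```yaml
stages:
  network.after:
    - name: verify-palette-connectivity
      commands:
        # Record whether the Palette API endpoint responds once the network is available.
        - curl --silent --show-error --max-time 10 https://api.spectrocloud.com > /dev/null && echo "Palette endpoint reachable" || echo "Palette endpoint unreachable"
```
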
- -## Edge Configuration Stages - -Use the Edge Installer user data to apply specific site configurations to the edge host. - - -
- -#### Set the User Password - -The `initramfs` stage is used to set the password for the user, `kairos`. - -```yaml -stages: - initramfs: - - users: - kairos: - passwd: kairos -``` - -#### Assign a User to a Group - -Another example of the `initramfs`, but this time the user is assigned to the `sudo` group. - -```yaml -stages: - initramfs: - - users: - kairos: - groups: - - sudo -``` - - -#### Assign an SSH Key - -An example configuration of assigning an SSH key to a user. - -```yaml -stages: - initramfs: - - users: - kairos: - ssh_authorized_keys: - - ssh-rsa AAAAB3N… -``` - - -#### Configure a Registry Mirror - -For situations where you need to configure a registry mirror, you can use the following example that uses the `initramfs` stage. - -```yaml -stages: - initramfs: - files: - - path: "/etc/rancher/k3s/registries.yaml" - permissions: 0644 - owner: 0 - group: 0 - content: | - mirrors: - "gcr.io": - endpoint: - - "https://my-mirror.example.com" - rewrite: - "(^.*)": "test/$1" - configs: - "my-mirror.example.com": - auth: - username: "user1" - password: "mysupermagicalpassword" - tls: - insecure_skip_verify: true -``` - -#### Erase Partitions - -You can use the `before-install` stage to remove partitions if needed. - -
- -```yaml -stages: - before-install: - - name: "Erase Old Partitions on Boot Disk" - commands: - - wipefs -a /dev/nvme0n1 -``` - - -#### Install Tooling - -This is an example of installing third-party software or tooling. - -
-
-```yaml
-stages:
-  after-install-chroot:
-    - name: "Install SSM"
-      commands:
-        - snap install amazon-ssm-agent --classic
-```
-
-#### Pass Sensitive Information
-
-If you need to transmit sensitive information, such as credentials, during the site installation phase, you can make the Edge Installer skip copying specific stages to the edge hosts. The Edge Installer will skip copying the stages that follow the `skip-copy-[string]` naming convention. Refer to the [Sensitive Information in the User Data Stages](/clusters/edge/edge-configuration/skip-copying-stages) guide to learn more.
-
- -```yaml -stages: - network.after: - - name: skip-copy-subscribe - if: [ -f "/usr/sbin/subscription-manager" ] - commands: - - subscription-manager register --username "myname" --password 'mypassword' -``` - - -#### Complete Example - -This is an Edge Installer user data configuration that configures the user `kairos` and prepares the edge host by providing network settings and adding SSL certificates. - -
- -```yaml -stages: - boot: - - users: - kairos: - groups: - - sudo - passwd: kairos -stylus: - site: - paletteEndpoint: api.spectrocloud.com - name: edge-randomid - registrationURL: https://edge-registration-app.vercel.app/ - network: - httpProxy: http://proxy.example.com - httpsProxy: https://proxy.example.com - noProxy: 10.10.128.10,10.0.0.0/8 - nameserver: 1.1.1.1 - interfaces: - enp0s3: - type: static - ipAddress: 10.0.10.25/24 - gateway: 10.0.10.1 - nameserver: 10.10.128.8 - enp0s4: - type: dhcp - caCerts: - - | - ------BEGIN CERTIFICATE------ - ***************************** - ***************************** - ------END CERTIFICATE------ - - | - ------BEGIN CERTIFICATE------ - ***************************** - ***************************** - ------END CERTIFICATE------ -``` - - -## OS User Data Stages - -You can also customize the device by using the OS cloud-init stages. As mentioned previously, use OS cloud-init stages to apply common configurations to many edge hosts. - -
- -#### Assign User to Group - -In this example snippet, the OS pack is using the cloud-init stage `initramfs` to assign a default password to the user `kairos` and add the user to the `sudo` group. -
- -```yaml -stages: - initramfs: - - users: - kairos: - groups: - - sudo - passwd: kairos -``` - -#### Custom Commands - -This is an example of moving files to a different location prior to another stage or boot-up process that requires the file. -
- -```yaml -stages: - initramfs: - - name: "Move Files" - commands: - - | - mv /myCLI/customCLI /usr/local/bin/ - rm -R /myCLI -``` - - -#### Update Network Settings - -The network settings will get updated when the `network` stage takes effect. - -
- -```yaml -stages: - network: - - name: "Configure DNS host" - commands: - - echo "10.100.45.98 example.local" >> /etc/hosts -``` - - -#### Invoke Custom Script - -An example of applying logic after the device has booted by using the `boot.after` stage. -
- -```yaml -boot.after: - - | - sftp -i /credentials/ssh/id_rsa.pub@cv543.example.internal.abc:/inventory/2023/site-inventory.json - mv site-inventory.json /location/inventory/ -``` \ No newline at end of file diff --git a/content/docs/04-clusters/03-edge/02-edge-configuration/33-skip-copying-stages.md b/content/docs/04-clusters/03-edge/02-edge-configuration/33-skip-copying-stages.md deleted file mode 100644 index 064950c495..0000000000 --- a/content/docs/04-clusters/03-edge/02-edge-configuration/33-skip-copying-stages.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: "Sensitive User Data Handling" -metaTitle: "Sensitive User Data Handling" -metaDescription: "Learn how to make the Edge installer skip copying the specific user data stages to the edge hosts so that you can use sensitive information in the user data stages." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - -# Sensitive Information in the User Data Stages - -Suppose you must add sensitive information, such as credentials, in your user data configuration file. In the Edge deployment lifecycle, you have two opportunities to apply user data to edge hosts. The first is during the staging phase, where you add the Edge installer to the Edge host. The second opportunity is during the site installation phase, where you can provide supplementary user-data configurations if needed. The diagram below highlights the two mentioned phases in the Edge lifecycle. -
- -![A diagram highlighting the two stages in the edge deployment lifecycle where you can apply user data.](/edge_edge-configuration_cloud-init_user-data.png) -
- -
- -- **Staging Phase** - In the staging phase, you prepare your edge hosts using the organization-level configurations. The configurations include the Edge Installer, the user data, and, optionally, a content bundle. You boot the edge hosts using the Edge Installer and apply the configurations. All the configurations, including the user data, are copied to the edge host during installation. - - Once the edge hosts are prepared with the initial installation, you ship your devices to the site for installation. This step is also called the *installer handoff* step. Refer to the [Prepare Edge Host](/clusters/edge/site-deployment/stage#prepareedgehost) guide to learn more about driving the installer handoff step. - - -- **Site Installation Phase** - In the site installation phase, you use supplementary user data to apply site-specific configurations to the edge hosts. The user data is copied to the edge host during the installation unless you follow the specific naming convention for your user data stages as described below. - - Refer to the [Multiple User Data Use Case](/clusters/edge/edgeforge-workflow/prepare-user-data#multipleuserdatausecase) guide to understand the use cases for applying supplementary user data. If you need to apply a supplementary user data, refer to the [Perform Site Install](/clusters/edge/site-deployment/site-installation) guide to learn the site installation process in detail. - - -In both steps mentioned above, the Edge Installer copies the user data configuration file provided to the **/run/stylus/userdata** file or the **/oem/userdata** file on the edge hosts. If you want to prevent some user data stages from getting copied to the edge host's storage, you can use a specific naming convention to disable the default copy behavior. However, be aware that different persistence behaviors apply depending on which stage of the Edge deployment life cycle you provide sensitive data in the user data configuration file. Refer to the [Sensitive Information in the Site Installation](#sensitiveinformationduringthesiteinstallation) section below to learn more. -
- -## Sensitive Information in the Installer Handoff - -
- - -We do not recommend inserting sensitive information in the user data configuration file provided in the installer handoff phase. Use a supplementary user data configuration file and apply it at the site installation phase. - - -
- -In the installer handoff step, the Edge Installer copies and persists *all* your user data stages into the configuration files on the edge hosts. Copying sensitive information to the edge hosts may pose security risks. Therefore, we recommend you avoid inserting sensitive information in the user data configuration file provided in the installer handoff phase. Use a supplementary user data configuration file and apply it at the site installation phase. - - -
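
For instance, a handoff-phase user data file might carry only non-sensitive, organization-wide settings, leaving credentials for a supplementary site-phase file. The sketch below uses placeholder values and is not a complete configuration.

```yaml
#cloud-config
stylus:
  site:
    # Non-sensitive, organization-wide settings are safe to persist at installer handoff.
    paletteEndpoint: api.spectrocloud.com
    registrationURL: https://edge-registration-app.vercel.app/
# Secrets, such as subscription credentials, belong in a supplementary site-phase
# user data file that uses the skip-copy-[string] naming convention described below.
```
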
-
-## Sensitive Information in the Site Installation
-
-If you want to use sensitive information, such as credentials for patching the OS on your edge hosts, in any user data stage during the site installation phase, you must use the `skip-copy-[string]` naming convention for those stages. Replace the `[string]` placeholder with any meaningful string per your requirements. The Edge Installer will skip copying the stages whose name matches the regular expression `skip-copy-*` to the edge host. The stages will execute as long as the drive containing the user data configuration file is mounted to the edge host. In most cases, the drive will be a bootable USB flash drive.
-
-For example, the `skip-copy-subscribe` stage below follows the `skip-copy-[string]` naming convention. Therefore, the Edge Installer will skip copying the stage to the **/run/stylus/userdata** file or the **/oem/userdata** file on the edge host. The stage and the sensitive information below are marked with the points of interest 1 and 2, respectively.
-
- - - -```yaml -stages: - network.after: - - name: skip-copy-subscribe - if: [ -f "/usr/sbin/subscription-manager" ] - commands: - - subscription-manager register --username "myname" --password 'mypassword' -``` - - - -The stage will execute as long as you have mounted the drive containing the user data configuration file. You must unmount the drive from the edge host after the device registers with Palette and before you deploy a Kubernetes cluster on the device. \ No newline at end of file diff --git a/content/docs/04-clusters/03-edge/02-edge-configuration/35-installer-reference.md b/content/docs/04-clusters/03-edge/02-edge-configuration/35-installer-reference.md deleted file mode 100644 index 9174d52551..0000000000 --- a/content/docs/04-clusters/03-edge/02-edge-configuration/35-installer-reference.md +++ /dev/null @@ -1,411 +0,0 @@ ---- -title: "Installer Configuration" -metaTitle: "Edge Installer Configuration" -metaDescription: "Review the available Edge Installer configuration options." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - -# Overview - -The Edge Installer configuration user data accepts a parameter named `stylus`. In addition to the `stylus` parameter, the user data file also supports the use of cloud-init stages and other Kairos-supported parameters. The `stylus.site` parameter is how you primarily configure the Edge host, but you can also use cloud-init stages to customize the installation. Refer to the [General Parameters](/clusters/edge/edge-configuration/installer-reference#generalparameters) for a list of all the parameters supported in the `stylus.site` parameter block. - -
- - - -The `#cloud-config` value is a required cloud-init header required by the [cloud-init](https://cloudinit.readthedocs.io/en/latest/explanation/format.html) standard. - - - -# User Data Parameters - -## Defaults - -The Edge Installer is configured with a set of default values. - -| Parameter | Default | Description | -| --- | --- | --- | -| `PaletteEndpoint`| `api.console.spectrocloud.com`| The Palette API endpoint. | -| `Prefix`| `edge`| The default prefix to apply to the unique identifier. | -| `RegistrationURL`| `https://edge-registration-generic.vercel.app`| The URL that operators should use when registering the Edge host with Palette.| -| `disableAutoRegister`| `false` | Set to `true` if you want to disable auto registration. Refer to the [Register Edge Host](/clusters/edge/site-deployment/site-installation/edge-host-registration) reference page to learn more about Edge host registrations.| - -The default values assume you are installing the Edge host in an environment without a network proxy, do not require remote access to the Edge host, and are using Palette SaaS. If you have requirements different from the default values, you must provide the Edge Installer with additional information. - -You can provide the installer with additional configuration values in the user data configuration file. The following table contains all the supported user data parameters the installer accepts. - -## Debug Parameters - -You can enable the `debug` and `trace` parameters when you need to troubleshoot Edge Installer issues. - -| Parameter | Description | -| --- | --- | -| `debug` | Enable this parameter for debug output. Allowed values are `true` or `false`. Default value is `false`. | -| `trace` | Enable this parameter to display trace output. Allowed values are `true` or `false`. Default value is `false`.| -| `imageOverride`| You can specify a different Edge Installer image versus the default image. | - -```yaml -#cloud-config -stylus: - debug: true - trace: true - imageOverride: "example.com/example-installer:v1.4.0" -``` - -## Install Mode - -You can specify the mode the Edge Installer should prepare the installation for. The Edge Installer supports two different modes. - -
- -- Connected: The site has internet connectivity and the installation is initiated through Palette. - - -- Air-Gapped: The site does not have internet connectivity. The Installation is initiated through the Palette Edge CLI. - - -| Parameter | Description | -| --- | --- | -| `installationMode` | Allowed values are `connected`. Default value is `connected`. | - -
- -```yaml -#cloud-config -stylus: - installationMode: "connected" -``` - - - - -## External Registry - -You can point the Edge Installer to a non-default registry to load content from another source. Use the `registryCredentials` parameter object to specify the registry configurations. - - -| Parameter | Description | -|----------------------|--------------------------------------------------------| -| `domain` | The domain of the registry. You can use an IP address plus the port or a domain name. | -| `username` | The username to authenticate with the registry. | -| `password` | The password to authenticate with the registry. | -| `insecure` | Whether to allow insecure connections to the registry. Default value is `false`. | - - -
- - -```yaml -#cloud-config -stylus: - registryCredentials: - domain: 10.10.254.254:8000/spectro-images - username: ubuntu - password: - insecure: true -``` - -## Site Parameters - -The `stylus.site` blocks accept the following parameters. - -| Parameter | Description | -| --- | --- | -| `paletteEndpoint` | The URL endpoint that points to Palette. Example: `api.spectrocloud.com` | -| `edgeHostToken` | A token created at the tenant scope that is required for auto registration. | -| `projectUid` | The id of the project the Edge host will belong to. | -| `projectName` | The name of the project. -| `name` | If you do not specify an Edge hostname, the system will generate one from the serial number of the device. If Stylus is unable to identify the serial number, it will generate a random ID instead. In cases where the hardware does not have a serial number, we suggest that you specify a value so there is minimal chance of duplication. Use the value `"$random"` to generate a random ID. You can also use the `DeviceUIDPaths` to read in a value from a system file. | -| `prefix` | The system assigns a prefix value to the device UID generated by the Edge Installer. By default, this value is set to `edge`. | -| `network` | The network configuration settings. Review the [Site Network Parameters](/clusters/edge/edge-configuration/installer-reference#sitenetworkparameters) below for more details. | -| `registrationURL` | The URL that operators should use to register the Edge host with Palette. | -| `insecureSkipVerify` | This controls whether or not a client verifies the server's certificate chain and host name. | -| `caCerts` | The Secure Sockets Layer (SSL) certificate authority (CA) certificates.| -| `clusterId` | The id of the host cluster the edge host belongs to. | -| `clusterName` | The name of the host cluster the edge host belongs to. | -| `tags` | A parameter object you use to provide optional key-value pairs. Refer to the [Tags](#tags) section to learn more. | -| `tagsFromFile` | Specify tags from a file. Refer to the [Tags](#tags) section to learn more. | -| `tagsFromScript` | Use a script to generate the tags. Refer to the [Tags](#tags) section to learn more. | -| `deviceUIDPaths` | Specify the file path for reading in product or board serial that can be used to set the device ID. The default file path is **/sys/class/dmi/id/product_uuid**. Refer to the [Device ID (UID) Parameters](/clusters/edge/edge-configuration/installer-reference#deviceid(uid)parameters) section to learn more.| - -## Site Network Parameters - -Use the site network parameters to configure network settings so the edge host can communicate with Palette. - -| Parameter | Description | -| --- | --- | -| `siteNetwork.httpProxy` | The URL of the HTTP proxy endpoint. | -| `siteNetwork.httpSProxy` | The URL of the HTTPS proxy endpoint. | -| `siteNetwork.noProxy` | The list of IP addresses or CIDR ranges to exclude routing through the network proxy. | -| `siteNetwork.interfaces` | The network settings respective to the interfaces. Review the [Network Parameters](/clusters/edge/edge-configuration/installer-reference#networkparameters) table below for more details. | -| `siteNetwork.nameserver` | The IP address of the global DNS nameserver that requests should be routed to. | - -## Network Parameters - -Network settings specific to the network interface of the edge host. You can configure multiple interfaces. 

| Parameter | Description |
| --- | --- |
| `networkInterface.ipAddress` | The IP address assigned to the network interface. |
| `networkInterface.mask` | The network mask for the assigned IP address. |
| `networkInterface.type` | Defines how the IP address is assigned. Allowed values are `dhcp` or `static`. Defaults to `dhcp`. |
| `networkInterface.gateway` | The network gateway IP address. |
| `networkInterface.nameserver` | The IP address of the DNS nameserver this interface should route requests to. |

## Device ID (UID) Parameters

The device ID is generated by a specific priority sequence. The table below outlines the priority order, from top to bottom, used when generating a UID for the Edge host. UID generation starts with priority one, the device `name`, followed by the attributes within `deviceUIDPaths`, and lastly falls back to a random UUID if all other methods are unsuccessful.

| Priority | Method | Description |
|----------|------------------------------------------------|-------------------------------------------------------------------------------------------------------|
| 1 | `name` | The device name is used as the primary identifier for the Edge host. |
| 2 | `deviceUIDPaths` | Specifies the paths and associated regular expressions to extract the UID. |
| 3 | `"$random"` | Assigns a random UUID as the Edge host ID. |


By default, the product UID path is set to `/sys/class/dmi/id/product_uuid`. To modify this path and use other attributes within the same folder, such as the product or board serial, use the `regex` parameter. For example, instead of the default path **/sys/class/dmi/id/product_uuid**, you can use the board serial number path **/sys/class/dmi/id/board_serial** by applying a `regex` parameter. Refer to the [regex syntax](https://github.com/google/re2/wiki/Syntax) reference guide to learn more.

| Parameter | Description |
|-------------------|-------------------------------------------------------|
| `name` | The path of the file containing the UID. |
| `regex` | The regular expression pattern to match the UID. |

You can also use the `regex` parameter to remove unsupported characters from attribute values. Refer to the warning box below for a list of unsupported characters.
- -```yaml -#cloud-config -stylus: - site: - deviceUIDPaths: - - name: /etc/palette/metadata-regex - regex: "edge.*" -``` - -
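
As a hypothetical variation of the example above, you can point the `name` field at the board serial number path mentioned earlier instead of a custom metadata file. This is a minimal sketch; add a `regex` entry if you need to filter or sanitize the value that is read.

```yaml
#cloud-config
stylus:
  site:
    deviceUIDPaths:
      # Example only: read the device UID from the board serial number.
      - name: /sys/class/dmi/id/board_serial
```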
- - - -The length of the UID truncates to a maximum allowed length of 128 characters. The following characters are unsupported: - -`/ ? # & + % , $ ~ ! @ * () {} | = ; : <> ' . ^ "` - - - - -## Tags - -You can assign tags to the Edge host by specifying tags manually in the configuration file. The tags object accepts key-value pairs. The following example shows how to assign tags manually to the Edge host. - -
- -```yaml -#cloud-config -stylus: - site: - tags: - env: prod - department: engineering -``` - - -You can also specify tags through alternative methods that are more dynamic, such as reading in tags from a file or from a script that returns a JSON object. You can combine the various methods to provide tags to the Edge host. The following sections describe the various methods you can use to provide tags dynamically to the Edge host. - -
- - - -The order of precedence for tags is as follows: -

1. Manually provided tags - `tags`.

2. Tags from a script - `tagsFromScript`.

3. Tags from a file - `tagsFromFile`.

Tags from a higher-priority source override tags from a lower-priority source. For example, if you specify a tag manually and also specify the same tag in `tagsFromFile`, the Edge Installer uses the value from the `tags` object, as illustrated in the example after this note.
- -
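
The following sketch illustrates the precedence rules described in the note above. Both the manually provided `tags` object and a hypothetical tags file define a value for `env`; because manual tags rank highest, the Edge host receives `env: prod`. The `tagsFromFile` settings are described in the next section.

```yaml
#cloud-config
stylus:
  site:
    # Manually provided tags take precedence.
    tags:
      env: prod
    # Hypothetical file that also defines the env key, for example "env:dev".
    tagsFromFile:
      fileName: "/etc/palette/tags.txt"
```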
- -### Tags From a File - -You can specify tags from a file by using the `tagsFromFile` parameter object. The `tagsFromFile` parameter object accepts the following parameters. - -| Parameter | Description | Default Value | -| --- | --- | --- | -| `fileName` | The path to the file containing the tags. | `''` | -| `delimiter` | The delimiter used to separate the key-value pairs. | `\n` | -| `separator` | The separator used to separate the key from the value. | `:` | - -
- -```yaml -#cloud-config -stylus: - site: - tags: - department: 'sales' - tagsFromFile: - fileName: "/etc/palette/tags.txt" - delimiter: ";" - separator: ":" -``` - -Example: - -You can specify different delimiters and separators to parse the content depending on how the content is formatted. Assume the file **/etc/palette/tags.txt** contains the following content. - -
- -```text hideClipboard -Location:Mumbai,India; Latitude:48.856614; Longitude:2.352221; owner:p78125d -``` - -
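
With the `;` delimiter and `:` separator specified in the configuration above, the Edge Installer would parse that file into tags roughly equivalent to the following, in addition to the manually provided `department: 'sales'` tag.

```yaml hideClipboard
Location: Mumbai,India
Latitude: 48.856614
Longitude: 2.352221
owner: p78125d
```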
- -### Tags From a Script - -You can specify tags from a script by using the `tagsFromScript` parameter object. The script must be executable and return a JSON object that contains the tags in the following format. - -

```json hideClipboard
{
  "key": "value"
}
```

Example:
- -```json -{ - "department": "sales", - "owner": "p78125d" -} -``` - - - -The `tagsFromScript` parameter object accepts the following parameters. - -| Parameter | Description | Default Value | -| --- | --- | --- | -| `scriptName` | The path to the script that returns a JSON object. | `''` | -| `timeout` | The timeout value in seconds. | `60` | - -
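
For reference, the script itself can be minimal. The following is a hypothetical sketch of an executable **/etc/palette/tags.sh** that satisfies the requirements described above by printing a JSON object to standard output; the tag values are placeholders.

```bash
#!/bin/bash
# Hypothetical example script: print the site tags as a JSON object.
echo '{"department": "sales", "owner": "p78125d"}'
```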
- -```yaml -#cloud-config -stylus: - site: - tags: - department: 'sales' - tagsFromScript: - scriptName: "/etc/palette/tags.py" - timeout: 60 -``` - - -# Installer Example Configuration - -The following example shows how user data configuration is used to customize the Edge host installation process. - -
- -```yaml -#cloud-config -stylus: - site: - paletteEndpoint: api.spectrocloud.com - edgeHostToken: yourEdgeRegistrationTokenHere - projectUid: 12345677788 - tags: - env: east - terraform_managed: true - os: ubuntu - name: edge-59d3f182-35fe-4e10-b0a0-d7f761f1a142 - - network: - httpProxy: http://proxy.example.com - httpsProxy: https://proxy.example.com - noProxy: 10.10.128.10,10.0.0.0/8 - nameserver: 1.1.1.1 - interfaces: - enp0s3: - type: static - ipAddress: 10.0.10.25/24 - gateway: 10.0.10.1 - nameserver: 10.10.128.8 - enp0s4: - type: dhcp - caCerts: - - | - ------BEGIN CERTIFICATE------ - ***************************** - ***************************** - ------END CERTIFICATE------ - - | - ------BEGIN CERTIFICATE------ - ***************************** - ***************************** - ------END CERTIFICATE------ -``` - -
- - - -Check out the [Prepare User Data](/clusters/edge/edgeforge-workflow/prepare-user-data) resource for more examples. - - - - -# Additional Configurations - -The Edge Installer will honor other Kairos parameters, such as `install`, and `options`. To learn more about Kairos parameters, refer to the [Kairos configuration](https://kairos.io/docs/reference/configuration/) page. - -The following is an example Edge installer configuration that is using the `install` parameter block to power off the device upon completion of the installation process. - -
- -```yaml -#cloud-config -stylus: - site: - paletteEndpoint: api.spectrocloud.com - registrationURL: https://edge-registration.vercel.app - projectUid: yourProjectIdHere - edgeHostToken: yourEdgeRegistrationTokenHere - tags: - myTag: myValue - myOtherTag: myOtherValue - tagsFromScript: - scriptName: /etc/palette/tags.sh - timeout: 30 - reboot: false - -stages: - initramfs: - - users: - palette: - groups: - - sudo - passwd: palette - -install: - poweroff: true -``` diff --git a/content/docs/04-clusters/03-edge/04-edgeforge-workflow.md b/content/docs/04-clusters/03-edge/04-edgeforge-workflow.md deleted file mode 100644 index 4d214a7fed..0000000000 --- a/content/docs/04-clusters/03-edge/04-edgeforge-workflow.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: "EdgeForge Workflow" -metaTitle: "Edge Artifact Builder Workflow" -metaDescription: "Learn how to build your own Edge artifacts customized to your specific needs." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -*EdgeForge* is the process or workflow of preparing an Edge host with all the required components and dependencies. The EdgeForge workflow contains several steps and key elements that you must complete to ensure the Edge host is ready for a successful site deployment. - -EdgeForge contains three critical components. - -
- -* Edge Installer ISO. - - -* Edge Host Agent Container Image. - - -* Edge Provider Container Images. - - - -Each component plays a critical role in the [lifecycle](/clusters/edge/edge-native-lifecycle) of an Edge deployment. Review the [Edge Artifacts](/clusters/edge/edgeforge-workflow#edgeartifacts) section to learn more about each component. - -![A diagram that displays the relationship between the three components and how they relate to an Edge host](/clusters_edge-forge-workflow_edgeforge-workflow_components-diagram.png) - - -# Get Started - - -To start building a custom Edge artifact, use the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide. - - -
- -# Edge Artifacts - -## Edge Installer ISO - -An ISO file that bootstraps the installation is created in the EdgeForge process. The ISO image contains the Edge Installer that installs the Palette Edge host agent and metadata to perform the initial installation. - -
- - ![A diagram breaking up the internal components of the ISO image](/clusters_edge_edgeforge-workflow_iso-diagram.png) - - - -## Edge Host Agent Container Image - -The Edge host agent container image contains the Palette Edge host agent. The agent is responsible for Day-2 operations and management of the Edge host. The Edge host agent also provides ongoing support during cluster runtime. - -

## Edge Provider Container Images

These are [Kairos](https://kairos.io/)-based container images for each supported Operating System (OS) and Kubernetes combination. The Edge Installer downloads these container images during installation and converts them to disk images for the system to boot into.

Palette provides these artifacts out-of-the-box. The container images are hosted in Palette's public container registries or in a private, self-hosted OCI registry, and they are downloaded automatically during installation. You can use the default Palette container registries to familiarize yourself with the installation process. However, in a typical production scenario, you need to customize these artifacts to suit your specific needs or perform some [content bundle](/clusters/edge/edgeforge-workflow/build-content-bundle) optimization.
- - ![A diagram breaking up the internal components of the Edge Provider container images](/clusters_edge_edgeforge-workflow_provider-diagram.png) - - - -
- - - -You can specify a custom registry for the Edge Installer to use during installation with the user data parameter `registryCredentials`. Refer to the [Installer Configuration](/clusters/edge/edge-configuration/installer-reference#externalregistry) reference resource for more details. - - - -
- - - - - - - -# Deployment Scenarios - -The Edge Installer supports various deployment scenarios. You can customize your Edge host deployment by using the Edge Installer configuration user data, creating content bundles, and creating a custom Edge artifact. Below are a few common scenarios that organizations encounter when deploying an Edge host that requires customization. If you have a similar scenario, use the CLIs to help you with the customization. - -
- -- **Additional Packages**: -You may need to install additional OS packages for your specific needs, such as an NVIDIA driver or a network package essential for your hardware to establish an outgoing network connection. These additional OS packages would need to be added to the Edge Installer and the Edge Provider images. - - -- **Installer OS Restriction**: -Palette's out-of-the-box Edge Installer is based on the OpenSUSE OS. If you want to install an Ubuntu or an RHEL-based Edge cluster, you may need an Edge Installer based on another OS. - - -- **Optimize Bandwidth**: -In your Edge environments, you may have internet connectivity but limited bandwidth. You can optimize the installation process by embedding all the required components such as the Edge Host Container Image, the Edge Provider Container Images, and content bundles into the Edge Installer. By embedding the required components in the Edge Installer, you remove the need to download the components during installation. - - -- **Bootstrap Install Configuration**: -You can embed the Edge Installer configuration user data into the Edge Installer. This removes the need to create separate user data uploaded as an ISO through a USB drive. Check out the [Prepare User Data](/clusters/edge/edgeforge-workflow/prepare-user-data) guide to learn more about user data and when to use multiple user data files. - - -- **Bring Your Own OS (BYOOS)**: -For environments that require a different runtime OS, you can specify another OS through the [BYOOS](/integrations/byoos) option. Follow the instructions in the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide to learn more about how you can customize the OS used in an Edge deployment. - -
- -# Resources - - -- [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) - - -- [Build Preloaded Content Bundles](/clusters/edge/edgeforge-workflow/build-content-bundle) - - -- [Prepare User Data](/clusters/edge/edgeforge-workflow/prepare-user-data) \ No newline at end of file diff --git a/content/docs/04-clusters/03-edge/04-edgeforge-workflow/03-prepare-user-data.md b/content/docs/04-clusters/03-edge/04-edgeforge-workflow/03-prepare-user-data.md deleted file mode 100644 index ee131f3f71..0000000000 --- a/content/docs/04-clusters/03-edge/04-edgeforge-workflow/03-prepare-user-data.md +++ /dev/null @@ -1,301 +0,0 @@ ---- -title: "Prepare User Data" -metaTitle: "Prepare User Data - Stage user data" -metaDescription: "Learn about building your staging user data" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -The Edge Installer supports using a custom configuration file in the format of a YAML that you can use to customize the installation process. You can provide the customized configuration to the Edge Installer as a user data file. - -
- - - - -Review the Edge [Install Configuration](/clusters/edge/edge-configuration/installer-reference) resource to learn more about all the supported configuration parameters you can use in the configuration user data. - - - -You can also use the Operating System (OS) pack to apply additional customization using cloud-init stages. Both the Edge Installer configuration file and the OS pack support the usage of cloud-init stages. Refer to the [Cloud-Init Stages](/clusters/edge/edge-configuration/cloud-init) to learn more. - -# User Data Samples - -You may encounter the following scenarios when creating an Edge Installer configuration user data file. Use these examples as a starting point to help you create user data configurations that fit your needs. - -## Sample User Data -```yaml -#cloud-config -stylus: - site: - # The Palette API endpoint to use. The default value is api.spectrocloud.com. - paletteEndpoint: api.spectrocloud.com - - # The edgeHostToken is an auto-registration token to register the Edge host with Palette upon bootup. - # This token can be generated by navigating to the Tenant Settings -> Registration Token. - # Specify a name and select the project id with which the Edge host should register itself. - edgeHostToken: aUAxxxxxxxxx0ChYCrO - - # The Palette project ID the Edge host should pair with. This is an optional field if an edgeHostToken is used and the token was assigned to a project. - projectUid: 12345677788 - # Tags which that will be assigned to the device as labels. - tags: - key1: value1 - key2: value2 - key3: value3 - - # The device's name, may also be referred to as the Edge ID or Edge host ID. If no Edge hostname is specified, - # a hostname will be generated from the device serial number. If the Edge Installer cannot identify the device serial number, then a random ID will - # be generated and used instead. In the case of hardware that does not have a serial number, we recommended specifying a - # random name, with minimal chances of being re-used by a different Edge host. - name: edge-appliance-1 - - # Optional - # If the Edge host requires a proxy to connect to Palette or to pull images, then specify the proxy information in this section - network: - # configures http_proxy - httpProxy: http://proxy.example.com - # configures https_proxy - httpsProxy: https://proxy.example.com - # configures no_proxy - noProxy: 10.10.128.10,10.0.0.0/8 - - # Optional: configures the global nameserver for the system. - nameserver: 1.1.1.1 - # configure interface specific info. If omitted all interfaces will default to dhcp - interfaces: - enp0s3: - # type of network dhcp or static - type: static - # Ip address including the mask bits - ipAddress: 10.0.10.25/24 - # Gateway for the static ip. - gateway: 10.0.10.1 - # interface specific nameserver - nameserver: 10.10.128.8 - enp0s4: - type: dhcp - caCerts: - - | - ------BEGIN CERTIFICATE------ - ***************************** - ***************************** - ------END CERTIFICATE------ - - | - ------BEGIN CERTIFICATE------ - ***************************** - ***************************** - ------END CERTIFICATE------ - -# There is no password specified to the default kairos user. You must specify authorized keys or passwords to access the Edge host console. -stages: - initramfs: - - users: - kairos: - groups: - - sudo - passwd: kairos -``` - -## Connected Sites - Multiple User Data Configuration - -In this example, two configuration user user data files are used. 
The first one is used in the staging phase and is included with the Edge Installer image. Note how the first user data contains the registration information and creates a user group. A bootable USB stick applies the second user data at the physical site. The secondary user data includes network configurations specific to the edge location. - -**Staging** - included with the Edge Installer. - -```yaml -#cloud-config -stylus: - site: - paletteEndpoint: api.spectrocloud.com - edgeHostToken: - tags: - city: chicago - building: building-1 - -install: - poweroff: true - -stages: - initramfs: - - users: - kairos: - groups: - - sudo - passwd: kairos -``` - -**Site** - supplied at the edge location through a bootable USB drive. If specified, the `projectName` value overrides project information specified in the `edgeHostToken` parameter. You can add optional tags to identify the city, building, and zip-code. If the edge site requires a proxy for an outbound connection, provide it in the network section of the site user data. - -```yaml -#cloud-config -stylus: - site: - projectName: edge-sites - tags: - zip-code: 95135 -``` - -## Connected Sites - Single User Data - -This example configuration is for a *connected site*. -In this scenario, only a single Edge Installer configuration user data is used for the entire deployment process. - -
- -```yaml -#cloud-config -stylus: - site: - paletteEndpoint: api.spectrocloud.com - edgeHostToken: - projectName: edge-sites - tags: - city: chicago - building: building-1 - zip-code: 95135 - -install: - poweroff: true - -stages: - initramfs: - - users: - kairos: - groups: - - sudo - passwd: kairos -``` - -## Apply Proxy & Certificate Settings - -This example showcases how you can include network settings in a user data configuration. - -```yaml -#cloud-config -stylus: - site: - paletteEndpoint: api.spectrocloud.com - edgeHostToken: - projectName: edge-sites - tags: - city: chicago - building: building-1 - zip-code: 95135 - network: - httpProxy: http://proxy.example.com - httpsProxy: https://proxy.example.com - noProxy: 10.10.128.10,10.0.0.0/8 - nameserver: 1.1.1.1 - # configure interface specific info. If omitted all interfaces will default to dhcp - interfaces: - enp0s3: - # type of network dhcp or static - type: static - # Ip address including the mask bits - ipAddress: 10.0.10.25/24 - # Gateway for the static ip. - gateway: 10.0.10.1 - # interface specific nameserver - nameserver: 10.10.128.8 - enp0s4: - type: dhcp - caCerts: - - | - ------BEGIN CERTIFICATE------ - ***************************** - ***************************** - ------END CERTIFICATE------ - - | - ------BEGIN CERTIFICATE------ - ***************************** - ***************************** - ------END CERTIFICATE------ - -install: - poweroff: true - -stages: - initramfs: - - users: - kairos: - groups: - - sudo - passwd: kairos -``` - -## Load Content From External Registry - -In this example, content is downloaded from an external registry. - -```yaml -#cloud-config -stylus: - registryCredentials: - domain: 10.10.254.254:8000/spectro-images - username: ubuntu - password: - insecure: true - site: - debug: true - insecureSkipVerify: false - paletteEndpoint: api.console.spectrocloud.com - name: edge-appliance-1 - caCerts: - - | - -----BEGIN CERTIFICATE----- - - -----END CERTIFICATE----- - -install: - poweroff: false - -stages: - initramfs: - - users: - kairos: - groups: - - sudo - passwd: kairos -``` - - -# Multiple User Data Use Case - -If you don't need to apply any unique configurations on the device once it arrives at the physical site, then your site deployment flow would look like the following. - -![The flow of an install process not requiring additional customization](/clusters_site-deployment_prepare-edge-configuration_install-flow.png) - -Should you need to apply different configurations once the device arrives at the physical site, you can use a secondary user data to support this use case. - -Use the additional user data to override configurations from the previous user data that was flashed into the device or to inject new configuration settings. Using secondary user data at the physical site is a common pattern for organizations that need to change settings after powering on the Edge host at the physical location. - -To use additional user data, create a bootable device, such as a USB stick, that contains the user data in the form of an ISO image. The Edge Installer will consume the additional user data during the installation process. - -![The flow of an install process with an additional customization occurring at the physical site. 
The additional customization is using a USB stick to upload the new user data.](/clusters_site-deployment_prepare-edge-configuration_install-flow-with-more-user-data.png) - -When creating your Edge Installer, you can embed the user data into the installer image to eliminate providing it via a USB drive. - -In the staging phase, you may identify user data parameter values that apply uniformly to all your edge sites. But you may also have some edge locations that require different configurations such as site network proxy, site certs, users and groups, etc. -Site-specific configurations are typically not included in the Edge installer image. For the latter scenario, you can use a secondary user data configuration. Refer to the [Apply Site User Data](/clusters/edge/site-deployment/site-installation/site-user-data) guide to learn more about applying secondary site-specific user data. - -
- - - -For your initial testing, your user data may include global settings and site-specific properties in a single user data. As you gain more experience, you should evaluate whether secondary site-specific user data is a better design for your use case. - - - - - -# Next Steps - -The last step of the EdgeForce workflow is to build the Edge artifacts. Check out the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide to learn how to create the Edge artifacts. diff --git a/content/docs/04-clusters/03-edge/04-edgeforge-workflow/03.5-palette-canvos.md b/content/docs/04-clusters/03-edge/04-edgeforge-workflow/03.5-palette-canvos.md deleted file mode 100644 index 632a75438a..0000000000 --- a/content/docs/04-clusters/03-edge/04-edgeforge-workflow/03.5-palette-canvos.md +++ /dev/null @@ -1,816 +0,0 @@ ---- -title: "Build Edge Artifacts" -metaTitle: "Build Edge Artifacts" -metaDescription: "Learn how to build Edge artifacts, such as the Edge Installer ISO and provider images using Spectro Cloud's CanvOS utility." -icon: "" -hideToC: false -fullWidth: false -hideToCSidebar: false -hiddenFromNav: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from "shared/components/common/PointOfInterest"; - -# Build Edge Artifacts -Palette's Edge solution requires Edge hosts to be ready with the required dependencies and [user data](/clusters/edge/edge-configuration/installer-reference) configurations before deploying a Kubernetes cluster. An Edge host requires the following artifacts to prepare for successful cluster deployment: - -
- -* **Edge installer ISO image** - This bootable ISO image installs the necessary dependencies and configurations on a bare host machine. During installation, the host machine will boot from the Edge installer ISO, partition the disk, copy the image content to the disk, install the Palette Edge host agent and metadata, and perform several configuration steps. These configuration steps include registering the host with Palette, setting user privileges, and configuring network or security settings. -
- -* **Provider Images** - These are [Kairos](https://kairos.io/)-based images containing the OS and the desired Kubernetes versions. These images install an immutable Operating System (OS) and software dependencies compatible with a specific Kubernetes version at runtime, i.e., during the cluster deployment. A provider image is used in the OS and the Kubernetes layer when creating a cluster profile. - - -In this guide, you will use the utility, [CanvOS](https://github.com/spectrocloud/CanvOS/blob/main/README.md), to build an Edge installer ISO image and provider images for all the Palette-supported Kubernetes versions. The utility builds multiple provider images, so you can use either one that matches the desired Kubernetes version you want to use with your cluster profile. - - -The diagram below shows the high-level steps to building the Edge artifacts and pushing the provider images to an image registry. - - - -![Overarching diagram showing the workflow in the current guide.](/tutorials/palette-canvos/clusters_edge_palette-canvos_artifacts.png) - - -This guide presents two workflows - Basic and Advanced. - -The basic workflow has minimal customizations and offers a quick start to build Edge artifacts. This workflow builds an Ubuntu based Edge installer ISO and provider images. You will also push the provider images to the default image registry, [ttl.sh](https://ttl.sh/). - -The advanced workflow uses more customization options. This workflow builds an openSUSE based Edge installer ISO and provider images. You will push the provider images to your Docker Hub image registry. - -You can follow either of the workflows below that suits your use case. -
- - - - -
- -## Prerequisites - -To complete this basic guide, you will need the following items: -
- -* A physical or virtual Linux machine with *AMD64* (also known as *x86_64*) processor architecture to build the Edge artifacts. You can issue the following command in the terminal to check your processor architecture. -
- - ```bash - uname -m - ``` - -* Minimum hardware configuration of the Linux machine: - - 4 CPU - - 8 GB memory - - 50 GB storage - - -* [Git](https://cli.github.com/manual/installation). You can ensure git installation by issuing the `git --version` command. - - -* [Docker Engine](https://docs.docker.com/engine/install/) version 18.09.x or later. You can use the `docker --version` command to view the existing Docker version. You should have root-level or `sudo` privileges on your Linux machine to create privileged containers. - - -* A [Spectro Cloud](https://console.spectrocloud.com) account. If you have not signed up, you can sign up for a [free trial](https://www.spectrocloud.com/free-tier/). - - -* Palette registration token for pairing Edge hosts with Palette. You will need tenant admin access to Palette to generate a new registration token. For detailed instructions, refer to the [Create Registration Token](/clusters/edge/site-deployment/site-installation/create-registration-token) guide. - - - -## Instructions - -Use the following instructions on your Linux machine to create all the required Edge artifacts with minimal customization. -
- -1. Check out the [CanvOS](https://github.com/spectrocloud/CanvOS) GitHub repository containing the starter code. - -
- - ```bash - git clone https://github.com/spectrocloud/CanvOS.git - ``` - - - - - -2. Change to the **CanvOS/** directory. -
- - ```bash - cd CanvOS - ``` - - -3. View the available [git tag](https://github.com/spectrocloud/CanvOS/tags). -
- - ```bash - git tag - ``` - - -4. Check out the newest available tag. This guide uses **v3.4.3** tag as an example. -
- - ```shell - git checkout v3.4.3 - ``` - - -5. Review the files relevant for this guide. - - **.arg.template** - A sample **.arg** file that defines arguments to use during the build process. - - **Dockerfile** - Embeds the arguments and other configurations in the image. - - **Earthfile** - Contains a series of commands to create target artifacts. - - **earthly.sh** - Script to invoke the Earthfile, and generate target artifacts. - - **user-data.template** - A sample user-data file. -
- - -6. Issue the command below to assign an image tag value that will be used when creating the provider images. This guide uses the value `palette-learn` as an example. However, you can assign any lowercase and alphanumeric string to the `CUSTOM_TAG` argument. -
- - ```bash - export CUSTOM_TAG=palette-learn - ``` -
- -7. Issue the command below to create the **.arg** file containing the custom tag. The remaining arguments in the **.arg** file will use the default values. For example, `ubuntu` is the default operating system, `demo` is the default tag, and [ttl.sh](https://ttl.sh/) is the default image registry. Refer to the existing **.arg.template** file in the current directory or the [README](https://github.com/spectrocloud/CanvOS#readme) to learn more about the available customizable arguments. -
- - - - The default ttl.sh image registry is free and does not require a sign-up. Images pushed to ttl.sh are ephemeral and will expire after the 24 hrs time limit. Should you need to use a different image registry, refer to the Advanced workflow in the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide. - - - - Using the arguments defined in the **.arg** file, the final provider images you generate will have the following naming convention, `[IMAGE_REGISTRY]/[IMAGE_REPO]:[CUSTOM_TAG]`. For example, one of the provider images will be `ttl.sh/ubuntu:k3s-1.25.2-v3.4.3-demo`. -
- - ```bash - cat << EOF > .arg - CUSTOM_TAG=$CUSTOM_TAG - IMAGE_REGISTRY=ttl.sh - OS_DISTRIBUTION=ubuntu - IMAGE_REPO=ubuntu - OS_VERSION=22 - K8S_DISTRIBUTION=k3s - ISO_NAME=palette-edge-installer - PE_VERSION=$(git describe --abbrev=0 --tags) - platform=linux/amd64 - EOF - ``` - - View the newly created file to ensure the customized arguments are set correctly. -
- - ```bash - cat .arg - ``` -
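
   The output should look similar to the following. The `PE_VERSION` value reflects the tag you checked out, so it may differ if you used a newer release tag.

   ```shell hideClipboard
   # Output
   CUSTOM_TAG=palette-learn
   IMAGE_REGISTRY=ttl.sh
   OS_DISTRIBUTION=ubuntu
   IMAGE_REPO=ubuntu
   OS_VERSION=22
   K8S_DISTRIBUTION=k3s
   ISO_NAME=palette-edge-installer
   PE_VERSION=v3.4.3
   platform=linux/amd64
   ```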
- -8. Issue the command below to save your tenant registration token to an environment variable. Replace `[your_token_here]` with your actual registration token. -
- - ```bash - export token=[your_token_here] - ``` -
- -9. Use the following command to create the **user-data** file containing the tenant registration token. Also, you can click on the *Points of Interest* numbers below to learn more about the main attributes relevant to this example. -
- - - - ```shell - cat << EOF > user-data - #cloud-config - stylus: - site: - edgeHostToken: $token - install: - poweroff: true - users: - - name: kairos - passwd: kairos - EOF - ``` - - -
- - View the newly created user data file to ensure the token is set correctly. -
- - ```bash - cat user-data - ``` -
- -10. The CanvOS utility uses [Earthly](https://earthly.dev/) to build the target artifacts. Issue the following command to start the build process. -
- - ```bash - sudo ./earthly.sh +build-all-images - ``` - - ```bash coloredLines=2-2 hideClipboard - # Output condensed for readability - ===================== Earthly Build SUCCESS ===================== - Share your logs with an Earthly account (experimental)! Register for one at https://ci.earthly.dev. - ``` -
- - - - If you plan to build Edge artifacts using a content bundle, use the `+build-provider-images` option instead of the `+build-all-images` option in the command above. The command, `sudo ./earthly.sh +build-provider-images`, will build the provider images but not the Edge installer ISO. - - - - This command may take up to 15-20 minutes to finish depending on the resources of the host machine. Upon completion, the command will display the manifest, as shown in the example below, that you will use in your cluster profile later in this tutorial. Note that the `system.xxxxx` attribute values in the manifest example are the same as what you defined earlier in the **.arg** file. - - Copy and save the output attributes in a notepad or clipboard to use later in your cluster profile. -

    ```yaml
    pack:
      content:
        images:
          - image: "{{.spectro.pack.edge-native-byoi.options.system.uri}}"
        options:
          system.uri: "{{ .spectro.pack.edge-native-byoi.options.system.registry }}/{{ .spectro.pack.edge-native-byoi.options.system.repo }}:{{ .spectro.pack.edge-native-byoi.options.system.k8sDistribution }}-{{ .spectro.system.kubernetes.version }}-{{ .spectro.pack.edge-native-byoi.options.system.peVersion }}-{{ .spectro.pack.edge-native-byoi.options.system.customTag }}"
          system.registry: ttl.sh
          system.repo: ubuntu
          system.k8sDistribution: k3s
          system.osName: ubuntu
          system.peVersion: v3.4.3
          system.customTag: palette-learn
          system.osVersion: 22
    ```
- - -11. List the Docker images to review the provider images created. By default, provider images for all the Palette's Edge-supported Kubernetes versions are created. You can identify the provider images by reviewing the image tag value you used in the **.arg** file's `CUSTOM_TAG` argument. -
- - ```shell - docker images --filter=reference='*/*:*palette-learn' - ``` - - ```bash coloredLines=3-4 hideClipboard - # Output - REPOSITORY TAG IMAGE ID CREATED SIZE - ttl.sh/ubuntu k3s-1.25.2-v3.4.3-palette-learn b3c4956ccc0a 6 minutes ago 2.49GB - ttl.sh/ubuntu k3s-1.24.6-v3.4.3-palette-learn fe1486da25df 6 minutes ago 2.49GB - ``` -
- - -12. To use the provider images in your cluster profile, push them to the image registry mentioned in the **.arg** file. The current example uses the [ttl.sh](https://ttl.sh/) image registry. This image registry is free to use and does not require a sign-up. Images pushed to *ttl.sh* are ephemeral and will expire after the 24 hrs time limit. Use the following commands to push the provider images to the *ttl.sh* image registry. -
- - ```bash - docker push ttl.sh/ubuntu:k3s-1.25.2-v3.4.3-palette-learn - docker push ttl.sh/ubuntu:k3s-1.24.6-v3.4.3-palette-learn - ``` -
- - - - As a reminder, [ttl.sh](https://ttl.sh/) is a short-lived image registry. If you do not use these provider images in your cluster profile within 24 hours of pushing to *ttl.sh*, they will expire and must be re-pushed. Refer to the Advanced workflow in the current guide to learn how to use another registry, such as Docker Hub, and tag the docker images accordingly. - - -
- - -13. After pushing the provider images to the image registry, open a web browser and log in to [Palette](https://console.spectrocloud.com). Ensure you are in the **Default** project scope before creating a cluster profile. - - -14. Navigate to the left **Main Menu** and select **Profiles**. Click on the **Add Cluster Profile** button, and fill out the required basic information fields to create a cluster profile for Edge. - - -15. Add the following [BYOS Edge OS](/integrations/byoos) pack to the OS layer in the **Profile Layers** section. - - |**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| - |---|---|---|---| - |OS|Public Repo|BYOS Edge OS|`1.0.0`| - - -16. Replace the the cluster profile's BYOOS pack manifest with the following custom manifest so that the cluster profile can pull the provider image from the ttl.sh image registry. - - The `system.xxxxx` attribute values below refer to the arguments defined in the **.arg** file. If you modified the arguments in the **.arg** file, you must modify the attribute values below accordingly. -

    ```yaml
    pack:
      content:
        images:
          - image: "{{.spectro.pack.edge-native-byoi.options.system.uri}}"
        options:
          system.uri: "{{ .spectro.pack.edge-native-byoi.options.system.registry }}/{{ .spectro.pack.edge-native-byoi.options.system.repo }}:{{ .spectro.pack.edge-native-byoi.options.system.k8sDistribution }}-{{ .spectro.system.kubernetes.version }}-{{ .spectro.pack.edge-native-byoi.options.system.peVersion }}-{{ .spectro.pack.edge-native-byoi.options.system.customTag }}"
          system.registry: ttl.sh
          system.repo: ubuntu
          system.k8sDistribution: k3s
          system.osName: ubuntu
          system.peVersion: v3.4.3
          system.customTag: palette-learn
          system.osVersion: 22
    ```
    The screenshot below displays how to reference a provider image in the BYOOS pack of your cluster profile.

    ![Screenshot of a sample cluster profile's OS layer ](/tutorials/palette-canvos/clusters_edge_palette-canvos_edit_profile.png)
- - - - The BYOOS pack's `system.uri` attribute references the Kubernetes version selected in the cluster profile by using the `{{ .spectro.system.kubernetes.version }}` [macro](/clusters/cluster-management/macros). This is how the provider images you created and pushed to a registry are tied to the OS and Kubernetes version you selected in the **.arg** file. - - - -17. Add the following **Palette Optimized K3s** pack to the Kubernetes layer of your cluster profile. Select the k3s version 1.25.x because earlier in this how-to guide, you pushed a provider image compatible with k3s v1.25.2 to the ttl.sh image registry. - - |**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| - |---|---|---|---| - |Kubernetes|Public Repo|Palette Optimized k3s|`1.25.x`| - - -18. Add the network layer to your cluster profile, and choose a Container Network Interface (CNI) pack that best fits your needs, such as Calico, Flannel, Cilium, or Custom CNI. For example, you can add the following network layer. This step completes the core infrastructure layers in the cluster profile. - - |**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| - |---|---|---|---| - |Network|Public Repo|Calico|`3.25.x`| - - -19. Add add-on layers and manifests to your cluster profile per your requirements. - - -20. If there are no errors or compatibility issues, Palette displays the newly created complete cluster profile for review. Verify the layers you added, and finish creating the cluster profile. -
- - -## Validate -List the Edge installer ISO image and checksum by issuing the following command from the **CanvOS/** directory. -
- -```shell -ls build/ -``` - -```shell hideClipboard -# Output -palette-edge-installer.iso -palette-edge-installer.iso.sha256 -``` - -You can validate the ISO image by creating a bootable USB flash drive using any third-party software and attempting to flash a bare host machine. Most software that creates a bootable USB drive will validate the ISO image. Here, the flash process means installing the necessary tools and configurations on a host machine. - - -
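
Optionally, you can confirm that the ISO matches its generated checksum. A minimal check, assuming the **.sha256** file contains the expected digest, is to compute the hash of the ISO and compare the two values.

```shell
# Compute the digest of the ISO and compare it with the generated checksum file.
sha256sum build/palette-edge-installer.iso
cat build/palette-edge-installer.iso.sha256
```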
- - -
- -## Prerequisites - -To complete this advanced guide, you will need the following items: -
- -* A physical or virtual Linux machine with *AMD64* (also known as *x86_64*) processor architecture to build the Edge artifacts. You can issue the following command in the terminal to check your processor architecture. -
- - ```bash - uname -m - ``` - -* Minimum hardware configuration of the Linux machine: - - 4 CPU - - 8 GB memory - - 50 GB storage - - -* [Git](https://cli.github.com/manual/installation). You can ensure git installation by issuing the `git --version` command. - - -* [Docker Engine](https://docs.docker.com/engine/install/) version 18.09.x or later. You can use the `docker --version` command to view the existing Docker version. You should have root-level or `sudo` privileges on your Linux machine to create privileged containers. - - -* A [Spectro Cloud](https://console.spectrocloud.com) account. If you have not signed up, you can sign up for a [free trial](https://www.spectrocloud.com/free-tier/). - - -* Palette registration token for pairing Edge hosts with Palette. You will need tenant admin access to Palette to generate a new registration token. For detailed instructions, refer to the [Create Registration Token](/clusters/edge/site-deployment/site-installation/create-registration-token) guide. - - -* An account with [Docker Hub](https://hub.docker.com/). If you do not have an account with Docker Hub already, refer to the [Create an account](https://docs.docker.com/docker-id/) page for signing-up instructions. -
- - - - This guide uses Docker Hub as an example. You can use any other image registry that suit your requirements. - - -
- -* A public repository named `opensuse-leap` in your image registry. Refer to the [Create a repository](https://docs.docker.com/docker-hub/repos/create/#create-a-repository) instructions for creating a Docker Hub repository and setting the repository's visibility to `public`. - - -## Instructions - -Use the following instructions on your Linux machine to customize the arguments and Dockerfile and then create all the required Edge artifacts. - -
- -1. Check out the [CanvOS](https://github.com/spectrocloud/CanvOS.git) GitHub repository containing the starter code. -
- - ```bash - git clone https://github.com/spectrocloud/CanvOS.git - ``` - - -2. Change to the **CanvOS/** directory. -
- - ```bash - cd CanvOS - ``` - - -3. View the available [git tag](https://github.com/spectrocloud/CanvOS/tags). -
- - ```bash - git tag - ``` - - -4. Check out the newest available tag. This guide uses **v3.4.3** tag as an example. -
- - ```shell - git checkout v3.4.3 - ``` -
- -5. Review the files relevant for this guide. - - **.arg.template** - A sample **.arg** file that defines arguments to use during the build process. - - **Dockerfile** - Embeds the arguments and other configurations in the image. - - **Earthfile** - Contains a series of commands to create target artifacts. - - **earthly.sh** - Script to invoke the Earthfile, and generate target artifacts. - - **user-data.template** - A sample user-data file. -
- - -6. Review the **.arg** file containing the customizable arguments, such as image tag, image registry, image repository, and OS distribution. The table below shows all arguments, their default value, and allowed values. - - |**Argument**|**Description**|**Default Value**| **Allowed Values** | - |---|---|---|---| - |`CUSTOM_TAG`|Tag for the provider images|demo|Lowercase alphanumeric string without spaces.| - |`IMAGE_REGISTRY`|Image registry name|ttl.sh|Your image registry hostname, without `http` or `https`
Example: docker.io/spectrocloud| - |`OS_DISTRIBUTION`|OS Distribution |ubuntu | ubuntu, opensuse-leap| - |`IMAGE_REPO`|Image repository name.
It is the same as the OS distribution.|`$OS_DISTRIBUTION`|Your image repository name.|
   |`OS_VERSION`|OS version, only applies to Ubuntu |22| 20, 22|
   |`K8S_DISTRIBUTION`|Kubernetes Distribution |k3s| k3s, rke2, kubeadm |
   |`ISO_NAME`|Name of the Installer ISO|palette-edge-installer|Lowercase alphanumeric string without spaces. The characters `-` and `_` are allowed. |
   | `PE_VERSION` | The Palette Edge installer version. This should match the tag checked out from Git. This is an advanced setting. Do not modify unless told to do so. | String | Git tags. |
   | `platform` | Type of platform to use for the build. Used for cross-platform builds (for example, arm64 to amd64). | string | `linux/amd64` |

   Next, you will customize these arguments to use during the build process.
- -7. Issue the command below to assign an image tag value that will be used when creating the provider images. This guide uses the value `palette-learn` as an example. However, you can assign any lowercase and alphanumeric string to the `CUSTOM_TAG` argument. -
- - ```bash - export CUSTOM_TAG=palette-learn - ``` -
- -8. Use the command below to save the Docker Hub image registry hostname in the `IMAGE_REGISTRY` argument. Before you execute the command, replace `[DOCKER-ID]` in the declaration below with your Docker ID. Your image registry hostname must comply with standard DNS rules and may not contain underscores. -
- - ```bash - export IMAGE_REGISTRY=docker.io/[DOCKER-ID] # Follows [HOST]/[DOCKER-ID] syntax. Example: docker.io/spectrocloud - ``` -
- -9. Issue the following command to use the openSUSE Leap OS distribution. -
- - ```bash - export OS_DISTRIBUTION=opensuse-leap - ``` -
- -10. Issue the command below to create the **.arg** file containing the custom tag, Docker Hub image registry hostname, and openSUSE Leap OS distribution. The **.arg** file uses the default values for the remaining arguments. You can refer to the existing **.arg.template** file to learn more about the available customizable arguments. -
- - ```bash - cat << EOF > .arg - IMAGE_REGISTRY=$IMAGE_REGISTRY - OS_DISTRIBUTION=$OS_DISTRIBUTION - IMAGE_REPO=$OS_DISTRIBUTION - CUSTOM_TAG=$CUSTOM_TAG - K8S_DISTRIBUTION=k3s - ISO_NAME=palette-edge-installer - PE_VERSION=$(git describe --abbrev=0 --tags) - platform=linux/amd64 - EOF - ``` - - View the newly created file to ensure the customized arguments are set correctly. -
- - ```bash - cat .arg - ``` -
- - - - Using the arguments defined in the **.arg** file, the final provider image name will have the following naming pattern, `[IMAGE_REGISTRY]/[IMAGE_REPO]:[CUSTOM_TAG]`. Ensure the final artifact name conforms to the Docker Hub image name syntax - `[HOST]/[DOCKER-ID]/[REPOSITORY]:[TAG]`. - - -
- -11. Use the following command to append the [WireGuard](https://www.wireguard.com/install/) installation instructions to the Dockerfile. You can install more tools and dependencies and configure the image to meet your needs. Add your customizations below the line tagged with the `Add any other image customizations here` comment in the Dockerfile. Do not edit or add any lines before this tagged comment. -
- - ```bash - echo 'RUN sudo zypper refresh && sudo zypper install -y wireguard-tools' >> Dockerfile - ``` - - View the newly created file to ensure the instruction to install WireGuard is appended correctly. -
- - ```bash - cat Dockerfile - ``` -
- - - Using the `-y` option with the `sudo zypper install` command is critical to successfully build the images. The default behavior for package installations is to prompt the user for permission to install the package. A user prompt will cause the image creation process to fail. This guidance applies to all dependencies you add through the **Dockerfile**. - - -
- -12. Issue the command below to save your tenant registration token to a local variable. Replace `[your_token_here]` with your actual registration token. -
- - ```bash - export token=[your_token_here] - ``` -
- -13. Use the following command to create the **user-data** file containing the tenant registration token. Also, you can click on the *Points of Interest* numbers below to learn more about the main attributes relevant to this example. -
- - - - ```shell - cat << EOF > user-data - #cloud-config - stylus: - site: - edgeHostToken: $token - install: - poweroff: true - users: - - name: kairos - passwd: kairos - EOF - ``` - - - - View the newly created user data file to ensure the token is set correctly. -
- - ```bash - cat user-data - ``` - - If you want further customization, check the existing **user-data.template** file, and refer to the [Edge Configuration Stages](https://docs.spectrocloud.com/clusters/edge/edge-configuration/cloud-init#edgeconfigurationstages) and [User Data Parameters](https://docs-latest.spectrocloud.com/clusters/edge/edge-configuration/installer-reference) documents to learn more. -
- -14. CanvOS utility uses [Earthly](https://earthly.dev/) to build the target artifacts. Issue the following command to start the build process. -
- - ```bash - sudo ./earthly.sh +build-all-images - ``` - - ```bash coloredLines=2-2 hideClipboard - # Output condensed for readability - ===================== Earthly Build SUCCESS ===================== - Share your logs with an Earthly account (experimental)! Register for one at https://ci.earthly.dev. - ``` -
- - - - If you plan to build Edge artifacts using a content bundle, use the `+build-provider-images` option instead of the `+build-all-images` option in the command above. The command, `sudo ./earthly.sh +build-provider-images`, will build the provider images but not the Edge installer ISO. - - - - - This command may take up to 15-20 minutes to finish depending on the resources of the host machine. Upon completion, the command will display the manifest, as shown in the example below, that you will use in your cluster profile later in this tutorial. Note that the `system.xxxxx` attribute values in the manifest example are the same as what you defined earlier in the **.arg** file. - - Copy and save the output attributes in a notepad or clipboard to use later in your cluster profile. -
- - ```bash - pack: - content: - images: - - image: "{{.spectro.pack.edge-native-byoi.options.system.uri}}" - options: - system.uri: "{{ .spectro.pack.edge-native-byoi.options.system.registry }}/{{ .spectro.pack.edge-native-byoi.options.system.repo }}:{{ .spectro.pack.edge-native-byoi.options.system.k8sDistribution }}-{{ .spectro.system.kubernetes.version }}-{{ .spectro.pack.edge-native-byoi.options.system.peVersion }}-{{ .spectro.pack.edge-native-byoi.options.system.customTag }}" - system.registry: docker.io/spectrocloud - system.repo: opensuse-leap - system.k8sDistribution: k3s - system.osName: opensuse-leap - system.peVersion: v3.4.3 - system.customTag: palette-learn - ``` -
-
- -15. List the Docker images to review the provider images created. By default, provider images for all the Palette's Edge-supported Kubernetes versions are created. You can identify the provider images by reviewing the image tag value you used in the **.arg** file's `CUSTOM_TAG` argument. -
- - ```shell - docker images --filter=reference='*/*:*palette-learn' - ``` - - ```bash coloredLines=3-4 hideClipboard - # Output - REPOSITORY TAG IMAGE ID CREATED SIZE - spectrocloud/opensuse-leap k3s-1.25.2-v3.4.3-palette-learn 2427e3667b2f 24 minutes ago 2.22GB - spectrocloud/opensuse-leap k3s-1.24.6-v3.4.3-palette-learn 0f2efd533a33 24 minutes ago 2.22GB - ``` -
- -16. To use the provider images in your cluster profile, push them to your image registry mentioned in the **.arg** file. Issue the following command to log in to Docker Hub. Provide your Docker ID and password when prompted. -
- - ```bash - docker login - ``` - - ```bash hideClipboard - # Output - Login Succeeded - ``` -
- -17. Use the following commands to push the provider images to the Docker Hub image registry you specified. Replace the `[DOCKER-ID]` and version numbers in the command below with your Docker ID and respective Kubernetes versions that the utility created. -
- - ```bash - docker push docker.io/[DOCKER-ID]/opensuse-leap:k3s-1.25.2-v3.4.3-palette-learn - docker push docker.io/[DOCKER-ID]/opensuse-leap:k3s-1.24.6-v3.4.3-palette-learn - ``` -

18. After pushing the provider images to the image registry, open a web browser and log in to [Palette](https://console.spectrocloud.com). Ensure you are in the **Default** project scope before creating a cluster profile.


19. Navigate to the left **Main Menu** and select **Profiles**. Click on the **Add Cluster Profile** button, and fill out the required basic information fields to create a cluster profile for Edge.


20. Add the following [BYOS Edge OS](/integrations/byoos) pack to the OS layer in the **Profile Layers** section.

    |**Pack Type**|**Registry**|**Pack Name**|**Pack Version**|
    |---|---|---|---|
    |OS|Public Repo|BYOS Edge OS|`1.0.0`|


21. Replace the cluster profile's BYOOS pack manifest with the following custom manifest so that the cluster profile can pull the provider image from your Docker Hub image registry.

    The `system.xxxxx` attribute values below refer to the arguments defined in the **.arg** file. If you modified the arguments in the **.arg** file, you must modify the attribute values below accordingly.
- - ```yaml - pack: - content: - images: - - image: "{{.spectro.pack.edge-native-byoi.options.system.uri}}" - options: - system.uri: "{{ .spectro.pack.edge-native-byoi.options.system.registry }}/{{ .spectro.pack.edge-native-byoi.options.system.repo }}:{{ .spectro.pack.edge-native-byoi.options.system.k8sDistribution }}-{{ .spectro.system.kubernetes.version }}-{{ .spectro.pack.edge-native-byoi.options.system.peVersion }}-{{ .spectro.pack.edge-native-byoi.options.system.customTag }}" - system.registry: docker.io/spectrocloud - system.repo: opensuse-leap - system.k8sDistribution: k3s - system.osName: opensuse-leap - system.peVersion: v3.4.3 - system.customTag: palette-learn - ``` - The screenshot below displays how to reference a provider image in the BYOOS pack of your cluster profile. - - ![Screenshot of a sample cluster profile's OS layer ](/tutorials/palette-canvos/clusters_edge_palette-canvos_edit_profile.png) -
- - - - The BYOOS pack's `system.uri` attribute references the Kubernetes version selected in the cluster profile by using the `{{ .spectro.system.kubernetes.version }}` [macro](/clusters/cluster-management/macros). This is how the provider images you created and pushed to a registry are tied to the OS and Kubernetes version you selected in the **.arg** file. - - - -22. Add the following **Palette Optimized K3s** pack to the Kubernetes layer of your cluster profile. Select the k3s version 1.25.x because earlier in this how-to guide, you pushed a provider image compatible with k3s v1.25.2 to the ttl.sh image registry. - - |**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| - |---|---|---|---| - |Kubernetes|Public Repo|Palette Optimized k3s|`1.25.x`| - - -23. Add the network layer to your cluster profile, and choose a Container Network Interface (CNI) pack that best fits your needs, such as Calico, Flannel, Cilium, or Custom CNI. For example, you can add the following network layer. This step completes the core infrastructure layers in the cluster profile. - - |**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| - |---|---|---|---| - |Network|Public Repo|Calico|`3.25.x`| - - -24. Add add-on layers and manifests to your cluster profile per your requirements. - - -25. If there are no errors or compatibility issues, Palette displays the newly created complete cluster profile for review. Verify the layers you added, and finish creating the cluster profile. -
- - -## Validate -List the Edge installer ISO image and checksum by issuing the following command from the **CanvOS/** directory. -
- -```shell -ls build/ -``` - -```shell hideClipboard -# Output -palette-edge-installer.iso -palette-edge-installer.iso.sha256 -``` - -You can validate the ISO image by creating a bootable USB flash drive using any third-party software and attempting to flash a bare host machine. Most software that creates a bootable USB drive will validate the ISO image. Here, the flash process means installing the necessary tools and configurations on a host machine. - -
- -
- -# Next Steps - -After building the Edge artifacts and creating an Edge cluster profile, the next step is to use the Edge installer ISO image to prepare your Edge host. To learn more about utilizing Edge artifacts to prepare Edge hosts and deploy Palette-managed Edge clusters, we encourage you to check out the reference resources below. -

- [Deploy an Edge Cluster on VMware](/clusters/edge/site-deployment/deploy-cluster)


- [Prepare Edge Host for Installation](/clusters/edge/site-deployment/stage)
\ No newline at end of file
diff --git a/content/docs/04-clusters/03-edge/04-edgeforge-workflow/10-build-content-bundle.md b/content/docs/04-clusters/03-edge/04-edgeforge-workflow/10-build-content-bundle.md
deleted file mode 100644
index 0aa5a1c198..0000000000
--- a/content/docs/04-clusters/03-edge/04-edgeforge-workflow/10-build-content-bundle.md
+++ /dev/null
@@ -1,145 +0,0 @@
---
title: "Build Content Bundle"
metaTitle: "Build Content Bundle - Optimize Edge Deployments"
metaDescription: "Learn about building your edge content bundles in order to optimize cluster deployments"
hideToC: false
fullWidth: false
---

import Tabs from 'shared/components/ui/Tabs';
import WarningBox from 'shared/components/WarningBox';
import InfoBox from 'shared/components/InfoBox';
import PointsOfInterest from 'shared/components/common/PointOfInterest';

# Overview

Content bundles are archives of all the container images required for a cluster profile. The content bundle includes Helm charts, Packs, and manifest files needed to deploy your Edge host cluster. In addition to core container images, the content bundle can include artifacts from your applications that you wish to deploy to the Edge cluster. [Cluster Profiles](/cluster-profiles) are the primary source for building these content bundles.
-
-
-
-Currently, content bundles include Helm charts and Packs. However, keep in mind that the container images of the Helm Charts and Packs are extracted and predeployed into the container runtime [containerd](https://containerd.io/) for optimization. In the future, Palette will include a built-in OCI registry to host Helm Charts and other artifacts so that they do not have to be downloaded from the internet when they are included in a content bundle.
-
-
-
-
-# Benefits of Content Bundle
-
-Creating a content bundle provides several benefits that may address common use cases related to deploying Edge hosts.
-
- -* Preloading required software dependencies removes the need to download assets during cluster deployment. - - -* If connectivity to a container registry is unstable or bandwidth limited, preloading the software dependencies can address these concerns. - - -* Preloading required software dependencies optimizes the Edge host deployment process when the Edge host is in an internet bandwidth-constrained environment. - - -* Organizations that want better control over the software used by their Edge hosts can use content bundles to ensure that only approved software is consumed. - - -# Prerequisites - -- Linux Machine (Physical or VM) with an AMD64 architecture. - - -- Palette API key. Refer to the [User Authentication](/user-management/user-authentication/#apikey) resource to learn how to create a Palette API key. - - -- An Edge Native cluster profile. Refer to [Create Edge Native Cluster Profile](/clusters/edge/site-deployment/model-profile) guide to learn how to create an Edge Native cluster profile. You may also have other add-on profiles that you wish to attach to your cluster. - - -- Content tags in your profiles highlight the exact location of container images to be downloaded. - -# Create Content Bundle - -1. Download Palette Edge Content CLI and assign the executable bit to the CLI. -
- - ```shell - VERSION=3.4.3 - wget https://software.spectrocloud.com/stylus/v$VERSION/cli/linux/palette-edge - chmod +x palette-edge - ``` - -2. Log in to [Palette](https://console.spectrocloud.com). - - -3. Select the project you want to deploy the Edge host to and copy down the **Project ID**. -You can find the project id at the top right side corner of the landing page below the **User drop-down Menu**. - - -4. Navigate to the left **Main Menu** and select **Profiles**. - - -5. Use the **Cloud Types drop-down Menu** and select **Edge Native**. - - -6. Click on the cluster profile you want to include in the content bundle. - - -7. You can find the cluster profile ID by reviewing the URL of the current page. The cluster profile ID is the last value in the URL. Repeat this step for all the cluster profiles you want to specify in the content bundle. - -
- - ```text - https://console.spectrocloud.com/projects/yourProjectId/profiles/cluster/ - ``` - -8. Navigate back to your terminal window and issue the following command to create the content bundle. Replace the placeholder values with your actual values. - -
- - - - There are several Spectro Cloud CLI flags that you can use to customize the content bundle. Use the command `./palette-edge build --help` to learn more about the available flags. - - - -
- - ```shell - ./palette-edge build --api-key \ - --project-id \ - --cluster-profile-ids \ - --palette-endpoint \ - --outfile .tar \ - --iso - ``` - - ```shell hideClipboard - # Output - INFO[0000] getting hubble export for build - INFO[0000] Fetching latest version for service 'stylus' - INFO[0000] stylus version: 3.4.3 - INFO[0000] Fetching manifest for service stylus and version 3.4.3 for action resources - INFO[0000] Fetching manifest of service stylus and version '3.4.3' for action resources - INFO[0000] Fetching manifest from service stylus and version '3.4.3' for action resources with file name images.yaml - INFO[0000] Get manifest with file name: images.yaml - INFO[0000] Get manifest with file content: image: gcr.io/spectro-images-public/stylus:v3.4.3 - INFO[0002] successfully pulled image : gcr.io/spectro-images-public/calico/cni:v3.25.0 - ... - ... - INFO[0143] Total translation table size: 0 - INFO[0143] Total rockridge attributes bytes: 272 - INFO[0143] Total directory bytes: 0 - INFO[0143] Path table size(bytes): 10 - INFO[0143] Max brk space used 0 - INFO[0143] 872027 extents written (1703 MB) - INFO[0144] ISO file created successfully - ``` - -The result is a content bundle that you can use to preload into your installer. Alternatively, you can use the ISO version of the content bundle and transfer it to a USB drive to be used separately at the time of Edge host installation. - -# Validate - -You can validate that the ISO image has not been corrupted by attempting to flash a bootable device. Most software that creates a bootable device will validate the ISO image before the flash process. - - -# Next Steps - -Your next step is to build the Edge artifacts so that you can deploy an Edge host. To create an Edge artifacts, check out the [Build Images](/clusters/edge/edgeforge-workflow/palette-canvos) guide. diff --git a/content/docs/04-clusters/03-edge/04-edgeforge-workflow/12-build-artifacts.md b/content/docs/04-clusters/03-edge/04-edgeforge-workflow/12-build-artifacts.md deleted file mode 100644 index 6786c4281f..0000000000 --- a/content/docs/04-clusters/03-edge/04-edgeforge-workflow/12-build-artifacts.md +++ /dev/null @@ -1,278 +0,0 @@ ---- -title: "Build Edge Artifacts using a Content Bundle" -metaTitle: "Build Edge Artifacts using a Content Bundle" -metaDescription: "Learn how to build an Edge installer ISO using the Palette Edge CLI and the CanvOS utilities." -icon: "" -hideToC: false -fullWidth: false -hideToCSidebar: false -hiddenFromNav: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from "shared/components/common/PointOfInterest"; - -# Build Edge Artifacts using a Content Bundle - -Palette's Edge solution supports creating Edge artifacts for edge devices deployed in a low internet bandwidth environment or an *air-gapped* environment. An air-gapped environment is a deployment site with no direct internet access. Using a content bundle, you can build Edge artifacts for installation in such environments. - - -A content bundle is an archive that includes the Operating System (OS) image, the Kubernetes distribution, the Network Container Interface (CNI), and all other dependencies specified in the cluster profiles you want to deploy to the Edge cluster. A content bundle provides several benefits, such as: -
- -- Software dependencies are pre-loaded into the installer image. - - -- Optimizes the deployment process for bandwidth-constrained environments or air-gapped environments. - - -- The ability to more granularly manage the software dependencies available to Edge clusters. - - -This how-to guide provides instructions for creating and using a content bundle to build the Edge artifacts. You will begin with installing a necessary tool, the Palette Edge CLI, on your development machine. The Palette Edge CLI is a command-line utility to interact with Palette and perform specific tasks in your development environment, such as creating a content bundle. Next, you will download all the software dependencies mentioned in your cluster profile using the Palette Edge CLI and create a content bundle. Lastly, when your content bundle is ready, you will use the CanvOS utility to embed the content bundle and user data into the Edge installer ISO image. - -The diagram below displays the overarching steps to build the Edge installer ISO using a content bundle. The diagram also highlights the primary prerequisites to create a content bundle. - -![An overarching diagram displaying the workflow in the current guide.](/clusters_edge_edge-forge-workflow_build-images_build-artifacts_overarching.png) - - -# Prerequisites - - - - This how-to guide extends the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) workflow. Therefore, you must complete it before proceeding with the current guide. - - - - -To complete this guide, you will need the following items: -
- -* A physical or virtual Linux machine with *AMD64* (also known as *x86_64*) processor architecture to build the Edge installer ISO image. You can issue the following command in the terminal to check your processor architecture. -
- - ```bash - uname -m - ``` - -* The Linux machine should have the following minimum hardware configuration: - - 4 CPU - - 8 GB memory - - 100 GB storage. The actual storage will depend on the size of the content bundle you will use to build the Edge installer ISO image. - - -* You must have completed the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide to build the provider images and create a cluster profile referencing one of the provider images. - - -* A Spectro Cloud API key. Later in this guide, you will use this API key to authenticate the Palette Edge CLI utility and allow it to interact with Palette. Refer to the [User Authentication](https://docs.spectrocloud.com/user-management/user-authentication/#apikey) guide to create a new API key. - - -# Instructions - -Use the following instructions on your Linux machine, which this guide refers to as the development environment. -
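Before you begin, you can quickly confirm that the development machine meets the minimum hardware configuration listed in the prerequisites. This is only a convenience check with standard Linux utilities; the 4 CPU, 8 GB memory, and 100 GB storage figures come from the list above.

```bash
# Check the CPU count, available memory, and free disk space in the current directory.
nproc
free -h
df -h .
```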
- -1. Visit the [Downloads](https://docs.spectrocloud.com/spectro-downloads#paletteedgecli) page and download the latest Palette Edge CLI. You can download the Palette Edge CLI by clicking on the available URL or using the download URL in the following command. Replace the `[PALETTE-EDGE-BINARY-URL]` placeholder with the download URL. -
- - ```bash - curl [PALETTE-EDGE-BINARY-URL] --output palette-edge - ``` - - -2. Open a terminal session and navigate to the folder where you have downloaded the palette-edge binary. Set the executable permissions for the palette-edge binary by issuing the following command. -
- - ```bash - chmod 755 palette-edge - ``` - - -3. Use the following command to move the palette-edge binary to the **/usr/local/bin** directory to make the binary available in your system $PATH. This will allow you to issue the `palette-edge` command from any directory in your development environment. -
- - ```bash - mv palette-edge /usr/local/bin - ``` - - -4. Verify the installation of the Palette Edge CLI by issuing the following command. The output will display information about the currently supported OS and Kubernetes distributions. -
- - ```bash - palette-edge show - ``` - - ```bash hideClipboard - # Sample output - ┌────────────────────────────────────────────────────────────────────────┐ - | OS Flavor | Description | Base Image URI | - | opensuse-leap | Opensuse Leap 15.4 | quay.io/kairos/core-opensuse-leap | - | ubuntu-20 | Ubuntu 20.4 LTS | quay.io/kairos/core-ubuntu-20-lts | - | ubuntu-22 | Ubuntu 22.4 LTS | quay.io/kairos/core-ubuntu-22-lts | - └────────────────────────────────────────────────────────────────────────┘ - ┌─────────────────────────────────────────────────────────────────────────────────────────────┐ - | K8S Flavor | Description | Supported Versions | - | k3s | Rancher K3s | 1.25.2-k3s1,1.24.6-k3s1,1.23.12-k3s1,1.22.15-k3s1 | - | kubeadm | Kubernetes kubeadm | 1.25.2,1.24.6,1.23.12,1.22.15 | - | rke2 | Rancher RK2 | 1.25.2-rke2r1,1.24.6-rke2r1,1.23.12-rke2r1,1.22.15-rke2r1 | - └─────────────────────────────────────────────────────────────────────────────────────────────┘ - ┌─────────────────────────────────┐ - | Component | Version | - | Spectro Agent Version | v3.4.3 | - | Kairos Version | v2.0.3 | - └─────────────────────────────────┘ - ``` -
- - - -5. Set the Spectro Cloud API key as an environment variable by issuing the following command. Replace the `[USE-YOUR-API-KEY_HERE]` placeholder with your API key. The Palette Edge CLI will use this API key to authenticate with Palette. Once authenticated, the Palette Edge CLI can interact with your Palette account. -
- - ```bash - export API_KEY=[USE-YOUR-API-KEY_HERE] - ``` - - -6. Log in to [Palette](https://console.spectrocloud.com). - - -7. Copy your Palette project ID. You will use this ID in a later step. The project ID is on the top-right corner of your Palette project overview page. Use the following screenshot to help you find your project ID. - - ![A screenshot highlighting the project ID in Palette project overview page](/clusters_edge_edge-forge-workflow_build-images_build-project_id.png) - - -8. Navigate to the left **Main Menu** and select **Profiles**. - - -9. Select the cluster profile you want to include in the content bundle. Click on the target cluster profile to access its details page. - - -10. Examine the cluster details page URL. The cluster details page URL follows the `[Palette-URL]/projects/[PROJECT-ID]/profiles/cluster/[CLUSTER-PROFILE-ID]` syntax. The cluster details page URL has your project ID and the cluster profile ID. For example, the screenshot below highlights the project ID and the cluster profile ID in a cluster details page URL. - - ![A screenshot highlighting the cluster profile ID and project ID in the URL of the cluster details page.](/clusters_edge_edge-forge-workflow_build-images_build-artifacts_url.png) - - - -11. Copy the cluster profile ID from the cluster details page URL for the next step. - - -12. Switch back to your development environment, and set the project ID as an environment variable by issuing the following command. Replace the `[USE-YOUR-PROJECT-ID_HERE]` placeholder with your project ID. -
- - ```bash - export PROJECT_ID=[USE-YOUR-PROJECT-ID_HERE] - ``` - - -13. Set the cluster profile ID as an environment variable using the following command. Replace the `[USE-YOUR-PROFILE-ID_HERE]` placeholder with your cluster profile ID. The Palette Edge CLI uses the cluster profile ID to reference the correct cluster profile and download all its software dependencies. -
- - ```bash - export PROFILE_ID=[USE-YOUR-PROFILE-ID_HERE] - ``` - - -14. Issue the command below to create the content bundle. The `build` command uses the following flags: - - |**Command Flag**|**Value**| - |---|---| - |`--api-key`|Spectro Cloud API key| - |`--project-id`|Palette project ID| - |`--cluster-profile-ids`|Cluster profile IDs. If you want to include multiple cluster profiles in the content bundle, add multiple cluster profile IDs separated by a comma.| - |`--palette-endpoint`|Palette API endpoint. The default Palette API endpoint is `api.spectrocloud.com`| - |`--outfile`|Path to write the final content bundle. | - You can issue `palette-edge build --help` to know about other available flags. -
- - ```bash - palette-edge build --api-key $API_KEY \ - --project-id $PROJECT_ID \ - --cluster-profile-ids $PROFILE_ID \ - --palette-endpoint api.spectrocloud.com \ - --outfile content - ``` - - - -15. Use the command below to list all files in the current directory to verify that you created the content bundle successfully. The content bundle will have the following naming convention, `content-[random-string]`, for example, **content-8e61a9e5**. -
- - ```bash - ls -al - ``` - - -16. List the files in the content bundle folder using the following command. The output will display the compressed core and app content files. -
- - ```bash - ls -al content-*/ - ``` - ```bash hideClipboard - # Sample output - total 3981104 - -rw-rw-r-- 1 jb jb 1598552722 Jul 26 18:20 app-content-8e61a9e5.zst - -rw-rw-r-- 1 jb jb 2478086360 Jul 26 18:20 core-content-8e61a9e5.zst - ``` - - -17. Issue the following command to build the Edge artifacts with your content bundle. The `+iso` option specifies the build target. This command will generate an ISO image from the content bundle and other configurations you have specified in the **.arg** and **user-data** files. -
- - ```bash - sudo ./earthly.sh +iso - ``` - This command may take up to 15-20 minutes to finish depending on the resources of the host machine. - - -# Validate - -List the Edge installer ISO and checksum by issuing the following command from the **CanvOS/** directory. -
- -```shell -ls build/ -``` - -```shell hideClipboard -# Output -palette-edge-installer.iso -palette-edge-installer.iso.sha256 -``` -
- -To validate, you can prepare an edge device using the Edge installer ISO. You can follow the [Prepare Edge Host for Installation](/clusters/edge/site-deployment/stage) guide if you prepare a bare metal machine or a VMware VM as a host. Below are the high-level steps for your reference: -
-
-1. Create a bootable USB flash drive using any third-party software. Most software that creates a bootable USB drive will validate the ISO image. A command-line alternative is sketched after this list.
-
-
-2. Select a physical or virtual host machine to emulate as an edge device. Enable Dynamic Host Configuration Protocol (DHCP) on the host before proceeding with the installation process. Enabling DHCP is necessary for the device to obtain an IP address automatically from the network.
-
-
-3. Flash the edge device with a bootable USB drive.
-
-
-4. The last step is to power on the edge device and start the installation process. For more information, refer to the [Perform Site Install](/clusters/edge/site-deployment/site-installation) documentation.
-
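On a Linux workstation, one way to create the bootable USB drive from step one is with `dd`. This is only a sketch: the device path **/dev/sdX** is a placeholder, and writing to the wrong device destroys its data, so confirm the target device with `lsblk` first.

```shell
# Identify the USB device first (for example, /dev/sdb). /dev/sdX below is a placeholder.
lsblk

# Write the Edge installer ISO to the USB drive and flush the write cache when finished.
sudo dd if=build/palette-edge-installer.iso of=/dev/sdX bs=4M status=progress conv=fsync
```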
- -# Next Steps - -Palette's Edge solution allows you to create Edge artifacts using a content bundle for edge devices deployed in low internet bandwidth or air-gapped environments. You created a content bundle using the Palette Edge CLI in this guide. Next, you used the CanvOS utility to embed the content bundle and user data into an Edge installer ISO. -
- - -As the next step, we recommend you check out the end-to-end tutorial, [Deploy an Edge Cluster on VMware](/clusters/edge/site-deployment/deploy-cluster). The tutorial provides a detailed walkthrough on deploying an Edge cluster in a VMware environment. - - -Check out the reference resources below to learn more about preparing an Edge host. -
- -- [Prepare Edge Host for Installation](/clusters/edge/site-deployment/stage) - - -- [Perform Site Install](/clusters/edge/site-deployment/site-installation) \ No newline at end of file diff --git a/content/docs/04-clusters/03-edge/08-deployment.md b/content/docs/04-clusters/03-edge/08-deployment.md deleted file mode 100644 index f8daf1638d..0000000000 --- a/content/docs/04-clusters/03-edge/08-deployment.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: "Deployment & Management" -metaTitle: "Palette Edge Deployment" -metaDescription: "Learn how to deploy Palette Edge." -hideToC: false -fullWidth: false -hiddenFromNav: true ---- - -# Overview - -The deployment of an edge host contains different options that are intended to help install the edge host in a manner that works best for your environment. To deploy an Edge host, follow the steps in the [Register and Manage Edge Native Clusters](/clusters/edge/deployment/native) guide. - - -# Resources - -- [Register and Manage Edge Native Clusters](/clusters/edge/deployment/native) \ No newline at end of file diff --git a/content/docs/04-clusters/03-edge/09-site-deployment.md b/content/docs/04-clusters/03-edge/09-site-deployment.md deleted file mode 100644 index 91234ad2e9..0000000000 --- a/content/docs/04-clusters/03-edge/09-site-deployment.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: "Deployment" -metaTitle: "Edge Site Deployment" -metaDescription: "Learn about the Palette Edge installation process." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - - -The Edge host deployment process consists of four phases described in the following table. - -| Phase| Description| -| ---| ---| -| Model Cluster Profile | The process of creating a [cluster profile](/cluster-profiles) for the host cluster that will be made up of Edge hosts. | -| Install Handoff | The Edge Installer is copied over from a portable storage device to the Edge host's hard disk. This step is typically performed in the preparation step. Refer to [Prepare Edge Hosts for Installation](/clusters/edge/site-deployment/stage) to learn more.| -| Registration | The Edge host is registered with Palette. The Edge host will remain in this phase until the registration process is complete.| -|Cluster Provisioning | The Edge host boots into the specified provider OS and proceeds with the cluster deployment.| - - -Review the following guides in sequential order to successfully deploy an Edge host. - -
-
-1. [Model Edge Native Cluster Profile](/clusters/edge/site-deployment/model-profile)
-
-
-2. [Prepare Edge Hosts for Installation](/clusters/edge/site-deployment/stage)
-
-
-3. [Perform Site Install](/clusters/edge/site-deployment/site-installation)
-
-
-
-In a lab environment, you must perform all the steps. In a production environment, these steps are typically performed by people with different roles. The Palette Edge lifecycle is explained in detail in the [lifecycle](/clusters/edge/edge-native-lifecycle) resource, which highlights the various roles involved.
-
-
-
-
-# Resources
-
-- [Model Cluster Profile](/clusters/edge/site-deployment/model-profile)
-
-
-- [Prepare Edge Hosts for Installation](/clusters/edge/site-deployment/stage)
-
-
-- [Perform Site Install](/clusters/edge/site-deployment/site-installation)
-
-
-- [Register Edge Host](/clusters/edge/site-deployment/site-installation/edge-host-registration)
-
-
-- [Create Cluster Definition](/clusters/edge/site-deployment/site-installation/cluster-deployment)
diff --git a/content/docs/04-clusters/03-edge/09-site-deployment/01-model-profile.md b/content/docs/04-clusters/03-edge/09-site-deployment/01-model-profile.md
deleted file mode 100644
index d93579c643..0000000000
--- a/content/docs/04-clusters/03-edge/09-site-deployment/01-model-profile.md
+++ /dev/null
@@ -1,236 +0,0 @@
----
-title: "Model Edge Native Cluster Profile"
-metaTitle: "Instructions for creating an Edge Native Cluster Profile"
-metaDescription: "Instructions for creating an Edge Native Cluster Profile"
-hideToC: false
-fullWidth: false
----
-
-import Tabs from 'shared/components/ui/Tabs';
-import WarningBox from 'shared/components/WarningBox';
-import InfoBox from 'shared/components/InfoBox';
-import PointsOfInterest from 'shared/components/common/PointOfInterest';
-
-# Overview
-
-[Cluster profiles](/cluster-profiles) contain the desired specifications for the Kubernetes cluster that the Edge hosts make up. The cluster profile defines the following components.
-
- -- Kubernetes flavor and version - -- Operating system (OS) - -- Container network interface (CNI) - -- Container storage interface (CSI) - -You define these components in an Edge Native Infrastructure profile. As with any other environment in Palette, you can define additional add-on cluster profiles. You can use add-on profiles to define integrations or applications that must be included when Palette deploys the cluster. - - - -The following steps will guide you on how to create a cluster profile for Edge. Choose the tab that matches your use case. - - -
- - - - - -## Prerequisites - -- Ensure all required provider images are created and uploaded to the respective registry. Refer to the EdgeForge [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide for details. - - -## Enablement - - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Choose the desired scope, project or **Tenant Admin**. - - -3. Navigate to the left **Main Menu** and select **Profiles**. - - -4. Click on **Add Cluster Profile**. - - -5. Provide **Basic Information**, such as profile name, description, and tags. Select **Full** and click on **Next**. - - -6. Select **Edge Native** as the **Cloud Type** and click on **Next**. - - -7. Select **Public Repo** in the **Registry field**. - - -8. Select **BYOS Edge OS** in the **Pack Name** field and the pack version. - - -9. Click on the code editor button **** to open up the editor - -
- - ![A view of the Kubernetes pack editor with a YAML configuration](/clusters_site-deployment_model-profile_byoos-pack-yaml.png) - - -10. Update the `system.uri` parameter in the pack editor. Use the custom OS image you created in the EdgeForge process. Refer to the EdgeForge [Build Images](/clusters/edge/edgeforge-workflow/palette-canvos) guide if you are missing a custom OS image. The following is an example configuration using a custom OS image. - - -
- - ```yaml - pack: - content: - images: - - image: '{{.spectro.pack.edge-native-byoi.options.system.uri}}' - # - image: example.io/my-other-images/example:v1.0.0 - # - image: example.io/my-super-other-images/example:v1.0.0 - #drain: - #cordon: true - #timeout: 60 # The length of time to wait before giving up, zero means infinite - #gracePeriod: 60 # Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used - #ignoreDaemonSets: true - #deleteLocalData: true # Continue even if there are pods using emptyDir (local data that will be deleted when the node is drained) - #force: true # Continue even if there are pods that do not declare a controller - #disableEviction: false # Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, use with caution - #skipWaitForDeleteTimeout: 60 # If pod DeletionTimestamp older than N seconds, skip waiting for the pod. Seconds must be greater than 0 to skip. - - options: - system.uri: example.io/my-images/example-custom-os:v1.4.5 - ``` - -
- - - - You can customize the node drainage behavior and specify additional images that you may have created that are part of the content bundle. Specify any additional image required by the cluster profile in the `images` section. Add an `- image: ` entry for each image you need to specify. Refer to the [BYOOS Pack](/integrations/byoos) resource to learn more about the pack details. - - - - -11. Click on the **Next layer** button to continue. - - - -12. Complete the cluster profile creation process by filling out the remaining layers. - - -You have successfully created a cluster profile that you can use to deploy Edge clusters. - -## Validate - -Verify you created a cluster profile for Edge hosts by using the following steps. - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Choose the desired scope, project or **Tenant Admin**. - - -3. Navigate to the left **Main Menu** and select **Profiles**. - - -4. Use the **Cloud Types** **drop-down Menu** and select **Edge Native**. - - -5. Your newly created cluster profile is displayed along with other cluster profiles of the same type. - - -
- - - - -
- - - -This workflow is unavailable for new Edge clusters. Use the **Custom OS** tab to learn how to use a custom OS with your cluster profile. - - - -
- -## Prerequisites - -No prerequisites. - -## Enablement - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Choose the desired scope, project or **Tenant Admin**. - - -3. Navigate to the left **Main Menu** and select **Profiles**. - - -4. Click the **Add New Profile** button. - - -5. Provide the profile with a name, description, version, and tags. Select **Full** for the profile type. Click on **Next**. - - -6. Select **Edge Native** as the cloud type and click on **Next**. - - -7. In the profile layers screen, for the OS layer, choose the desired OS type and OS version. Click on **Next layer**. - - - -You can select **Bring Your Own OS (BYOOS)** if you build your enterprise Edge artifacts. Specify the registry that hosts your provider images as the system URI. You can also provide additional cloud-init configurations in the OS pack YAML file to set up Edge host users, install other OS packages, install certificates, and more. Refer to the [Cloud-Init Stages](/clusters/edge/edge-configuration/cloud-init) resource to learn more about the cloud-init stages. - - - - -8. Choose the desired Kubernetes distribution and version. Click on **Next layer**. - - -9. Choose the desired CNI type and version. Click on **Next layer**. - - -10. Review and save your cluster profile. - -You now have a cluster profile you can use for deploying Edge hosts. - -Consider creating additional profiles with out-of-the-box packs for monitoring, security, authentication, or other capabilities. If you need remote access to the cluster, consider adding the [Spectro Proxy](/integrations/frp) pack to one of the add-on profiles. - -Optionally, add additional Helm or OCI registries and include applications hosted in those registries in add-on profiles. Check out the guide for adding a [Helm](/registries-and-packs/helm-charts) or [OCI](/registries-and-packs/oci-registry) registry to learn more. - -# Validate - -Verify you created a cluster profile for Edge hosts by using the following steps. - - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Choose the desired scope, project or **Tenant Admin**. - - -3. Navigate to the left **Main Menu** and select **Profiles**. - - -4. Select **Edge Native** as the cloud type. - - -You can view your newly created cluster profile on the **Cluster Profiles** page. - - -
- -
- - - - -# Next Steps - -Your next step in the deployment lifecycle is to prepare the Edge host for the installation. Use the [Prepare Edge Hosts for Installation](/clusters/edge/site-deployment/stage) guide to continue. - -
diff --git a/content/docs/04-clusters/03-edge/09-site-deployment/04-stage.md b/content/docs/04-clusters/03-edge/09-site-deployment/04-stage.md deleted file mode 100644 index ab06b14030..0000000000 --- a/content/docs/04-clusters/03-edge/09-site-deployment/04-stage.md +++ /dev/null @@ -1,259 +0,0 @@ ---- -title: "Prepare Edge Hosts for Installation" -metaTitle: "Stage common user data and prepare edge host for installation" -metaDescription: "Learn how to prepare edge hosts for installation before shipping them out to site for site installation" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -In this step, you will prepare the Edge host for installation. You will copy the following items to the storage device of the Edge host. - -- The Edge Installer image. - - -- The Edge Installer user data. - - -- Content bundles. - - - -If you need to create any of the items mentioned above, review the [EdgeForge Workflow](/clusters/edge/edgeforge-workflow) to learn how to create your own Edge artifacts. - - - -You can ship your Edge hosts after you complete this step. Use the following steps to prepare your Edge host for installation. - -
- -# Prepare Edge Host - -Pick the target environment for your Edge host. - -
-
-
-
-
-## Prerequisites
-
-- Edge Installer ISO file. Check out the [EdgeForge Workflow](/clusters/edge/edgeforge-workflow/palette-canvos/) to learn how to create an Edge Installer image or use the default Edge Installer image.
-
-- A Bare Metal appliance with USB drives.
-
-
-- The ability to modify the boot order settings to boot from a USB drive.
-
-
-- A USB disk containing the installer ISO.
-
-
-The following items are optional but may apply to your use case:
-
- -- USB disk that contains a user data ISO. This is applicable in [multiple user data](/clusters/edge/edgeforge-workflow/prepare-user-data) scenarios where you want to override or provide additional configurations after the Edge host is powered on at the physical site. - - -- USB disk containing the content bundle ISO. You can avoid this by creating a custom installer. Refer to the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide. - - - -## Installer Handoff - -1. Insert the USB drive containing the Edge Installer ISO and potentially your user data. - - -2. If you created a content bundle and loaded it to a USB disk, then insert the content bundle USB drive. - - -3. Power on the Edge host. - - -4. Wait for the Edge Installer to complete copying content to the hard drive. The Edge host will reboot by default upon completion unless you specify a different option in the Edge Installer configuration user data. - -5. Repeat steps one through four for all Edge hosts. - - -6. Remove the USB disks and ship your Edge host devices to the site for installation. - - -## Validate - -You can validate that the Edge host is ready for the site installation by simulating a site deployment on one of the Edge hosts. The simulation process will require you to complete the installation process and reset the device after the validation. - -
- - - -You will create a Virtual Machine Disk (VMDK) from the Edge Installer ISO and upload it to a vCenter environment. In the vCenter environment, you will convert the VMDK to a VM template, and export it out as an OVF template. - -
- - -## Prerequisites - -- Edge Installer ISO file. Check out the [build images](/clusters/edge/edgeforge-workflow/palette-canvos/) guide to learn how to create an Edge Installer image or use the default Edge Installer image. - - -- vCenter environment with sufficient resources and access privileges to complete the following actions: - - Upload files to a datastore. - - Ability to create VMs. - - - -## Installer Handoff - -1. Log in to vCenter Server by Using the vSphere Client. - - -2. Prepare a build server by launching a VM with Ubuntu version 20.04 or greater in your VMware environment. - - -3. Issue the following commands to prepare your server for VMDK creation. - -
-
-    ```shell
-    apt update
-    apt install qemu qemu-kvm \
-    libvirt-clients libvirt-daemon-system bridge-utils virt-manager
-    systemctl enable --now libvirtd
-    systemctl enable --now virtlogd
-    mkdir -p /etc/apt/keyrings
-    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
-    echo \
-    "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
-    $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
-    apt update
-    apt install docker-ce docker-ce-cli containerd.io docker-compose-plugin
-    ```
-
-    If you need a graphical user interface (GUI), add `x11-apps` to the `apt install` command.
-
- - ```shell - apt install x11-apps - ``` - -4. You can add additional packages for content creation, compression, and preparing your workspace. - -
- - ```shell - curl -L -o - "https://github.com/vmware/govmomi/releases/latest/download/govc_$( uname -s)_$(uname -m).tar.gz" | tar -C /usr/local/bin -xvzf - govc - mkdir -p ~/workspace/ - cd workspace/ - git clone https://github.com/spectrocloud/stylus-image-builder.git - ``` - - If you need ZSTD for compression or `govc` for interacting with vCenter, use the following command. - - ```shell - apt install zstd govc - ``` - -5. Build the VMDK from the Edge Installer ISO to serve as a template for deploying Edge hosts to virtual machines. Issue the following commands on your build server. - - ```shell - cd ~/workspace/stylus-image-builder/ - chmod +x entrypoint.sh - export ISO_URL=[your-installer-name].iso - export PALETTE_ENDPOINT=[your tenant].spectrocloud.com - export REGISTRATION_URL=[QR Code registration app link] - export EDGE_HOST_TOKEN=[token generated on Palette Portal] - export DISK_SIZE=100000M - EMBED=false make docker-build - nohup make vmdk & - ``` - - - - If you are using a *Tenant Registration Token* for auto-registration, you can omit the environment variable `REGISTRATION_URL`. - - - - A VMDK file was generated in the **stylus-image-builder/images** folder. Rename this VMDK to a preferred installer name. Ensure the VMDK file retains the `.vmdk` extension. - - -6. Transfer the VMDK to a datastore in your VMware environment. Review the commands below and ensure you replace the placeholders with the respective values from your environment. - - ```shell - export GOVC_URL=https://[IP address OR the DNS of vCenter] - export GOVC_USERNAME=[vcenter username] - export GOVC_PASSWORD=[vcenter password] - govc datastore.upload -ds=[datastore name] images/[your-installer-name].vmdk [folder in datastore]]/[your-installer-name].vmdk - govc datastore.cp -ds=[datastore name] [folder in datastore]]/[your-installer-name].vmdk [folder in datastore]]/[your-installer-name]-uncompressed.vmdk - ``` - - If you are using test or development environments, you may need to enable the following option. This environment variable is not recommended for production environments. - -
-
-    ```shell
-    export GOVC_INSECURE=1
-    ```
-
-7. Create a VM from the VMDK by logging into your vCenter console in the UI.
-
-
-8. Navigate to the **Datacenter/Folder**, under the **VMs and Templates** section.
-
-
-9. Start the **New Virtual Machine** deployment wizard.
-
-
-10. Choose a cluster that has access to the datastore used for storing the VMDK. Choose the datastore where the VMDK is stored.
-
-
-
-11. Select **Ubuntu Linux (64)** as your guest OS version. This is required even though you will be launching RHEL-based clusters.
-
-
-
-12. Select the Hardware settings.
-
-
-
-13. Delete the hard drive displayed by default. Add a new device of the type **Existing Hard Disk**. For this device, select the option **Datastore ISO file**.
-
-
-
-14. Navigate to the datastore folder with the uncompressed VMDK and select the VMDK.
-
-
-
-15. Finish the creation wizard and save your virtual machine.
-
-
-16. Navigate to **VMs and Templates** and right-click on the newly created VM. Select **Template** and **Convert to Template**.
-
-
-
-17. Navigate to **VMs and Templates** and right-click on the newly created VM Template. Select **Export to OVF Template**.
-
-
-
-You can ship this OVF template along with the Edge host to the physical site. Use the OVF template for the site installation.
-
-## Validate
-
-You can validate that the Edge host is ready for the site installation by simulating a site deployment on one of the Edge hosts. The simulation process will require you to complete the installation process and reset the device after the validation.
-
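Before committing to a full site-deployment simulation, you can at least confirm that the uploaded VMDK files are present in the datastore. The following is a minimal check with `govc`; it assumes the same placeholder datastore and folder names used in the earlier upload step and that the `GOVC_*` environment variables are still set.

```shell
# List the datastore folder used during the upload step and confirm both VMDK files are present.
# [datastore name] and [folder in datastore] are placeholders from the earlier steps.
govc datastore.ls -ds=[datastore name] [folder in datastore]
```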
- -
- - -# Next Steps - -Now that you have completed the staging process, you can ship the Edge hosts to the destination site. Proceed to the [Perform Site Install](/clusters/edge/site-deployment/site-installation) step. \ No newline at end of file diff --git a/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation.md b/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation.md deleted file mode 100644 index e661301bc4..0000000000 --- a/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation.md +++ /dev/null @@ -1,169 +0,0 @@ ---- -title: "Perform Site Install" -metaTitle: "Run the Palette edge installer on all your edge hosts " -metaDescription: "Learn how to run the Palette Edge installer on your edge hosts " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -You perform a site installation by powering on the Edge host. The Edge Installer will start and begin the installation process, which may vary depending on your environment and Edge host type. - - - -The Edge host site installation has three stages, as described in the table. - -| Phase| Description| Required | -| ---| ---| --- | -| Apply Site User Data | As described in the [Multiple User Data Use Case](/clusters/edge/edgeforge-workflow/prepare-user-data#multipleuserdatausecase), you can apply a secondary Edge Installer configuration user date to apply additional settings or override global values. This is optional but may be required for certain use cases. Refer to the [Apply Site User Data](/clusters/edge/site-deployment/site-installation/site-user-data) guide to learn more. | No | -| Registration | The Edge host is registered with Palette. The Edge host will remain in this phase until the registration process is complete. The *Registration* phase has a unique set of instructions. Refer to [Register Edge Host](/clusters/edge/site-deployment/site-installation/edge-host-registration) for guidance.| Yes| -|Cluster Provisioning | The Edge host boots into the specified provider Operating System and proceeds with the cluster deployment. You can find the instructions in the [Create Cluster Definition](/clusters/edge/site-deployment/site-installation/cluster-deployment) resource | Yes | - - -# Installation - -Use the following steps to complete the Edge host installation. - -
-
-
-The community resource, [Painting with Palette](https://www.paintingwithpalette.com/tutorials/), has a great Edge Native tutorial.
-
-
- - - - - -## Prerequisites - -- Access to Palette and the ability to register an Edge host. - -- Access to network information about the physical site, specifically the network Virtual IP Address (VIP). - -- Physical access to the Edge host. - -## Site Install - -1. If you have a site-specific user data ISO, then insert the USB stick into the Edge host. - - -2. Power on the Edge host. The Edge host will boot into registration mode where it will connect with the Palette endpoint that was specified in the user data. - - -3. The Edge host will remain in a wait mode until you register the device in Palette. Review the [Register Edge Host](/clusters/edge/site-deployment/site-installation/edge-host-registration) documentation to learn more about each registration method. - -
- - - - Once the Edge host is registered, Palette will wait for you to create a host cluster and assign the Edge host to the cluster. - - - -4. The last step is to create a cluster definition if you don't have a host cluster that the Edge host can join. Follow the steps in the [Create Cluster Definition](/clusters/edge/site-deployment/site-installation/cluster-deployment) to complete the site installation. - -When the cluster is created, the installation process will continue. The Palette Edge Host agent will download all required artifacts and reboot the Edge host. - -When the Edge host finishes rebooting, the *Cluster Provisioning* phase begins. In this phase, the system boots into the OS defined in the cluster profile, and cluster configuration begins. Kubernetes components are initialized and configured based on the specifications in the cluster profile. - -Any content bundles you provided are extracted and loaded into the container runtime process. Refer to the [EdgeForge Workflow](/clusters/edge/edgeforge-workflow) to learn more about content bundles. Any [cloud-init](/clusters/edge/edge-configuration/cloud-init) stages defined in the OS pack will also be invoked as the OS initializes. - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the host cluster you created to view its details page. - - -4. Review the **Cluster Status**. Ensure the **Cluster Status** field displays **Running**. - -You can also use `kubectl` to issue commands against the cluster. Check out the [Access Cluster with CLI](/clusters/cluster-management/palette-webctl#overview) to learn how to use `kubectl` with a host cluster. - -
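If you download the cluster's kubeconfig file from its details page, a quick sanity check from your workstation might look like the following sketch. The kubeconfig file name is a placeholder, and the exact download steps are covered in the Access Cluster with CLI guide linked above.

```shell
# Point kubectl at the kubeconfig downloaded from Palette (placeholder file name).
export KUBECONFIG=~/Downloads/my-edge-cluster.kubeconfig

# Confirm the Edge hosts have joined the cluster and report a Ready status.
kubectl get nodes --output wide
```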
-
-
-
-Use the following steps to complete the Edge host installation in a VMware environment.
-
-## Prerequisites
-
-- Access to Palette and the ability to register an Edge host.
-
-- Access to network information about the physical site, specifically the network Virtual IP (VIP) address.
-
-- Physical access to the Edge host.
-
-- An Edge Installer OVF template. Check out the [Prepare Edge Hosts for Installation](/clusters/edge/site-deployment/stage) for guidance on how to create an Edge Installer OVF template.
-
-## Site Install
-
-Perform the following steps to proceed with the installation at the site in your VMware environment.
-
- -1. Log in to vCenter Server using the vSphere Client. - - -2. Navigate to **VMs and Templates** and right-click on the desired folder, then select the option **Deploy VM(s) from this OVF template**. - - -3. Specify the location of the OVF template and start the deployment. - - -4. Proceed through the installation steps and deploy the virtual machine. - - -5. The VM will start up in the registration phase and wait for you to register the Edge host with Palette. If you provided the Edge Installer user data with an `EdgeHostToken` then the Edge host will automatically register with Palette. Otherwise, the Edge host will wait until you manually register the device in Palette. Go ahead and register the Edge host with Palette. Review the [Register Edge Host](/clusters/edge/site-deployment/site-installation/edge-host-registration) for additional guidance. - -
- - - - Once the Edge host is registered, Palette will wait for you to create a host cluster and assign the Edge host to the cluster. - - - -6. The last step is to create a cluster if you don't have a host cluster that the Edge host can join. Follow the steps in the [Create Cluster Definition](/clusters/edge/site-deployment/site-installation/cluster-deployment) to complete the site installation. - -When the cluster is created, the installation process continues. The Palette Edge Host agent will download all required artifacts and reboot the Edge host. - -After the reboot, the *Cluster Provisioning* phase begins. In this phase, the system boots into the OS defined in the cluster profile, and cluster configuration begins. Kubernetes components are initialized and configured based on the specifications in the cluster profile. - -Any content bundles you provided are extracted and loaded into the container runtime process. Refer to the [EdgeForge Workflow](/clusters/edge/edgeforge-workflow) to learn more about content bundles. Any [cloud-init](/clusters/edge/edge-configuration/cloud-init) stages defined in the OS pack will also be invoked as the OS initializes. - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the host cluster you created to view its details page. - - -4. Review the **Cluster Status**. Ensure the **Cluster Status** field displays **Running**. - -You can also use `kubectl` to issue commands against the cluster. Check out the [Access Cluster with CLI](/clusters/cluster-management/palette-webctl#overview) to learn how to use `kubectl` with a host cluster. - -
- -
- -# Next Steps - -Your Edge host is now registered with Palette and is part of a host cluster. You can repeat the steps from [Prepare Edge Host for Installation](/clusters/edge/site-deployment/stage) and [Perform Site Install](/clusters/edge/site-deployment/site-installation) for any additional Edge host you want to add to the host cluster. The next step is for you to become more familiar with Day-2 responsibilities. Check out the [Manage Clusters](/clusters/cluster-management) guide to learn more about Day-2 responsibilities. diff --git a/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation/04-site-user-data.md b/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation/04-site-user-data.md deleted file mode 100644 index 2a143e1e77..0000000000 --- a/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation/04-site-user-data.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: "Apply Site User Data" -metaTitle: "Site User Data" -metaDescription: "Learn how to create a secondary Edge Installer configuration user data." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -You can provide site-specific Edge Installer configuration user data if you need to apply new values or override default values from the Edge Installer user data you created in the [Prepare Edge Hosts for Installation](/clusters/edge/site-deployment/stage) step or, as often referenced, the *Installer Handoff* phase. - -Use the following steps to create an ISO file containing the additional user data. You will load the newly created ISO to a bootable device, such as a USB stick. - -## Prerequisites - -- A bootable device such as a USB drive, or a Preboot Execution Environment (PXE) server. - -- `mkisofs`, or `genisoimage`, or similar ISO management software. - -- `cdrtools` or `wodim` for Windows. - -## Create ISO - -1. Create a file called **user-data** that contains the additional configurations you want to override or inject. - -
- - ```shell - touch user-data - ``` - -2. Create an empty **meta-data** file: - -
-
-    ```shell
-    touch meta-data
-    ```
-
-3. Create an ISO using the following command. A quick check of the resulting ISO is shown after this list.
-
-    MacOS/Linux:
-
-    ```shell
-    mkisofs -output site-user-data.iso -volid cidata -joliet -rock user-data meta-data
-    ```
-
-    Windows:
-
-    ```shell
-    genisoimage -output site-user-data.iso -volid cidata -joliet -rock user-data meta-data
-    ```
-
-    This generates an ISO file called site-user-data.iso in the current directory.
-
- -4. Copy the ISO to a bootable device such as a USB drive. - -
-
-
-
-   You can use several software tools to create a bootable USB drive, such as [balenaEtcher](https://www.balena.io/etcher). For a PXE server, there are open-source projects such as [Fog](https://fogproject.org/download) or [Windows Deployment Services](https://learn.microsoft.com/en-us/windows/deployment/wds-boot-support) for Windows.
-
-
-
-5. Once the Edge host arrives at the physical site, insert the USB drive into the Edge host before powering it on. The Edge Installer will apply the new user data during the installation process.
-
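As a quick sanity check of the ISO created in step three, you can confirm the volume label is `cidata`, which is the label the `-volid` flag sets and which cloud-init style user data discovery expects. This sketch assumes `isoinfo` is available, which ships with the `genisoimage`/`cdrtools` tooling listed in the prerequisites.

```shell
# Print the ISO metadata and confirm the volume ID reads "cidata".
isoinfo -d -i site-user-data.iso | grep -i "volume id"
```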
- - - -## Validate - -You can validate that the ISO image is not corrupted by attempting to flash a bootable device. Most software that creates a bootable device will validate the ISO image before the flash process. - -# Next Steps - -Before you register your Edge host with Palette you must have a tenant registration token. Review the [Create Registration Token](/clusters/edge/site-deployment/site-installation/create-registration-token) guide for steps on how to create a tenant registration token. diff --git a/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation/05-create-registration-token.md b/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation/05-create-registration-token.md deleted file mode 100644 index 688db11802..0000000000 --- a/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation/05-create-registration-token.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: "Create a Registration Token" -metaTitle: "Create a Registration Token" -metaDescription: "Learn how to create a tenant registration token for Edge host registrations." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - - -# Create Registration Token - - -To successfully register an Edge host with Palette you must provide the Edge Installer with a tenant registration token. To create a registration token, use the following steps. - - -# Prerequisites - -- Tenant admin access. - - -# Create Token - -1. Log into [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Switch to the tenant scope. - - -3. Navigate to the left **Main Menu** and select **Settings**. - - -4. Select **Registration Tokens** in the **Tenant Settings Menu**. - - -5. Click **Add New Registration Token**. - - - -6. Fill out the input fields for and **Confirm** your changes. - -
- - - **Registration Token Name** - Used to name the token. - - - **Description** - An optional value used to explain the context usage of the token. - - - **Default Project** - Set a default project for Edge host registration. - - - **Expiration Date** - Set an expiration date for the token. - - - - -7. Save the **Token** value. - - -# Validate - - -1. Log into [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Switch to the tenant scope. - - -3. Navigate to the left **Main Menu** and select **Settings**. - - -4. Select **Registration Tokens** in the **Tenant Settings Menu**. - - -5. Validate the tenant registration token is available - -
- -# Next Steps - -The next stage in the Edge host site installation process is registering the Edge host. Go ahead and review the instructions in the [Register Edge Host](/clusters/edge/site-deployment/site-installation/edge-host-registration) guide. \ No newline at end of file diff --git a/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation/06-edge-host-registration.md b/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation/06-edge-host-registration.md deleted file mode 100644 index 7af268aa67..0000000000 --- a/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation/06-edge-host-registration.md +++ /dev/null @@ -1,314 +0,0 @@ ---- -title: "Register Edge Host" -metaTitle: "Register your edge hosts with the Palette Management Console" -metaDescription: "Learn how to register your edge hosts with the Palette Management Console" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -To use an Edge host with a host cluster, you must first register it with Palette. A registration token in the user data is required to complete the registration process. You have three options to register the Edge host with Palette. - -| Method | Description | Set up Effort | -|---|---|---| -| Auto Registration | Edge hosts can automatically register with Palette by using a *Registration Token*. This method requires you to specify the registration token in the user data. | Low | -| Manual Registration | You can manually enter a unique Edge host ID into Palette. | Low | -| QR Code | Scan a QR code that takes you to a web application that registers the Edge host with Palette. This method is considered advanced with the benefit of simplifying the Edge host registration without needing a tenant token or a manual entry.| High | - - - - -A registration token is required for the Edge host registration process. Without the registration token, the registration process will be unable to complete. Review the [Create Registration Token](/clusters/edge/site-deployment/site-installation/create-registration-token) guide for steps on how to create a tenant registration token. - - - - - -# Registration Method - -To register the Edge host, you are required to use a registration token with all three registration options. Edge hosts are registered under the default project chosen for the registration token. You can override the default project by specifying the project in the Edge Installer [user data](/clusters/edge/edge-configuration/installer-reference) configuration file. - - -By default, devices automatically register during the site installation process when a tenant registration token value is present. Set the parameter `disableAutoRegister` to `true` in the Edge Installer configuration to disable auto registration and require manual device registration. - - -
- - -```yaml -stylus: - site: - edgeHostToken: MjBhNTQxYzZjZTViNGFhM2RlYTU3ZXXXXXXXXXX - disableAutoRegister: true -``` - - -
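As noted earlier, you can also override the registration token's default project through the Edge Installer user data. The snippet below is only an illustration: the `projectName` key is an assumed name for that setting, so confirm the exact parameter in the [Installer Configuration](/clusters/edge/edge-configuration/installer-reference) reference before relying on it.

```yaml
stylus:
  site:
    edgeHostToken: MjBhNTQxYzZjZTViNGFhM2RlYTU3ZXXXXXXXXXX
    # Assumed key name for the project override. Verify it against the installer reference.
    projectName: my-edge-project
```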
- - - -Select the registration method that best fits your organizational needs and review the steps to get started. - - - -
- - - - - -## Auto Registration - -You can automate the registration process by using registration tokens. - -If you selected a default project for the registration token, that is the project the Edge host will be registered under. You can override the default project by providing a project name in the user data. - - -
- -```yaml -stylus: - site: - paletteEndpoint: api.spectrocloud.com - edgeHostToken: yourEdgeRegistrationTokenHere -``` - -## Prerequisites - -- Tenant admin access. - - -- A tenant registration token is required. Refer to the [Create a Registration Token](/clusters/edge/site-deployment/site-installation/create-registration-token) guide for more information. -## Create Registration Token - -To create a registration token, use the following steps. - -
- -1. Log into [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Switch to the tenant scope. - - -3. Navigate to the left **Main Menu** and select **Settings**. - - -4. Select **Registration Tokens** in the **Tenant Settings Menu**. - - - -5. Click **Add New Registration Token**. - - - -6. Fill out the input fields and **Confirm** your changes. - - -7. Save the **Token** value. - - -Your next step is to decide how you want to provide the registration token. You can include the registration token in the user data added to the device before shipping. Or you can create a user data ISO and have the registration token in the secondary user data. Check out the [Apply Site User Data](/clusters/edge/site-deployment/site-installation/site-user-data/) resource to learn more about creating site-specific user data. - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the **Edge Hosts** tab. - - -Your Edge host is displayed and marked as **Registered** in the Edge hosts list. - - - -
-
-
-
-## Manual Registration
-
-In this mode, you must manually register the Edge host in Palette by providing the Edge host's unique identifier. Optionally, you can specify a project name to associate the Edge host with a particular project.
-
-
-Use the following steps to manually register an Edge host in Palette.
-
-## Prerequisites
-
-- Tenant admin access.
-
-
-- A tenant registration token is required. Refer to the [Create a Registration Token](/clusters/edge/site-deployment/site-installation/create-registration-token) guide for more information.
-
-
-- Access to the Edge host's unique identifier. You can get the unique identifier, or machine ID, from the console output as the Edge host powers on. The Edge host unique identifier has the default prefix `edge-`.
-
-  Example Output:
-  ```shell
-  time="2022-11-03T11:30:10Z" level=info Msg="starting stylus reset plugin"
-  time="2022-11-03T11:30:10Z" level=info Msg="reset cached site name from uuid, cached: edge-03163342f7f0e6fe20de095ed8548c93"
-  time="2022-11-03T11:30:10Z" level=info Msg="reset cached site name from uuid, new: edge-9e8e3342bafa9eb6d45f81c1f6714ea2" MachineID: edge-9e8e3342bafa9eb6d45f81c1f6714ea2
-  time="2022-11-03T11:30:19Z" level=info Msg="MachineIP: 10.239.10.145"
-  ```
-
-
-
-  You can also specify an Edge host's unique identifier in the user data by using the `stylus.site.name` parameter. Refer to the [Installer Configuration](/clusters/edge/edge-configuration/installer-reference) resource to learn more about available configuration parameters.
-
-
-
-## Register the Edge Host in Palette
-
-1. Log in to [Palette](https://console.spectrocloud.com).
-
-
-2. Navigate to the left **Main Menu** and select **Clusters**.
-
-
-3. Select the **Edge Hosts** tab.
-
-
-4. Click on **Add Edge Hosts**.
-
-
-5. Paste the Edge host's unique identifier in the **Edge Host IDs** input box.
-
-
-6. Specify any tags or pairing keys, if desired.
-
-
-7. Confirm your changes to register the Edge host.
-
-## Validate
-
-1. Log in to [Palette](https://console.spectrocloud.com).
-
-
-2. Navigate to the left **Main Menu** and select **Clusters**.
-
-
-3. Select the **Edge Hosts** tab.
-
-
-Your Edge host is displayed and marked as **Registered** in the Edge hosts list.
-
-
-
-
-
-
-
-## QR Code Registration
-
-You can use QR code-based automated registration to simplify the registration process. Upon boot-up, a QR code is displayed on the Edge host's console if this option was enabled during the installation phase.
-
-Site operators scan the QR code to visit the registration page. This web page pre-populates the Edge host's unique ID in the web app and provides a list of Edge sites they can associate with this Edge host.
-
-Site operators can select a site and submit a registration request. The web application automatically creates the Edge host entry in Palette and defines a cluster with that Edge host. This workflow also supports adding Edge hosts to an existing host cluster.
-
- - - -## Prerequisites - -- A tenant registration token is required. Refer to the [Create a Registration Token](/clusters/edge/site-deployment/site-installation/create-registration-token) guide for more information. - - -- Access to the Spectro Cloud GitHub repository that hosts the Palette Edge Registration App. Contact our sales team at [sales@spectrocloud.com](mailto:sales@spectrocloud.com) to gain access. - - -- Sufficient permissions to enable third-party integrations with a GitHub repository. - - -- A [Vercel](https://vercel.com/) account or a similar serverless website hosting service. - - -- Experience deploying and maintaining web applications to serverless website hosting services. - - -- git v2.39.0 or greater. - -## Enable Palette Edge Registration App - - -We provide you with a sample serverless application, the Palette Edge Registration App. The Palette Edge Registration App is built on Next.js and deployed using the Vercel platform. - -Use the following steps to enable this workflow. - -
-
-
-1. Clone the repository.
-
-
-2. Configure Vercel or your hosting provider to [automatically deploy](https://vercel.com/docs/concepts/deployments/git) pull requests against the main branch.
-
-
-3. Update the sample site provided with your site names and locations. Make the required changes in the **pages/index.js** file. The **readme** file provides additional details about the files to change and instructions on how to build and test the application locally.
-
-
-4. Map the infrastructure and add-on cluster profiles to be used with each site. Refer to the [Model Edge Native Cluster Profile](/clusters/edge/site-deployment/model-profile) guide to learn more about Edge Native cluster profiles.
-
-
-5. Specify site-specific Virtual IP (VIP) addresses or DNS values for each site.
-
-
-6. Compile and test the code locally.
-
-
-7. Create a GitHub pull request against your main branch to automatically trigger the build process and deploy the app.
-
-
-8. Provide the URL of the deployed app in the Edge Installer user data. Use the `stylus.site.registrationURL` parameter.
-
-
-   ```yaml
-   stylus:
-     site:
-       paletteEndpoint: api.spectrocloud.com
-       registrationURL: https://edge-registration-url.vercel.app
-   ```
-
-9. Your next step is to decide how you want to provide the registration URL value. You can include the registration URL in the user data added to the device before shipping. Or, you can create a user data ISO and include the registration URL in the secondary user data. Check out the [Perform Site Install](/clusters/edge/site-deployment/site-installation/site-user-data/) guide to learn more about creating site-specific user data.
-
-
-10. Power on the Edge host device and scan the QR code.
-
-
-11. Fill out the required information in the web application and submit the registration request.
-
-## Validate
-
-1. Log in to [Palette](https://console.spectrocloud.com).
-
-
-2. Navigate to the left **Main Menu** and select **Clusters**.
-
-
-3. Select the **Edge Hosts** tab.
-
-
-Your Edge host is displayed and marked as **Registered** in the Edge hosts list.
-
-
-
- -
- -# Next Steps - -The next step in the installation process is to add the Edge host to a cluster or to create an Edge Native host cluster. Check out the [Create Cluster Definition](/clusters/edge/site-deployment/site-installation/cluster-deployment) guide to complete the last step of the installation process. diff --git a/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation/07-cluster-deployment.md b/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation/07-cluster-deployment.md deleted file mode 100644 index a72f5eb84a..0000000000 --- a/content/docs/04-clusters/03-edge/09-site-deployment/05-site-installation/07-cluster-deployment.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -title: "Create Cluster Definition" -metaTitle: "Create Cluster Definition" -metaDescription: "Define your Edge cluster using the Edge hosts that are registered and available." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -To complete the Edge Installation process, an Edge host must become a member of a host cluster. You can add an Edge host to an existing host cluster of type Edge Native, or you can create a new host cluster for Edge hosts and make the Edge host a member. - - -
- - - - - - -Use the following steps to create a new host cluster so that you can add Edge hosts to the node pools. - -## Prerequisites - -- A registered Edge host. - -## Create Cluster - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Click on **Add New Cluster**. - - -4. Choose **Edge Native** for the cluster type and click **Start Edge Native Configuration**. - - -5. Give the cluster a name, description, and tags. Click on **Next**. - - -6. Select a cluster profile. If you don't have a cluster profile for Edge Native, refer to the [Create Edge Native Cluster Profile](/clusters/edge/site-deployment/model-profile#createedgenativeclusterprofile) guide. Click on **Next** after you have selected a cluster profile. - -7. Review your cluster profile values and make changes as needed. Click on **Next**. - - -8. Provide the host cluster with the Virtual IP (VIP) address used by the physical site. You can also select any SSH keys in case you need to remote into the host cluster. You can also provide a list of Network Time Protocol (NTP) servers. Click on **Next**. - - -9. The node configuration page is where you can specify what Edge hosts make up the host cluster. Assign Edge hosts to the **master-pool** and the **worker-pool**. When you have completed configuring the node pools, click on **Next**. - - -10. The Settings page is where you can configure a patching schedule, security scans, backup settings, and set up Role-Based Access Control (RBAC). Review the settings and make changes if needed. Click on **Validate**. - - -11. Review the settings summary and click on **Finish Configuration** to deploy the cluster. - -After you create the cluster, the Palette Edge Host agent will start the installation process. You can track the installation progress in Palette. The cluster overview page displays a summary of the progress. Use the *Events* tab to review detailed logs. - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the host cluster you created to view its details page. - - -4. Review the **Cluster Status**. Ensure the **Cluster Status** field displays **Running**. - -You can also use the command `kubectl get nodes` to review the status of all nodes in the cluster. Check out the [Access Cluster with CLI](/clusters/cluster-management/palette-webctl#overview) guide to learn how to use `kubectl` with a host cluster. - - - - - - -You can add Edge hosts to the node pool of an existing host cluster. Use the following steps to add the Edge host to the node pool. - -## Prerequisites - -- A registered Edge host. - -- A host cluster of type Edge Native. - - - -When adding a new Edge host to an existing cluster, ensure you are not creating a scenario where [etcd](https://etcd.io/) could fail in establishing a quorum. Quorum failures typically result when there is an even number of nodes. -To learn more, check out the resource from the etcd documentation titled [Why an odd number of cluster members](https://etcd.io/docs/v3.3/faq/#why-an-odd-number-of-cluster-members). - - - -## Add Edge Host to Node Pool - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Use the **Cloud Types drop-down Menu** and select **Edge Native**. - - -4. Select the host cluster to add the registered Edge host. - - -5. Click on the **Nodes** tab. - - -6. 
Select the node pool to add the Edge host and click the **Edit** button. - - -7. Navigate to the **Edge Hosts drop-down Menu** and select your Edge host. - - -8. Confirm your changes. - -The Palette Edge Host agent will start the installation process. You can track the installation progress in Palette. The cluster overview page displays a summary of the progress. Use the **Events** tab to review detailed logs. - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the host cluster you created to view its details page. - - -4. Review the **Cluster Status**. Ensure the **Cluster Status** field displays **Running**. - -You can also use the command `kubectl get nodes` to review the status of all nodes in the cluster. Check out the [Access Cluster with CLI](/clusters/cluster-management/palette-webctl#overview) to learn how to use `kubectl` with a host cluster. - - - - - - - -
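If you have configured `kubectl` access as described in the [Access Cluster with CLI](/clusters/cluster-management/palette-webctl#overview) guide, a quick command-line sanity check might look like the following sketch.

```bash
# Run from a terminal where the cluster's kubeconfig is active.
# Every Edge host that joined the cluster should report a Ready status.
kubectl get nodes

# Optionally, confirm that the workloads defined in the cluster profile are running.
kubectl get pods --all-namespaces
```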
- - diff --git a/content/docs/04-clusters/03-edge/09-site-deployment/06-deploy-cluster.md b/content/docs/04-clusters/03-edge/09-site-deployment/06-deploy-cluster.md deleted file mode 100644 index 83d4f28f05..0000000000 --- a/content/docs/04-clusters/03-edge/09-site-deployment/06-deploy-cluster.md +++ /dev/null @@ -1,1007 +0,0 @@ ---- -title: "Deploy an Edge Cluster on VMware" -metaTitle: "Deploy an Edge Cluster on VMware" -metaDescription: "Learn how to deploy an Edge host using VMware as the deployment platform. You will learn how to use the Edge Installer ISO, create a cluster profile, and deploy a Kubernetes cluster to the Edge host on VMware." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from "shared/components/common/PointOfInterest"; - -# Deploy Edge Cluster - -Palette supports deploying Kubernetes clusters in remote locations to support edge computing workloads. Palette's Edge solution enables you to deploy your edge devices, also called Edge hosts, which contain all the required software dependencies to support Palette-managed Kubernetes cluster deployment. - -Maintaining consistency while preparing edge devices at scale can be challenging for operation teams. For example, imagine you are an IT administrator for a retail company that has decided to expand to 1000 new stores this year. The company needs you to deploy Kubernetes clusters in each new store using edge devices, such as Intel NUC, and ensure each device has the same software and security configurations. Your job is to prepare each device so the development team can deploy Kubernetes clusters on each device. You have decided to use Palette's Edge solution to help you meet the organizational requirements. You will prepare a small set of Edge devices and deploy a Kubernetes cluster to verify readiness for consistent deployment across all physical sites. - -The following points summarize the primary stages of Edge cluster deployment to a production environment: - -- Create Edge artifacts such as the Edge Installer ISO, provider images, and content bundles. - -- Initialize the Edge device with the Edge installer ISO. The ISO includes a base Operating System (OS) and other configurations such as networking, proxy, security, tooling, and user privileges. - -- Create a cluster profile to ensure consistency in all the Edge hosts. The cluster profile lets you declare the desired software dependencies for each Kubernetes cluster. - - -Following the primary stages outlined above, this tutorial will guide you to build the Edge artifacts (Edge installer ISO image and provider images) and use the Edge installer ISO image to prepare Edge hosts. Next, you will use the provider image to create a cluster profile and then deploy a cluster on those Edge hosts. You will use VMware to deploy the Edge hosts to simulate a bare metal environment. - - -For learning purposes, you will set up Virtual Machines (VMs) as Edge hosts and deploy a cluster on Edge host VMs. VMs provide a more accessible Edge learning experience, as you do not require connecting to physical Edge devices. The diagram below shows the main steps to prepare Edge hosts and deploy a cluster. - - -![An overarching diagram showing the tutorial workflow.](/tutorials/edge/clusters_edge_deploy-cluster_overarching.png) - - -# Prerequisites - -To complete this tutorial, you will need the following: -
- -* Access to a VMware vCenter environment where you will provision VMs as Edge hosts. You will need the server URL, login credentials, and names of the data center, data store, resource pool, folder, cluster, and DHCP-enabled network. - - -* The VMs you will prepare as Edge hosts must be attached to a DHCP-enabled network. To ensure DHCP is enabled on the network, review the network settings on your ESXi Host. -You can refer to the [Prepare the DHCP Server for vSphere](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.esxi.install.doc/GUID-9D8333F5-5F5B-4658-8166-119B44895098.html) guide from VMware to configure a DHCP server on the network. - - -* A physical or virtual Linux machine with *AMD64* (also known as *x86_64*) processor architecture to build the Edge artifacts. You can issue the following command in the terminal to check your processor architecture. -
- - ```bash - uname -m - ``` -
- - - - The Linux machine must have network connectivity to your VMware vCenter environment. - - - -* The following minimum hardware configuration: - - 4 CPU - - 8 GB memory - - 50 GB storage - - -* [Git](https://cli.github.com/manual/installation). Ensure git installation by issuing the `git --version` command. - - -* [Docker Engine](https://docs.docker.com/engine/install/) version 18.09.x or later. You can use the `docker --version` command to view the existing Docker version. You should have root-level or `sudo` privileges on your Linux machine to create privileged containers. - - -* A [Spectro Cloud](https://console.spectrocloud.com) account. If you have not signed up, you can sign up for a [free trial](https://www.spectrocloud.com/free-tier/). - - -* A Palette registration token for pairing Edge hosts with Palette. You will need tenant admin access to Palette to generate a new registration token. For detailed instructions, refer to the [Create Registration Token](/clusters/edge/site-deployment/site-installation/create-registration-token) guide. Copy the newly created token to a clipboard or notepad file to use later in this tutorial. - - The screenshot below shows a sample registration token in the **Tenant Settings** > **Registration Tokens** section in Palette. - - ![A screenshot of a registration token in Palette](/tutorials/edge/clusters_edge_deploy-cluster_registration-token.png) - - -# Build Edge Artifacts - -In this section, you will use the [CanvOS](https://github.com/spectrocloud/CanvOS/blob/main/README.md) utility to build an Edge installer ISO image and provider images for all the Palette-supported Kubernetes versions. The utility builds multiple provider images, so you can use either one that matches the desired Kubernetes version you want to use with your cluster profile. - -This tutorial builds and uses the provider image compatible with K3s v1.25.2. -
- -## Check Out Starter Code - -Issue the following and subsequent command-line instructions on your Linux machine, which this tutorial refers to as the development environment. - -Clone the [CanvOS](https://github.com/spectrocloud/CanvOS) GitHub repository containing the starter code to build Edge artifacts. -
- -```bash -git clone https://github.com/spectrocloud/CanvOS.git -``` - -Change to the **CanvOS** directory. -
- -```bash -cd CanvOS -``` - -View the available [git tag](https://github.com/spectrocloud/CanvOS/tags). -
-
-```bash
-git tag
-```
-
-Check out the newest available tag. This guide uses the **v3.4.3** tag as an example.
-
- -```shell -git checkout v3.4.3 -``` -
- -## Define Arguments - -CanvOS requires arguments such as image tag, registry, repository, and OS distribution. The arguments are defined in the **.arg** file. In this step, you will create the **.arg** file and define all the required arguments. - - -Issue the command below to assign an image tag value for the provider images. This guide uses the default value `demo` as an example. However, you can assign any lowercase and alphanumeric string to the `CUSTOM_TAG` variable. -
- -```bash -export CUSTOM_TAG=demo -``` -
-
-Issue the command below to create the **.arg** file with the custom tag. The remaining arguments will use the default values. For example, `ubuntu` is the default operating system, `demo` is the default tag, and [ttl.sh](https://ttl.sh/) is the default image registry. The default ttl.sh image registry is free and does not require a sign-up. Images pushed to ttl.sh are ephemeral and expire after 24 hours.
-
-Using the arguments defined in the **.arg** file, the final provider images you generate are named using the pattern `[IMAGE_REGISTRY]/[IMAGE_REPO]:[K8S_DISTRIBUTION]-[K8S_VERSION]-[PE_VERSION]-[CUSTOM_TAG]`. In this example, the provider images will be `ttl.sh/ubuntu:k3s-1.25.2-v3.4.3-demo`. Refer to the **.arg.template** sample file in the current directory or the [README](https://github.com/spectrocloud/CanvOS#readme) to learn more about the default values.
-
- -```bash -cat << EOF > .arg -CUSTOM_TAG=$CUSTOM_TAG -IMAGE_REGISTRY=ttl.sh -OS_DISTRIBUTION=ubuntu -IMAGE_REPO=ubuntu -OS_VERSION=22 -K8S_DISTRIBUTION=k3s -ISO_NAME=palette-edge-installer -PE_VERSION=$(git describe --abbrev=0 --tags) -platform=linux/amd64 -EOF -``` - -View the newly created file to ensure the arguments are defined per your requirements. -
- -```bash -cat .arg -``` -
- - -Refer to the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide to learn more about customizing arguments. -
-
-## Create User Data
-
-Next, you will create a **user-data** file that embeds the tenant registration token and the Edge host's login credentials in the Edge Installer ISO image.
-
-
-Issue the command below to save your tenant registration token to a local variable. Replace the `[your_token_here]` placeholder with your actual registration token.
-
- -```bash -export token=[your_token_here] -``` - - -Use the following command to create the **user-data** file containing the tenant registration token. You can click on the *Points of Interest* numbers below to learn more about the main attributes relevant to this example. -
- - - - -```shell -cat << EOF > user-data -#cloud-config -stylus: - site: - edgeHostToken: $token -install: - poweroff: true -users: - - name: kairos - passwd: kairos -EOF -``` - - - -Review the newly created user data file. -
- -```bash -cat user-data -``` -The expected output should show that the `edgeHostToken` and login credentials for Edge hosts are set correctly. The `edgeHostToken` value must match your Palette registration token. Otherwise, your Edge hosts will not register themselves with Palette automatically. Below is a sample output with a dummy token value. -
- -```bash hideClipboard -#cloud-config -stylus: - site: - edgeHostToken: 62ElvdMeX5MdOESgTleBjjKQg8YkaIN3 -install: - poweroff: true -users: - - name: kairos - passwd: kairos -``` - -
- -## Build Artifacts - -The CanvOS utility uses [Earthly](https://earthly.dev/) to build the target artifacts. Issue the following command to start the build process. -
- -```bash -sudo ./earthly.sh +build-all-images -``` - -```bash coloredLines=2-2 hideClipboard -# Output condensed for readability -===================== Earthly Build SUCCESS ===================== -Share your logs with an Earthly account (experimental)! Register for one at https://ci.earthly.dev. -``` - -This command may take 15-20 minutes to finish depending on the hardware resources of the host machine. Upon completion, the command will display the manifest, as shown in the example below, that you will use in your cluster profile later in this tutorial. Note that the `system.xxxxx` attribute values in the manifest example are the same as what you defined earlier in the **.arg** file. - -Copy and save the output attributes in a notepad or clipboard to use later in your cluster profile. -
- -```bash -pack: - content: - images: - - image: "{{.spectro.pack.edge-native-byoi.options.system.uri}}" -options: - system.uri: "{{ .spectro.pack.edge-native-byoi.options.system.registry }}/{{ .spectro.pack.edge-native-byoi.options.system.repo }}:{{ .spectro.pack.edge-native-byoi.options.system.k8sDistribution }}-{{ .spectro.system.kubernetes.version }}-{{ .spectro.pack.edge-native-byoi.options.system.peVersion }}-{{ .spectro.pack.edge-native-byoi.options.system.customTag }}" - system.registry: ttl.sh - system.repo: ubuntu - system.k8sDistribution: k3s - system.osName: ubuntu - system.peVersion: v3.4.3 - system.customTag: demo - system.osVersion: 22 -``` -
- -## View Artifacts - -After completing the build process, list the edge installer ISO image and checksum by issuing the following command from the **CanvOS** directory. -
-
-```bash
-ls build/
-```
-
-```bash hideClipboard
-# Output
-palette-edge-installer.iso
-palette-edge-installer.iso.sha256
-```
-
-
-Export the path to the **build** directory, which contains the ISO file, to the `ISOFILEPATH` local variable. Later in the tutorial, you will use this local variable to mount the **build** directory into a Docker container.
-
- -```bash -export ISOFILEPATH=$PWD/build -echo $ISOFILEPATH -``` - - -List the Docker images to review the created provider images. By default, provider images are created for all the Palette-supported Kubernetes versions. You can identify the provider images by the image tag value you used in the **.arg** file's `CUSTOM_TAG` variable. -
- -```shell -docker images --filter=reference='*/*:*demo' -``` - -```bash coloredLines=3-4 hideClipboard -# Output -REPOSITORY TAG IMAGE ID CREATED SIZE -ttl.sh/ubuntu k3s-1.24.6-v3.4.3-demo 3a672a023bd3 45 minutes ago 4.61GB -ttl.sh/ubuntu k3s-1.25.2-v3.4.3-demo 0217de3b9e7c 45 minutes ago 4.61GB -``` -
-
-## Push Provider Images
-
-Push the provider images to the image registry indicated in the **.arg** file so that you can reference the provider image later in your cluster profile.
-
-Because this tutorial uses the provider image compatible with K3s v1.25 in the cluster profile, use the following command to push that image to the image registry. If you want to use the provider image compatible with K3s v1.24 instead, push that version to the image registry. The example below, and the default behavior, uses the [ttl.sh](https://ttl.sh/) image registry. This image registry is free and does not require you to sign up to use it. Images pushed to ttl.sh are ephemeral and expire after 24 hours.
-
- -```bash -docker push ttl.sh/ubuntu:k3s-1.25.2-v3.4.3-demo -``` - - - -As a reminder, [ttl.sh](https://ttl.sh/) is a short-lived image registry. If you do not use these provider images in your cluster profile within 24 hours of pushing to *ttl.sh*, they will expire and must be re-pushed. If you want to use a different image registry, refer to the Advanced workflow in the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide to learn how to use another registry. - - -
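The 24-hour lifetime of ttl.sh images can be inconvenient outside of a short walkthrough. As a sketch, and assuming you have push access to a registry of your own (the registry name below is a placeholder), you could retag the same provider image and push it there instead. If you do, update the `IMAGE_REGISTRY` and `IMAGE_REPO` values in the **.arg** file and the matching `system.registry` and `system.repo` values in your cluster profile.

```bash
# Retag the provider image for a hypothetical private registry, then push it.
docker tag ttl.sh/ubuntu:k3s-1.25.2-v3.4.3-demo registry.example.com/edge/ubuntu:k3s-1.25.2-v3.4.3-demo
docker push registry.example.com/edge/ubuntu:k3s-1.25.2-v3.4.3-demo
```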
- - -# Provision Virtual Machines - -In this section, you will create a VM template in VMware vCenter from the Edge installer ISO image and clone that VM template to provision three VMs. Think of a VM template as a snapshot that can be used to provision new VMs. You cannot modify templates after you create them, so cloning the VM template will ensure all VMs have *consistent* guest OS, dependencies, and user data configurations installed. - -This tutorial example will use [Packer](https://www.packer.io/) to create a VM template from the Edge installer ISO image. Later, it will use [GOVC](https://github.com/vmware/govmomi/tree/main/govc#govc) to clone the VM template to provision three VMs. You do not have to install Packer or GOVC in your Linux development environment. You will use our official tutorials container that already contains the required tools.
-
-## Create a VM Template
-
-You will use a **heredoc** script to collect the VMware vCenter details that Packer needs to create the VM template. The script prompts you to enter your VMware vCenter environment details and saves them as environment variables in a file named **.packerenv**. Packer reads the environment variables during the build process.
-
-Before you invoke the **heredoc** script, have values handy in a notepad for the VMware vCenter environment variables listed in the table below.
-
- -|**Variable**|**Description**| **How to find its value?**| -|---|---|---| -| `PKR_VAR_vcenter_server` | vCenter server URL |Check with your VMware data center administrator. Omit `http://` or `https://` in the URL. Example, use `vcenter.spectrocloud.dev`. | -|`PKR_VAR_vcenter_username`| vSphere client username |Request credentials from your VMware data center administrator. Example: `myusername@vsphere.local`| -|`PKR_VAR_vcenter_password`|vSphere client password|--| -|`PKR_VAR_vcenter_datacenter`|Data center name |Expand your vSphere client's main menu and select **Inventory** > **Hosts and Clusters**. The data center name is displayed in the left navigation tree.| -|`PKR_VAR_vcenter_cluster`|Cluster name | Expand the data center inventory to view the cluster name in the left navigation tree. | -|`PKR_VAR_vcenter_resource_pool`|Resource pool name | Expand the cluster inventory to view the resource pool name. | -|`PKR_VAR_vcenter_folder`|Folder name | Switch to the **VMs and Templates** view in your vSphere client. The folder name is displayed in the left navigation tree.| -|`PKR_VAR_vcenter_datastore`|Datastore name | Switch to the **Storage** view in your vSphere client. The datastore name is displayed in the left navigation tree.| -|`PKR_VAR_vcenter_network`| Network name | Switch to the **Networking** view in your vSphere client. The network name is displayed in the left navigation tree.| - - -Use the **heredoc** script to create the **.packerenv** file shown below that contains the VMware vCenter details as environment variables. -
- -```bash -cat << EOF > .packerenv -PKR_VAR_vcenter_server=$(read -ep 'Enter vCenter Server URL without http:// or https://, for example: vcenter.spectrocloud.dev ' vcenter_server && echo $vcenter_server) -PKR_VAR_vcenter_username=$(read -ep 'Enter vCenter Username value: ' vcenter_username && echo $vcenter_username) -PKR_VAR_vcenter_password=$(read -ep 'Enter vCenter Password value: ' vcenter_password && echo $vcenter_password) -PKR_VAR_vcenter_datacenter=$(read -ep 'Enter vCenter Datacenter name: ' vcenter_datacenter && echo $vcenter_datacenter) -PKR_VAR_vcenter_cluster=$(read -ep 'Enter vCenter Cluster name: ' vcenter_cluster && echo $vcenter_cluster) -PKR_VAR_vcenter_resource_pool=$(read -ep 'Enter vCenter Resource Pool name: ' vcenter_resource_pool && echo $vcenter_resource_pool) -PKR_VAR_vcenter_folder=$(read -ep 'Enter vCenter Folder name: ' vcenter_folder && echo $vcenter_folder) -PKR_VAR_vcenter_datastore=$(read -ep 'Enter vCenter Datastore name: ' vcenter_datastore && echo $vcenter_datastore) -PKR_VAR_vcenter_network=$(read -ep 'Enter vCenter Network name: ' vcenter_network && echo $vcenter_network) -EOF -``` -View the file to ensure you have filled in the details correctly. -
- -```bash -cat .packerenv -``` - -You will use the **.packerenv** file later in the tutorial when you start Packer. - -Next, verify the `ISOFILEPATH` local variable has the path to the ISO file. The `docker run` command uses this variable to bind mount the host's **build** directory to the container. -
- -```bash -echo $ISOFILEPATH -``` - - - -The environment variable you set using `export [var-name]=[var-value]` will not persist across terminal sessions. If you opened a new terminal session in your development environment, you will lose the `ISOFILEPATH` variable and will need to reset it. - - -
-
-The next step is to use the following `docker run` command to trigger the Packer build process that creates the VM template. Here is an explanation of the options and sub-command used below:
-
-
-- The `--env-file` option reads the **.packerenv** file.
-
-
-- The `--volume` option mounts a local directory to our official tutorials container, `ghcr.io/spectrocloud/tutorials:1.0.7`.
-
-
-- The `sh -c "cd edge/vmware/packer/ && packer build -force --var-file=vsphere.hcl build.pkr.hcl"` shell sub-command changes to the container's **edge/vmware/packer/** directory and invokes `packer build` to create the VM template. The `packer build` command has the following options:
-
-  - The `-force` flag destroys any existing template.
-  - The `--var-file` option reads the **vsphere.hcl** file from the container. This file contains the VM template name, VM configuration, and ISO file name to use. The VM configuration conforms to the [minimum device requirements](https://docs.spectrocloud.com/clusters/edge/architecture/#minimumdevicerequirements).
-
-The **vsphere.hcl** file content is shown below for your reference. This tutorial does not require you to modify these configurations.
-
- -```bash hideClipboard -# VM Template Name -vm_name = "palette-edge-template" -# VM Settings -vm_guest_os_type = "ubuntu64Guest" -vm_version = 14 -vm_firmware = "bios" -vm_cdrom_type = "sata" -vm_cpu_sockets = 4 -vm_cpu_cores = 1 -vm_mem_size = 8192 -vm_disk_size = 51200 -thin_provision = true -disk_eagerly_scrub = false -vm_disk_controller_type = ["pvscsi"] -vm_network_card = "vmxnet3" -vm_boot_wait = "5s" -# ISO Objects -iso = "build/palette-edge-installer.iso" -iso_checksum = "build/palette-edge-installer.iso.sha256" -``` -
- - - -Should you need to change the VM template name or VM settings defined in the **vsphere.hcl** file, or review the Packer script, you must open a bash session into the container using the `docker run -it --env-file .packerenv --volume "${ISOFILEPATH}:/edge/vmware/packer/build" ghcr.io/spectrocloud/tutorials:1.0.7 bash` command, and change to the **edge/vmware/packer/** directory to make the modifications. After you finish the modifications, issue the `packer build -force --var-file=vsphere.hcl build.pkr.hcl` command to trigger the Packer build process. - - -
- -Issue the following command to trigger the Packer build process to create a VM template in the VMware vCenter. It will also upload and keep a copy of the **palette-edge-installer.iso** to the **packer_cache/** directory in the specified datastore. - -
- -```bash -docker run --interactive --tty --rm \ - --env-file .packerenv \ - --volume "${ISOFILEPATH}:/edge/vmware/packer/build" \ - ghcr.io/spectrocloud/tutorials:1.0.7 \ - sh -c "cd edge/vmware/packer/ && packer build -force --var-file=vsphere.hcl build.pkr.hcl" -``` - -Depending on your machine and network, the build process can take 7-10 minutes to finish. -
- -```bash coloredLines=10-11 hideClipboard -# Sample output -==> vsphere-iso.edge-template: Power on VM... - vsphere-iso.edge-template: Please shutdown virtual machine within 10m0s. -==> vsphere-iso.edge-template: Deleting Floppy drives... -==> vsphere-iso.edge-template: Eject CD-ROM drives... -==> vsphere-iso.edge-template: Deleting CD-ROM drives... -==> vsphere-iso.edge-template: Convert VM into template... -Build 'vsphere-iso.edge-template' finished after 7 minutes 13 seconds. -==> Wait completed after 7 minutes 13 seconds -==> Builds finished. The artifacts of successful builds are: ---> vsphere-iso.edge-template: palette-edge-template -``` -
- - -## Provision VMs - -Once Packer creates the VM template, you can use the template when provisioning VMs. In the next steps, you will use the [GOVC](https://github.com/vmware/govmomi/tree/main/govc#govc) tool to deploy a VM and reference the VM template that Packer created. Remember that the VM instances you are deploying simulate bare metal devices. - - -GOVC requires the same VMware vCenter details as the environment variables you defined earlier in the **.packerenv** file. Use the following command to source the **.packerenv** file and echo one of the variables to ensure the variables are accessible on your host machine. -
- -```bash -source .packerenv -echo $PKR_VAR_vcenter_server -``` - -Use the following command to create a **.goenv** environment file. The **.goenv** file contains the VMware vCenter credentials and information required to deploy VMs in your VMware environment. -
- -```bash -cat << EOF > .goenv -vcenter_server=$PKR_VAR_vcenter_server -vcenter_username=$PKR_VAR_vcenter_username -vcenter_password=$PKR_VAR_vcenter_password -vcenter_datacenter=$PKR_VAR_vcenter_datacenter -vcenter_datastore=$PKR_VAR_vcenter_datastore -vcenter_resource_pool=$PKR_VAR_vcenter_resource_pool -vcenter_folder=$PKR_VAR_vcenter_folder -vcenter_cluster=$PKR_VAR_vcenter_cluster -vcenter_network=$PKR_VAR_vcenter_network -EOF -``` -View the file to ensure variable values are set correctly. -
- -```bash -cat .goenv -``` - - -The next step is to use the following `docker run` command to clone the VM template and provision three VMs. Here is an explanation of the options and sub-command used below: -
- -- The `--env-file` option reads the **.goenv** file in our official `ghcr.io/spectrocloud/tutorials:1.0.7` tutorials container. - - -- The `sh -c "cd edge/vmware/clone_vm_template/ && ./deploy-edge-host.sh"` shell sub-command changes to the container's **edge/vmware/clone_vm_template/** directory and invokes the **deploy-edge-host.sh** shell script. - - -The **edge/vmware/clone_vm_template/** directory in the container has the following files: -
- -- **deploy-edge-host.sh** - Provisions the VMs. - - -- **delete-edge-host.sh** - Deletes the VMs. - - -- **setenv.sh** - Defines the GOVC environment variables, the number of VMs, a prefix string for the VM name, and the VM template name. Most of the GOVC environment variables refer to the variables you have defined in the **.goenv** file. - - -Below is the **setenv.sh** file content for your reference. This tutorial does not require you to modify these configurations. -
- -```bash hideClipboard -#!/bin/bash -# Number of VMs to provision -export NO_OF_VMS=3 -export VM_PREFIX="demo" -export INSTALLER_TEMPLATE="palette-edge-template" - -#### DO NOT MODIFY BELOW HERE #################### -# GOVC Properties -export GOVC_URL="https://${vcenter_server}" # Use HTTPS. For example, https://vcenter.company.com -export GOVC_USERNAME="${vcenter_username}" -export GOVC_PASSWORD="${vcenter_password}" -export GOVC_INSECURE=1 #1 if insecure -export GOVC_DATACENTER="${vcenter_datacenter}" -export GOVC_DATASTORE="${vcenter_datastore}" -export GOVC_NETWORK="${vcenter_network}" -export GOVC_RESOURCE_POOL="${vcenter_resource_pool}" -export GOVC_FOLDER="${vcenter_folder}" -``` -
- - - -Suppose you have changed the VM template name in the previous step or need to change the number of VMs to provision. In that case, you must modify the **setenv.sh** script. To do so, you can reuse the container bash session from the previous step if it is still active, or you can open another bash session into the container using the `docker run -it --env-file .goenv ghcr.io/spectrocloud/tutorials:1.0.7 bash` command. If you use an existing container bash session, create the **.goenv** file described above and source it in your container environment. Next, change to the **edge/vmware/clone_vm_template/** directory to modify the **setenv.sh** script, and issue the `./deploy-edge-host.sh` command to deploy the VMs. - - -
- -Issue the following command to clone the VM template and provision three VMs. -
- -```bash -docker run -it --rm \ - --env-file .goenv \ - ghcr.io/spectrocloud/tutorials:1.0.7 \ - sh -c "cd edge/vmware/clone_vm_template/ && ./deploy-edge-host.sh" -``` - -The cloning process can take 3-4 minutes to finish and displays output similar to that shown below. The output displays the Edge host ID for each VM, as highlighted in the sample output below. VMs use this host ID to auto-register themselves with Palette. -
- -```bash coloredLines=7-7 hideClipboard -# Sample output for one VM -Cloning /Datacenter/vm/sp-sudhanshu/palette-edge-template to demo-1...OK -Cloned VM demo-1 -Powering on VM demo-1 -Powering on VirtualMachine:vm-13436... OK -Getting UUID demo-1 -Edge Host ID VM demo-1 : edge-97f2384233b498f6aa8dec90c3437c28 -``` - -For each of the three VMs, copy the Edge host ID. An Edge host ID looks similar to `edge-97f2384233b498f6aa8dec90c3437c28`. -
- - - -You must copy the Edge host IDs for future reference. In addition, if auto registration fails you will need the Edge host IDs to manually register Edge hosts in Palette. - - - - -# Verify Host Registration - -Before deploying a cluster, you must verify Edge host registration status in Palette. - -Open a web browser and log in to [Palette](https://console.spectrocloud.com). Navigate to the left **Main Menu** and select **Clusters**. Click on the **Edge Hosts** tab and verify the three VMs you created are registered with Palette. - -![A screenshot showing the VMs automatically registered with Palette. ](/tutorials/edge/clusters_edge_deploy-cluster_edge-hosts.png) - - -If the three Edge hosts are not displayed in the **Edge hosts** tab, the automatic registration failed. If this happens, you can manually register hosts by clicking the **Add Edge Hosts** button and pasting the Edge host ID. Repeat this host registration process for each of the three VMs. -If you need help, the detailed instructions are available in the [Register Edge Host](/clusters/edge/site-deployment/site-installation/edge-host-registration) guide. -
- -# Deploy a Cluster - -Once you verify the host registration, the next step is to deploy a cluster. In this section, you will use the Palette User Interface (UI) to deploy a cluster that is made up of the three Edge hosts you deployed. -
- -## Create a Cluster Profile - -Validate you are in the **Default** project scope before creating a cluster profile. -
- -![A screenshot of Palette's Default scope selected.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_default-scope.png) - -
- - - -Next, create a cluster profile with the core infrastructure layers and a manifest of a sample application, [Hello Universe](https://github.com/spectrocloud/hello-universe#hello-universe). -Navigate to the left **Main Menu** and select **Profiles**. Click on the **Add Cluster Profile** button, and fill out the required input fields. The cluster profile wizard contains the following sections. -
- -### Basic Information - -Use the following values when filling out the **Basic Information** section. - -|**Field**|**Value**| -|---|---| -|Name|docs-ubuntu-k3s| -|Version|`1.0.0`| -|Description|Cluster profile as part of the edge cluster deployment tutorial.| -|Type|Full| -|Tags|`spectro-cloud-education, app:hello-universe, terraform_managed:false`| - -Click on **Next** to continue. -
- - -### Cloud Type - -In the **Cloud Type** section, choose **Edge Native** and click on **Next** at the bottom to proceed to the next section. -
-
-### Profile Layers
-
-In the **Profile Layers** section, add the following [BYOS Edge OS](/integrations/byoos) pack to the OS layer.
-
-|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**|
-|---|---|---|---|
-|OS|Public Repo|BYOS Edge OS|`1.0.0`|
-
-Replace the OS layer manifest with the following custom manifest so that the cluster profile can pull the provider image from the *ttl.sh* image registry. You may recall that the CanvOS script returned an output containing a custom manifest after building the Edge artifacts. You will copy the CanvOS output into the cluster profile's BYOOS pack YAML file.
-
-The `system.xxxxx` attribute values in the manifest below are the same as those you defined in the **.arg** file while building the Edge artifacts. Copy the code snippet below into the YAML editor for the BYOOS pack.
-
- -```yaml -pack: - content: - images: - - image: "{{.spectro.pack.edge-native-byoi.options.system.uri}}" -options: - system.uri: "{{ .spectro.pack.edge-native-byoi.options.system.registry }}/{{ .spectro.pack.edge-native-byoi.options.system.repo }}:{{ .spectro.pack.edge-native-byoi.options.system.k8sDistribution }}-{{ .spectro.system.kubernetes.version }}-{{ .spectro.pack.edge-native-byoi.options.system.peVersion }}-{{ .spectro.pack.edge-native-byoi.options.system.customTag }}" - system.registry: ttl.sh - system.repo: ubuntu - system.k8sDistribution: k3s - system.osName: ubuntu - system.peVersion: v3.4.3 - system.customTag: demo - system.osVersion: 22 -``` -
- - -The screenshot below shows you how to reference your provider OS image in a cluster profile by using the utility build output with the BYOOS pack. -
- -![A screenshot of k3s OS layer in a cluster profile.](/tutorials/edge/clusters_edge_deploy-cluster_edit-profile.png) - - - - - *ttl.sh* is a short-lived image registry. If you do not use the provider image in your cluster profile within 24 hours of pushing to *ttl.sh*, they will no longer exist and must be re-pushed. In a production environment, use a custom registry for hosting provider images. - - -
-
-Click on the **Next layer** button to add the following Kubernetes layer to your cluster profile.
-
-
-|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**|
-|---|---|---|---|
-|Kubernetes|Public Repo|Palette Optimized K3s|`1.25.x`|
-
-
-Select K3s version 1.25.x because earlier in this tutorial, you pushed a provider image compatible with K3s v1.25.2 to the *ttl.sh* image registry. The `system.uri` attribute of the BYOOS pack will reference the Kubernetes version you select using the `{{ .spectro.system.kubernetes.version }}` [macro](/clusters/cluster-management/macros).
-
-
-Click on the **Next layer** button, and add the following network layer. This example uses the Calico Container Network Interface (CNI). However, you can choose a different CNI pack that fits your needs, such as Flannel, Cilium, or Custom CNI.
-
-
-|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**|
-|---|---|---|---|
-|Network|Public Repo|Calico|`3.25.x`|
-
-
-Click on the **Confirm** button to complete the core infrastructure stack. Palette displays the newly created infrastructure profile as a layered diagram.
-
-Finally, click on the **Add Manifest** button to add the [Hello Universe](https://github.com/spectrocloud/hello-universe#readme) application manifest.
-
-![A screenshot of the add Manifest button.](/tutorials/edge/clusters_edge_deploy-cluster_add-manifest.png)
-
-Use the following values to add the Hello Universe manifest metadata.
-
-|**Field** |**Value**|
-|---|---|
-|Layer name| hello-universe|
-|Layer values (Optional)|Leave default|
-|Install order (Optional)|Leave default|
-|Manifests|Add new manifest, and name it `hello-universe`|
-
-When you provide the `hello-universe` value in the **Manifest** field, a blank text editor opens on the right. Copy the following manifest and paste it into the text editor.
-
- -```yaml -apiVersion: v1 -kind: Service -metadata: - name: hello-universe-service -spec: - type: NodePort - selector: - app: hello-universe - ports: - - protocol: TCP - port: 8080 - targetPort: 8080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: hello-universe-deployment -spec: - replicas: 2 - selector: - matchLabels: - app: hello-universe - template: - metadata: - labels: - app: hello-universe - spec: - containers: - - name: hello-universe - image: ghcr.io/spectrocloud/hello-universe:1.0.12 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 8080 -``` - -The screenshot below shows the manifest pasted into the text editor. Click on the **Confirm & Create** button to finish adding the manifest. - - ![A screenshot of Hello Universe application manifest.](/tutorials/edge/clusters_edge_deploy-cluster_add-manifest-file.png) - - -If there are no errors or compatibility issues, Palette displays the newly created full cluster profile for review. Verify the layers you added, and click on the **Next** button. -
- -Review all layers and click **Finish Configuration** to create the cluster profile. -
- -## Create a Cluster - -Click on the newly created cluster profile to view its details page. Click the **Deploy** button to deploy a new Edge cluster. -
- -![Screenshot of the Profile Layers success.](/tutorials/edge/clusters_edge_deploy-cluster_profile-success.png) - -The cluster deployment wizard displays the following sections. -
- -### Basic Information - -Use the following values in the **Basic Information** section. - -|**Field**|**Value**| -|---|---| -|Cluster name| docs-tutorial-cluster | -|Description| Cluster as part of the Edge tutorial.| -|Tags|`spectro-cloud-education, app:hello-universe, terraform_managed:false`| - -Click **Next** to continue. -
- -### Parameters - -The **Parameters** section offers you another opportunity to change the profile configuration. For example, clicking on the **BYOS Edge OS 1.0.0** layer allows you to configure the `system.registry`, `system.repo`, and other available attributes. - -Use the default values for all attributes across all layers and click **Next**. -
- -### Cluster configuration - -Provide the Virtual IP (VIP) address for the host cluster to use during the cluster configuration process. An Edge cluster virtual IP represents the entire cluster, and external clients or applications can use it to access services the Edge cluster provides. Ask your system administrator for an IP address you can use. It must be unique and not conflict with any other IP addresses in the network. - -If available, you can optionally select an SSH key to remote into the host cluster and provide a Network Time Protocol (NTP) server list. - -Click **Next** to continue. -
- -### Nodes configuration - -In this section, you will use the Edge hosts to create the cluster nodes. Use one of the Edge hosts as the control plane node and the remaining two as worker nodes. In this example, the control plane node is called the master pool, and the set of worker nodes is the worker pool. - -Provide the following details for the master pool. - -|**Field** | **Value for the master-pool**| -|---| --- | -|Node pool name| master-pool | -|Allow worker capability| Checked | -|Additional Labels (Optional) | None | -|[Taints](/clusters/cluster-management/taints/)|Off| -|Pool Configuration > Edge Hosts | Choose one of the registered Edge hosts.
Palette will automatically display the Nic Name for the selected host. | - -The screenshot below shows an Edge host added to the master pool. - -![Screenshot of an Edge host added to the master pool.](/tutorials/edge/clusters_edge_deploy-cluster_add-master-node.png) - - -Similarly, provide details for the worker pool, and add the remaining two Edge hosts to the worker pool. - -|**Field** | **Value for the worker-pool**| -|---| --- | -|Node pool name| worker-pool | -|Additional Labels (Optional) | None | -|Taints|Off| -|Pool Configuration > Edge Hosts | Choose one or more registered Edge hosts. | - -The screenshot below shows two Edge hosts added to the worker pool. - -![Screenshot of Edge hosts added to the worker pool.](/tutorials/edge/clusters_edge_deploy-cluster_add-worker-node.png) - -Click **Next** to continue. -
- -### Settings - -This section displays options for OS patching, scheduled scans, scheduled backups, cluster role binding, and location. Use the default values, and click on the **Validate** button. -
- -### Review - -Review all configurations in this section. The **Review** page displays the cluster name, tags, node pools, and layers. If everything looks good, click on the **Finish Configuration** button to finish deploying the cluster. Deployment may take up to *20 minutes* to finish. - -While deployment is in progress, Palette displays the cluster status as **Provisioning**. While you wait for the cluster to finish deploying, you can explore the various tabs on the cluster details page, such as **Overview**, **Workloads**, and **Events**. -
- -# Validate - -In Palette, navigate to the left **Main Menu** and select **Clusters**. Select your cluster to display the cluster **Overview** page and monitor cluster provisioning progress. - - -When cluster status displays **Running** and **Healthy**, you can access the application from the exposed service URL with the port number displayed. One random port between 30000-32767 is exposed for the Hello Universe application. Click on the port number to access the application. - -The screenshot below highlights the NodePort to access the application. - -![Screenshot of highlighted NodePort to access the application.](/tutorials/edge/clusters_edge_deploy-cluster_access-service.png) - - - -Clicking on the exposed NodePort displays the Hello Universe application. -
- - - -We recommend waiting to click on the service URL, as it takes one to three minutes for DNS to properly resolve the public NodePort URL. This prevents the browser from caching an unresolved DNS request. - - - - -![Screenshot of successfully accessing the Hello Universe application.](/tutorials/edge/clusters_edge_deploy-cluster_hello-universe.png) - - -You have successfully provisioned an Edge cluster and deployed the Hello Universe application on it. - -
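If you prefer to confirm the service from the command line, the following sketch assumes you have downloaded the cluster's kubeconfig from Palette and can reach the cluster with `kubectl`. The service name comes from the Hello Universe manifest you added to the cluster profile.

```bash
# The PORT(S) column shows the assigned NodePort in the 30000-32767 range,
# for example 8080:32411/TCP.
kubectl get service hello-universe-service
```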
- -# Cleanup - -The following steps will guide you in cleaning up your environment, including the cluster, cluster profile, and Edge hosts. -
- -## Delete Cluster and Profile - -In Palette, display the cluster details page. Click on the **Settings** button to expand the **drop-down Menu**, and select the **Delete Cluster** option, as shown in the screenshot below. - - -![Screenshot of deleting a cluster.](/tutorials/edge/clusters_edge_deploy-cluster_delete-cluster.png) - - -Palette prompts you to enter the cluster name and confirm the delete action. Type the cluster name to delete the cluster. The cluster status changes to **Deleting**. Deletion takes up to 10 minutes. - - -After you delete the cluster, click **Profiles** on the left **Main Menu**, and select the profile to delete. Choose the **Delete** option in the **three-dot Menu**, as shown in the screenshot below. - - -![Screenshot of deleting a cluster profile.](/tutorials/edge/clusters_edge_deploy-cluster_delete-profile.png) - - -Wait for Palette to successfully delete the resources. -
- -## Delete Edge Hosts - -Switch back to the **CanvOS** directory in the Linux development environment containing the **.goenv** file, and use the following command to delete the Edge hosts. -
- -```bash -docker run --interactive --tty --rm --env-file .goenv \ - ghcr.io/spectrocloud/tutorials:1.0.7 \ - sh -c "cd edge/vmware/clone_vm_template/ && ./delete-edge-host.sh" -``` - -
- -## Delete Edge Artifacts - -If you want to delete Edge artifacts from your Linux development environment, delete the Edge installer ISO image and its checksum by issuing the following commands from the **CanvOS/** directory. -
- -```bash -rm build/palette-edge-installer.iso -rm build/palette-edge-installer.iso.sha256 -``` - -Issue the following command to list all images in your current development environment. -
- -```bash -docker images -``` - -Note the provider image name and tags, and use the following command syntax to remove all provider images. - -
- -```bash -docker image rm --force ttl.sh/ubuntu:k3s-1.25.2-v3.4.3-demo -docker image rm --force ttl.sh/ubuntu:k3s-1.24.6-v3.4.3-demo -``` -
-
-## Clean up VMware vCenter Environment
-
-Navigate to **Inventory** > **VMs and Templates** in your vSphere client. To delete the **palette-edge-template** VM template, right-click on it and choose the **Delete** option from the **drop-down Menu**.
-
-Switch to the **Storage** view in your vSphere client. To delete the **palette-edge-installer.iso** file from the **packer_cache/** directory in the VMware vCenter datastore, right-click on it and choose the **Delete** option from the **drop-down Menu**.
-
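If you prefer the command line over the vSphere client, GOVC can perform the same cleanup. This is a sketch only; it assumes the GOVC environment variables from **setenv.sh** are exported in your session, and depending on your vSphere configuration you may need to convert the template back to a regular VM before destroying it.

```bash
# Convert the template back to a VM if your environment requires it, then remove it.
govc vm.markasvm palette-edge-template
govc vm.destroy palette-edge-template

# Remove the cached installer ISO from the datastore.
govc datastore.rm packer_cache/palette-edge-installer.iso
```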
- -# Wrap-Up - -Building Edge artifacts allows you to prepare Edge hosts and deploy Palette-managed Edge clusters. Edge artifacts consist of an Edge installer ISO and provider images for all the Palette-supported Kubernetes versions. An Edge installer ISO assists in preparing the Edge hosts, and the provider image is used in the cluster profile. - -In this tutorial, you learned how to build Edge artifacts, prepare VMware VMs as Edge hosts using the Edge installer ISO, create a cluster profile referencing a provider image, and deploy a cluster. - -Palette's Edge solution allows you to prepare your Edge hosts with the desired OS, dependencies, and user data configurations. It supports multiple Kubernetes versions while building the Edge artifacts and creating cluster profiles, enabling you to choose the desired Kubernetes version for your cluster deployment. - -Before you plan a production-level deployment at scale, you can prepare a small set of Edge devices for development testing and to validate the devices' state and installed applications. Once the validation is satisfactory and meets your requirements, you can roll out Edge artifacts and cluster profiles for deployment in production. This approach maintains consistency while deploying Kubernetes clusters at scale across all physical sites, be it 1000 or more sites. In addition, you can use Palette to manage the entire lifecycle of Edge clusters. - -To learn more about Edge, check out the resources below. -
- -- [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) - - -- [Build Content Bundle](/clusters/edge/edgeforge-workflow/build-content-bundle) - - -- [Model Edge Native Cluster Profile](/clusters/edge/site-deployment/model-profile) - - -- [Prepare Edge Hosts for Installation](/clusters/edge/site-deployment/stage) - - -- [Perform Site Install](/clusters/edge/site-deployment/site-installation) \ No newline at end of file diff --git a/content/docs/04-clusters/04-palette-virtual-clusters.md b/content/docs/04-clusters/04-palette-virtual-clusters.md deleted file mode 100644 index b79be141ba..0000000000 --- a/content/docs/04-clusters/04-palette-virtual-clusters.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: "Palette Virtual Clusters" -metaTitle: "Create Palette virtual clusters" -metaDescription: "Create virtual clusters in Palette" -icon: "nodes" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Palette Virtual Clusters Overview - -Palette Virtual Clusters are nested Kubernetes clusters within a Host Cluster. Virtual clusters share the host cluster resources, such as CPU, memory, storage, container network interface (CNI), and container storage interface (CSI). By default, virtual clusters use [k3s](https://github.com/k3s-io/k3s), a highly available, certified Kubernetes distribution designed for production workloads. - -Palette provisions and orchestrates virtual clusters to make the lightweight Kubernetes technology stack and tools ecosystem available to you. Deploy virtual clusters on both new and imported Host Clusters and attach application profiles. - -Palette also supports Day 2 operations such as upgrades, backup, and restore to keep virtual clusters secure, compliant, and up to date. Additionally, Palette provides visibility into the workloads running inside your virtual clusters and the associated costs. - -To get started, refer to [Add Virtual Clusters to a Cluster Group](/clusters/palette-virtual-clusters/deploy-virtual-cluster). - - -
- -## Network Connectivity - -Two virtual cluster accessibility options are supported:


-- **Load Balancer**: The Host Cluster must support dynamic provisioning of load balancers, either via a Cloud Controller Manager in the public cloud or a bare metal load balancer provider such as MetalLB.


- -- **Ingress**: The NGINX Ingress Controller must be deployed on the Host Cluster with SSL passthrough enabled. This allows TLS termination to occur at the virtual cluster's Kubernetes API server.
- - A wildcard DNS record must be configured that maps to the load balancer associated with the NGINX Ingress Controller. For example: - - `*.myapp.mydomain.com` - -
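For reference, the snippet below is a minimal sketch of deploying the NGINX Ingress Controller with SSL passthrough enabled using the upstream `ingress-nginx` Helm chart. The repository URL, release name, and namespace are illustrative assumptions; if your host cluster already runs the controller through a Palette pack, enable the equivalent passthrough option in that pack's values instead.

```shell
# Sketch: install ingress-nginx with SSL passthrough so TLS can terminate at the
# virtual cluster's Kubernetes API server (values shown are illustrative).
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx \
  --namespace ingress-nginx --create-namespace \
  --set controller.extraArgs.enable-ssl-passthrough=true
```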
-
- - -## Resources - -- [Deploy a Virtual Cluster to a Cluster Group](/clusters/palette-virtual-clusters/deploy-virtual-cluster) diff --git a/content/docs/04-clusters/04-palette-virtual-clusters/02-deploy-virtual-cluster.md b/content/docs/04-clusters/04-palette-virtual-clusters/02-deploy-virtual-cluster.md deleted file mode 100644 index f0bb78b811..0000000000 --- a/content/docs/04-clusters/04-palette-virtual-clusters/02-deploy-virtual-cluster.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: "Deploy a Virtual Cluster to a Cluster Group" -metaTitle: "Deploy a Virtual Clusters to a Cluster Group" -metaDescription: "How to add Palette Virtual Clusters to a Cluster Group" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Add Virtual Clusters to a Cluster Group - - -You can deploy Palette Virtual Clusters to a [cluster group](/clusters/cluster-groups). The advantages of a virtual cluster environment are: -- You can operate with admin-level privileges while ensuring strong isolation. -- Virtual clusters reduce operational overhead and improve resource utilization. - -Use the followings steps to deploy a virtual cluster. - -# Prerequisites - -- A Spectro Cloud account. - -- A cluster group. Refer to the [Create and Manage Cluster Groups](/clusters/cluster-groups/create-cluster-group) guide to learn how to create a cluster group. - -- Attach any required policies in your cloud account that must be added to your virtual cluster deployment. - - For AWS, refer to the [Required IAM Policies](/clusters/public-cloud/aws/required-iam-policies#globalroleadditionalpolicies) documentation. - - For Azure, no additional policies are required. - - - -Palette doesn't support _Usage_ and _Cost_ metrics for Virtual Clusters running on Google Kubernetes Engine (GKE). - - - -## Add Node-Level Policies in your Cloud Account - -In some situations additional node-level policies must be added to your deployment. - -To add node-level policies: - -1. In **Cluster Mode**, switch to the **Tenant Admin** project. - - -2. Select **Tenant Settings** in the **Main Menu**. - - -3. Click **Cloud Accounts** and ensure **Add IAM policies** is enabled for your cloud account. If an account does not already exist, you must add one. - - -4. You can specify any additional policies to include in virtual clusters deployed with this cloud account. - - - For AWS, add the **AmazonEBSCSIDriver** policy so that the virtual clusters can access the underlying host cluster's storage. Check out the [Palette required IAM policies](/clusters/public-cloud/aws/required-iam-policies#globalroleadditionalpolicies) documentation to learn more about additional IAM policies. - - -5. Confirm your changes. - -# Deploy a Virtual Cluster - -Follow these steps to deploy a virtual cluster to a cluster group: - -1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. - - -2. Select **Virtual Clusters** from the left **Main Menu**. - - -3. Click the **+New Virtual Cluster**. - - -4. Select your cluster group from the **Select cluster group drop-down Menu**, and type a name for the virtual cluster. - - -5. Assign the CPU, Memory, and Storage size for the cluster. - - -6. Deploy the cluster. 
- - - -# Validate - -To validate your virtual cluster is available and ready for use. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. -Select **Virtual Clusters** from the left **Main Menu**. Your cluster is ready for use if the status is **Running**. - - -# Resources - -- [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) - -- [CPU resource units](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu) - -- [Memory resource units](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory) - -- [Amazon EBS CSI driver - Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html) - -- [Creating the Amazon EBS CSI driver IAM role for service accounts - Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/csi-iam-role.html) diff --git a/content/docs/04-clusters/05-imported-clusters.md b/content/docs/04-clusters/05-imported-clusters.md deleted file mode 100644 index d639bf8807..0000000000 --- a/content/docs/04-clusters/05-imported-clusters.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: "Imported Clusters" -metaTitle: "Imported Clusters" -metaDescription: "Learn how to manage imported clusters and what operations are supported with Palette." -icon: "cloud-download-alt" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -Existing Kubernetes clusters not deployed through Palette can be imported into Palette for visibility, limited Day -2 management, and additional capabilities such as application lifecycle management. You can import Kubernetes clusters from various infrastructure providers, such as public and private clouds and bare-metal environments. - -Palette supports importing _generic_ or _cloud-specific_ clusters. Cloud-specific clusters enable more functionality because Palette understands how to interact with the infrastructure provider's API. Cloud-specific clusters provide the same experience as Palette deployed clusters. - -The generic type is for a cluster that is deployed in an environment where Palette lacks integration with the underlying infrastructure provider's API. Palette can support basic operations for generic clusters, such as reporting metrics, conducting scans, scheduling backups, and applying and managing add-on profiles. However, Day-2 activities are not supported in generic clusters. - - - Refer to the [Supported Infrastructure Providers](/clusters/imported-clusters#supportedinfrastructureproviders) section to learn more about supported infrastructure environments. - - -To get started with a cluster import, refer to the [Import a Cluster](/clusters/imported-clusters/cluster-import) guide to learn more. - -## Import Modes - -To determine Palette's control over the imported cluster, you can choose the management mode you prefer. Refer to the table below for more information on each mode. - - -| Mode | Description | -|---|---| -| Read-only| This mode allows you to access information about the cluster, such as event logs, cost, and health checks. The read-only mode does not support Day-2 activities. | -| Full Permission| This mode provides full cluster management, depending on the cluster, generic, or cloud-specific. 
This mode also supports the ability to deploy add-on cluster profiles. | - - - -## Supported Infrastructure Providers - - -The following infrastructure providers are supported for cluster imports. If an environment is not listed below, select the **Generic** type when importing a cluster. - - -
- -| Infrastructure Provider | Type | -|---|---| -| AWS | Cloud Specific | -| Azure | Cloud Specific | -| Google Cloud Platform | Cloud Specific | -| VMware | Cloud Specific | -|OpenShift |Cloud Specific | -| AWS EKS-Anywhere | Cloud Specific | -| Generic| Generic| - - -
- -### Self-Hosted Support - -Self-hosted Palette also supports importing clusters. You must ensure network connectivity is available between the target import cluster and the Palette instance. - -
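One quick way to check that connectivity is to run a short-lived pod inside the cluster you plan to import and attempt an HTTPS request against Palette. This is only a sketch: `palette.example.com` is a placeholder for your self-hosted Palette hostname (SaaS users can target `https://api.spectrocloud.com`), and the `curlimages/curl` image is an arbitrary choice.

```shell
# Run a temporary pod in the target cluster and confirm the Palette endpoint answers over HTTPS.
kubectl run palette-connectivity-check --image=curlimages/curl --restart=Never \
  --rm --attach --command -- curl --silent --show-error --head https://palette.example.com
```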
- -## Limitations - -A few restrictions apply to all cluster imports that you need to be aware of before importing a cluster. - -
- -| Limitation | Description| -|---|---| -| Full Cluster Profile usage| You cannot use a full cluster profile. You are limited to using add-on profiles when deploying cluster profiles to imported clusters.| -| Kubeconfig file access| You cannot download the cluster's kubeconfig file from Palette. You must use the underlying infrastructure provider to access the kubeconfig file.| - - -
- - - - -Imported generic clusters lack many Day-2 management operations such as scaling nodes, adding worker pools, or any operations that require Palette to have knowledge of the underlying infrastructure. - - - -
-
-## Delete Imported Cluster
-
-You can remove a cluster by following the standard cluster removal steps. Refer to the [Delete a Cluster](/clusters/cluster-management/remove-clusters) guide for instructions. Be aware that Palette will not delete the actual cluster. Palette will remove the link to the imported cluster and instruct the Palette agent to remove itself, and all of the agent's dependencies that were installed during the import process, from the cluster. To delete the cluster, you must manually perform the delete action in the hosting infrastructure provider.
-
-
-# Resources
-
-- [Import a Cluster](/clusters/imported-clusters/cluster-import)
-
-
-- [Attach an Add-on Profile](/clusters/imported-clusters/attach-add-on-profile)
-
-
-- [Migrate to Full Permissions](/clusters/imported-clusters/migrate-full-permissions)
-
- - diff --git a/content/docs/04-clusters/05-imported-clusters/10-cluster-import.md b/content/docs/04-clusters/05-imported-clusters/10-cluster-import.md deleted file mode 100644 index 66587c89ac..0000000000 --- a/content/docs/04-clusters/05-imported-clusters/10-cluster-import.md +++ /dev/null @@ -1,245 +0,0 @@ ---- -title: "Import a Cluster" -metaTitle: "Import a Cluster" -metaDescription: "Learn how to import clusters and which Palette operations you can use to manage them." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -When importing a cluster into Palette, you can select the mode you want Palette to use when managing the cluster. You can choose between read-only mode or full permission. Refer to the [Imported Clusters](/clusters/imported-clusters#importmodes) reference page to learn more about each mode. - - -Select the mode you want to use when importing a cluster into Palette. - -
- - - - - -## Prerequisites - -- Kubernetes version >= 1.19.X - - -- Ensure your environment has network access to Palette SaaS or your self-hosted Palette instance. - - -- Ensure [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed and available in your local workstation. - - -- Access to your cluster environment through kubectl. - - -## Import a Cluster - -1. Log in to [Palette](https://spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Click on **Add New Cluster** and select **Import Cluster** in the pop-up box. - - -4. Fill out the required information and make your selections: - * Cluster Name - The name of the cluster you want to import. - * Cloud Type - Select the infrastructure environment your cluster resides in. Select **Generic** if the environment list doesn't contain your specific environment, but be aware of the limitations with generic clusters. - * Proxy - Optional and only available for generic clusters. Specify a network proxy address or DNS value. - * No Proxy - Optional and only available for generic clusters. Specify a no proxy address or DNS value. - -5. Select **Full-permission mode** and click on **Create & Open Cluster Instance** to start the import. - - - - -6. You will be redirected to the cluster details page. A set of instructions with commands is displayed on the right side of the screen. You will need to issue the following commands to complete the import process. - -
- - - ![A view of the cluster details page with the sidebar instructions box](/clusters_imported-clusters_full-permissions-instructions.png) - -
- -7. We recommend you install the metrics server so that Palette can expose and provide you with information about the cluster. Installing the metrics server is not required but is needed for Palette to expose cluster metrics. To enable the metrics server, open a terminal session and issue the commands below against the Kubernetes cluster you want to import. - -
- - ```shell - helm repo add bitnami https://charts.bitnami.com/bitnami && \ - helm install my-release bitnami/metrics-server - ``` - -8. To install the Palette agent, issue the command displayed in the cluster details page **Install the agent** section against the Kubernetes cluster you want to import. The command is customized for your cluster as it contains the assigned cluster ID. Below is an example output of the install command. - -
- - ```shell hideClipboard - kubectl apply --filename https://api.spectrocloud.com/v1/spectroclusters/6491d4a94c39ad82d3cc30ae/import/manifest - ``` - - Output - ```shell hideClipboard - namespace/cluster-6491d4a94c39ad82d3cc30ae created - serviceaccount/cluster-management-agent created - clusterrole.rbac.authorization.k8s.io/read-only-mode created - clusterrolebinding.rbac.authorization.k8s.io/read-only-mode created - configmap/log-parser-config created - configmap/upgrade-info-8kfc2m8mt8 created - configmap/version-info-kbk5hk992f created - secret/spectro-image-pull-secret created - priorityclass.scheduling.k8s.io/spectro-cluster-critical created - deployment.apps/cluster-management-agent-lite created - configmap/cluster-info created - configmap/hubble-info created - secret/hubble-secrets created - ``` - -9. When the Palette agent completes initializing, the cluster import procedures at right will disappear, and your cluster will transition to **Running** status within a few minutes. - -
- - ![A view of an imported cluster's details page](/clusters_imported-clusters_full-permissions.png) - -
- -You now have imported a cluster into Palette with full permissions. - - - -## Validate - -1. Log in to [Palette](https://spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select your imported cluster from the cluster list. - - -4. Review the **Cluster Status** row from the cluster details view. A successful cluster import will have the cluster status **Running**. - - -
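As an additional command-line check, you can confirm that the Palette agent deployment created by the import manifest is running in the cluster. The namespace name below is a sketch derived from the example output above; substitute the cluster ID assigned to your import.

```shell
# Replace <cluster-id> with the ID shown in your import manifest URL,
# for example cluster-6491d4a94c39ad82d3cc30ae.
kubectl get deployment cluster-management-agent-lite --namespace cluster-<cluster-id>
```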
- - -## Prerequisites - -- Kubernetes version >= 1.19.X - - -- Ensure your environment has network access to Palette SaaS or your self-hosted Palette instance. - - -- Ensure [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed and available in your local workstation. - - -- Access to your cluster environment through kubectl. - -## Import a Cluster - -1. Log in to [Palette](https://spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Click on **Add New Cluster** and select **Import Cluster** in the pop-up box. - - -4. Fill out the required information and make the proper selections: - * Cluster Name - The name of the cluster you want to import. - * Cloud Type - Select the infrastructure environment your cluster resides in. Select **Generic** if the environment list doesn't contain your specific environment but be aware of the limitations with generic clusters. - * Proxy - Optional and only available for generic clusters. Specify a network proxy address or DNS value. - * No Proxy - Optional and only available for generic clusters. Specify a no proxy address or DNS value. - -5. Select **Read-only mode** and click on **Create & Open Cluster Instance** to start the import action. - - -6. You will be redirected to the cluster details page. A set of instructions with commands is displayed on the right-hand side of the screen. You will need to issue the following commands to complete the import process. - -
- - - ![A view of the cluster details page with the sidebar instructions box](/clusters_imported-clusters_read-only-instructions.png) - -
- -7. We recommend you install the metrics server so that Palette can expose and provide you with information about the cluster. Installing the metrics server is not required but is needed for Palette to expose cluster metrics. Open a terminal session and issue the commands below against the Kubernetes cluster you want to import if you want to enable the metrics server. - -
-
-
-   ```shell
-   helm repo add bitnami https://charts.bitnami.com/bitnami && \
-   helm install my-release bitnami/metrics-server
-   ```
-
-
-8. To install the Palette agent, issue the command displayed in the cluster details page **Install the read-only agent** section against the Kubernetes cluster you want to import. The command is customized for your cluster as it contains the assigned cluster ID. Below is an example output of the install command.
-
- - ```shell hideClipboard - kubectl apply --filename https://api.spectrocloud.com/v1/spectroclusters/6491d4a94c39ad82d3cc30ae/import/manifest - ``` - - Output - ```shell hideClipboard - namespace/cluster-6491d4a94c39ad82d3cc30ae created - serviceaccount/cluster-management-agent created - clusterrole.rbac.authorization.k8s.io/read-only-mode created - clusterrolebinding.rbac.authorization.k8s.io/read-only-mode created - configmap/log-parser-config created - configmap/upgrade-info-8kfc2m8mt8 created - configmap/version-info-kbk5hk992f created - secret/spectro-image-pull-secret created - priorityclass.scheduling.k8s.io/spectro-cluster-critical created - deployment.apps/cluster-management-agent-lite created - configmap/cluster-info created - configmap/hubble-info created - secret/hubble-secrets created - ``` - -9. Once the Palette agent completes the initialization, the side view drawer on the right will disappear, and your cluster will transition to a status of **Running** after a few moments. - -
- - ![A view of an imported cluster's details page](/clusters_imported-clusters_read-only.png) - -
- - -You now have imported a cluster into Palette in read-only mode. Keep in mind that a cluster imported in read-only mode has limited capabilities. You can migrate to full permissions anytime by clicking **Migrate To Full Permissions**. - - - -## Validate - -1. Log in to [Palette](https://spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select your imported cluster from the cluster list. - - -4. Review the **Cluster Status** row from the cluster details view. A successful cluster import displays cluster status as **Running**. - - -
- -
- - -# Next Steps - -Depending on what mode you selected for the migration, your next step is to either [Attach an Add-on Profile](/clusters/imported-clusters/attach-add-on-profile) or you can [Migrate to Full Permissions](/clusters/imported-clusters/migrate-full-permissions). \ No newline at end of file diff --git a/content/docs/04-clusters/05-imported-clusters/20-attach-add-on-profile.md b/content/docs/04-clusters/05-imported-clusters/20-attach-add-on-profile.md deleted file mode 100644 index 205a0dcfff..0000000000 --- a/content/docs/04-clusters/05-imported-clusters/20-attach-add-on-profile.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: "Attach an Add-on Profile" -metaTitle: "Attach an Add-on Profile" -metaDescription: "Learn how to attach an add-on profile to an imported cluster in Palette." -hideToC: false -fullWidth: false ---- - -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -Imported clusters lack the ability for Palette to manage the core layers found in a cluster profile, such as the Operating System, Kubernetes distribution and version, along with the container network interface and storage interface. -You can, however, use add-on cluster profiles to deploy additional software dependencies into your cluster and have Palette manage these dependencies through the normal cluster profile lifecycle. - - -In this how-to, you learn how to add an add-on cluster profile to an imported cluster. - - -# Prerequisites - -* An imported cluster with full permissions. Refer to the [Migrate to Full Permissions](/clusters/imported-clusters/migrate-full-permissions) to learn how to migrate an imported cluster from read-only mode to full-permissions mode. - - -* An add-on cluster profile. Refer to the [Create an Add-on Profile](/cluster-profiles/create-add-on-profile) to learn how to create an add-on cluster profile. - - -# Attach an Add-on Profile - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - - -3. Select your imported cluster to access its details page. - - -4. From the cluster details page, select the **Profile** tab and click on **Attach Profile**. - -
- - ![The cluster details view when the profile tab is selected](/clusters_imported-clusters_attach-add-on-profile_cluster-details-profile-tab.png) - -
- -5. Select an add-on profile and click on **Confirm**. - - -6. In the following screen, you can update the add-on profile if desired. Click on **Save** to deploy the add-on cluster profile. - - -7. Navigate to the **Overview** tab to monitor the deployment. When the add-on cluster profile is deployed, the **Cluster Profile** status displays as a green circle next to the layer. -
- - ![A cluster profile with an add-on profile deployed successfully](/clusters_imported-clusters_attach-add-on-profile_cluster-details-app-deployed.png) - - -
- - - -You now have an add-on cluster profile deployed onto your imported cluster. Use the steps above to add your custom add-on cluster profile to an imported cluster. - - -# Validate - -1. Log in to [Palette](https://console.spectrocloud.com). - - - -2. Navigate to left **Main Menu** and select **Clusters**. - - - -3. Select your imported cluster to access its details page. - - - -4. Verify the **Cluster Profile** section of the page has a green circle next to each layer. If your application exposes a service URL, use the URL to visit the application and verify it's operational. - - -
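You can also spot-check the add-on's workloads from the command line. The namespace below is a placeholder; which namespace the add-on profile deploys into depends on the packs or manifests it contains.

```shell
# Replace <add-on-namespace> with the namespace used by your add-on profile,
# then confirm its pods reach the Running state.
kubectl get pods --namespace <add-on-namespace>
```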
\ No newline at end of file diff --git a/content/docs/04-clusters/05-imported-clusters/30-migrate-full-permissions.md b/content/docs/04-clusters/05-imported-clusters/30-migrate-full-permissions.md deleted file mode 100644 index f7d1dd6ed5..0000000000 --- a/content/docs/04-clusters/05-imported-clusters/30-migrate-full-permissions.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: "Migrate to Full Permissions" -metaTitle: "Migrate to Full Permissions" -metaDescription: "Learn how to migrate an imported cluster from read-only mode to full-permissions mode." -hideToC: false -fullWidth: false ---- - -# Overview - - -# Prerequisites - -* An imported cluster in read-only mode. Refer to the [Import a Cluster](/clusters/imported-clusters/cluster-import) guide to learn how to import a cluster into Palette. - - -* Ensure [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed and available in your local workstation. - - -- Access to your cluster environment through kubectl. - - -# Migrate to Full Permissions - -1. Log in to [Palette](https://spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - - -3. Select your imported cluster from the cluster list. - - -4. From the cluster details page, click on **Migrate To Full Permissions** to start the migration process. A prompt will ask you to confirm your decision. Select **OK**. - -
- - ![The cluster details view with an arrow pointing to the migrate button](/clusters_imported-clusters_migrate-full-permissions_cluster-details-page.png) - -
-
-5. A side view drawer will slide out from the right side of the cluster details page. Copy the displayed command to your clipboard.
-
-
-
-6. Open a terminal on your local workstation and validate that you are in the correct Kubernetes context. You can use the following command to verify the Kubernetes context. If you find yourself in an incorrect Kubernetes context, switch to the proper context so that you are interacting with the imported cluster when using kubectl.
-
- - ```shell - kubectl config current-context - ``` - -7. Issue the command you copied in your terminal to start the migration. Your terminal output will look similar to the example output below. - -
- - ```shell hideClipboard - namespace/cluster-6495ea8d4c39b720c58a5f5f configured - serviceaccount/cluster-management-agent unchanged - clusterrolebinding.rbac.authorization.k8s.io/cma-lite-cluster-admin-binding created - configmap/log-parser-config unchanged - configmap/upgrade-info-8kfc2m8mt8 unchanged - configmap/version-info-kbk5hk992f unchanged - secret/spectro-image-pull-secret unchanged - priorityclass.scheduling.k8s.io/spectro-cluster-critical configured - deployment.apps/cluster-management-agent-lite configured - configmap/cluster-info unchanged - configmap/hubble-info unchanged - secret/hubble-secrets configured - customresourcedefinition.apiextensions.k8s.io/awscloudconfigs.cluster.spectrocloud.com created - customresourcedefinition.apiextensions.k8s.io/azurecloudconfigs.cluster.spectrocloud.com created - customresourcedefinition.apiextensions.k8s.io/clusterprofiles.cluster.spectrocloud.com created - customresourcedefinition.apiextensions.k8s.io/coxedgecloudconfigs.cluster.spectrocloud.com created - customresourcedefinition.apiextensions.k8s.io/edgecloudconfigs.cluster.spectrocloud.com created - customresourcedefinition.apiextensions.k8s.io/edgenativecloudconfigs.cluster.spectrocloud.com created - customresourcedefinition.apiextensions.k8s.io/gcpcloudconfigs.cluster.spectrocloud.com created - customresourcedefinition.apiextensions.k8s.io/libvirtcloudconfigs.cluster.spectrocloud.com created - customresourcedefinition.apiextensions.k8s.io/maascloudconfigs.cluster.spectrocloud.com created - customresourcedefinition.apiextensions.k8s.io/nestedcloudconfigs.cluster.spectrocloud.com created - customresourcedefinition.apiextensions.k8s.io/openstackcloudconfigs.cluster.spectrocloud.com created - customresourcedefinition.apiextensions.k8s.io/packs.cluster.spectrocloud.com created - customresourcedefinition.apiextensions.k8s.io/spectroclusters.cluster.spectrocloud.com created - customresourcedefinition.apiextensions.k8s.io/tencentcloudconfigs.cluster.spectrocloud.com created - customresourcedefinition.apiextensions.k8s.io/vspherecloudconfigs.cluster.spectrocloud.com created - serviceaccount/palette-manager created - clusterrolebinding.rbac.authorization.k8s.io/palette-lite-cluster-admin-binding created - configmap/palette-version-info-dd8mkdffbt created - priorityclass.scheduling.k8s.io/palette-spectro-cluster-critical created - deployment.apps/palette-lite-controller-manager created - job.batch/palette-import-presetup-job created - ``` - - -8. In a few minutes, the side drawer will disappear, and the **Profile**, **Workloads**, **Scan**, and **Backups** tabs will become unlocked and available for interaction. - -
- - ![A cluster details page with an imported cluster after a completed migration](/clusters_imported-clusters_migrate-full-permissions_cluster-details-page-import-complete.png) - - -You now have successfully migrated a read-only mode cluster to full-permissions mode. Imported clusters in full-permissions mode allow Palette to manage more Day-2 activities. You can also now deploy add-on cluster profiles to the cluster. Refer to the [Attach an Add-on Profile](/clusters/imported-clusters/attach-add-on-profile) guide to learn more. - -# Validate - -1. Log in to [Palette](https://spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select your imported cluster from the cluster list. - - -4. Review the **Cluster Status** row from the cluster details view. A successful cluster import displays cluster status as **Running**. \ No newline at end of file diff --git a/content/docs/04-clusters/06-cluster-management.md b/content/docs/04-clusters/06-cluster-management.md deleted file mode 100644 index c3574606a8..0000000000 --- a/content/docs/04-clusters/06-cluster-management.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: "Manage Clusters" -metaTitle: "Managing Cluster Update Events on Palette" -metaDescription: "Events and Notifications on Cluster Updates" -icon: "envelope-open-text" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Manage Clusters - -Palette supports several Day-2 operations to manage the end-to-end lifecycle of Kubernetes clusters launched through Palette. It also provides several capabilities across new and imported clusters to perform ongoing management operations like backup/restore and to keep your clusters secure, compliant, and up to date. Additionally, Palette gives you visibility into the workloads running inside your cluster and cluster costs. - - -The following sections describe these capabilities in detail: - -* [Reconfigure](/clusters/cluster-management/reconfigure) - Scale your clusters up/down by adding/reducing the number of nodes in a node pool and adding additional worker pools. Resize nodes in a node pool by modifying the node specs (CPU, Memory, or Instance Type for public clouds). Add additional fault domains such as availability zones to a node pool. - - -* [Updates](/clusters/cluster-management/cluster-updates) - Upgrade core packs (OS, Kubernetes, CSI, CNI) and add-on layers, such as Monitoring and Security. - - - -* [Cluster Health Alerts](/clusters/cluster-management/health-alerts) - Palette monitors the health of all workload clusters and raises an alert when the cluster goes to an unhealthy state. Besides displaying the alert on the UI console, Palette provides the ability to have these alerts pushed out to a variety of channels. Users can set up email alerts to receive an email when the health status of their cluster changes. - - - -* [Certificate Management](/clusters/cluster-management/certificate-management) - You can renew cluster certificates on-demand or leverage the automatic cluster update process to handle certificate renewal operations. - - -* [Cluster Monitoring](/clusters/cluster-management/monitoring/deploy-monitor-stack) - Monitor your cluster resources by collecting and reviewing metrics. 
- - -* [Compliance Scans](/clusters/cluster-management/compliance-scan) - Perform continuous compliance checks to ensure your clusters are secure and compliant. - - - -* [OS Patching](/clusters/cluster-management/os-patching) - Automatically apply the most recent security patches to cluster nodes to stay up to date with the latest OS patches. - - -* [Backup and Restore](/clusters/cluster-management/backup-restore) - Regularly back up your cluster configurations and any persistent volumes that your applications use. Choose critical namespaces you would like to back up. Restore as required to new or existing clusters. - - - -* [Cost Visibility](/clusters/cluster-management/cloud-cost) - Get visibility into the estimated cloud cost for the cluster based on cluster node configuration. Get additional insights into per namespace cost (Usage Cost) calculated based on the number of resources consumed within the namespace. - - -* [Workload Visibility](/clusters/cluster-management/workloads) - Palette provides visibility into the resources running inside workload clusters. These resources are displayed on the cluster details page. - - -* [Node Labels and Taints](/clusters/cluster-management/taints) - You can constrain a pod to run only on a particular set of nodes. There are several ways to do this. Common approaches, such as nodeSelector and node affinity, use labels to facilitate the selection. Taints allow a node to repel a set of pods for appropriate pod allocation to node pools. - - - -* [RBAC and NS Support](/clusters/cluster-management/cluster-rbac) - RoleBindings and ClusterRoleBindings are Role-Based Access Control (RBAC) concepts that allow granular control over cluster-wide resources as well as namespace resources. Palette provides the ability to specify these bindings to configure granular RBAC rules. Palette can also define new namespaces for the cluster and manage the namespaces, such as removing them and assigning quotas and role bindings to them. - - -* [Namespace Management](/clusters/cluster-management/namespace-management) - use Kubernetes namespaces to partition resources among multiple users without having to set up multiple physical clusters, configure Role-Based Access Control (RBAC) based on namespaces, and more. - - - -* [Add-on Pack Status and Monitoring](/clusters/cluster-management/pack-monitoring) - Palette displays the status and installation progress of add-on packs associated with the cluster you are monitoring. Pack status appears gray during initial onboarding and before deployment, blue when the pack is in the process of being installed, and green to indicate successful installation. Red indicates a failed installation and requires attention. - - -* [Kubectl](/clusters/cluster-management/palette-webctl#overview) - Learn how to use `kubectl` to interact with your host clusters. - - - -* [Platform Management](/clusters/cluster-management/palette-lock-cluster) - Palette supports the Cluster(s) Management feature to exclude a cluster or a group of clusters from getting upgraded when Palette is upgraded. - - -* [NOC UI](/clusters/cluster-management/palette-lock-cluster) - Palette provides Intuitive UI-based location monitoring for the clusters running at multiple locations. For public cloud clusters Palette displays the `Region` set during the cluster creation process and displays the location on the UI Map. For private cloud clusters the user can set the location through the Palette UI. 
The user can monitor the location details of all the clusters running under a specific scope. - -* [Palette Access Control](/clusters/cluster-management/cluster-tag-filter) - Palette provides the ability to manage user and role access privileges through tags. This feature helps you reduce the overhead in managing user and role access to clusters by assigning tags. Tags can be used to group clusters, allowing you to apply access controls to the tag rather than to each cluster, user, or role. This reduces the overhead of managing access controls for individual users and clusters. - - - -* [Image Swap](/clusters/cluster-management/image-swap) - Learn how to use image swap capabilities with Palette. - -
diff --git a/content/docs/04-clusters/06-cluster-management/0-reconfigure.md b/content/docs/04-clusters/06-cluster-management/0-reconfigure.md deleted file mode 100644 index 147b0227a0..0000000000 --- a/content/docs/04-clusters/06-cluster-management/0-reconfigure.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: "Reconfigure" -metaTitle: "Managing Cluster reconfiguration-scaling Events on Palette" -metaDescription: "Reconfiguration-scaling Events on Palette" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - - - -# Overview -Scaling a cluster up or down involves changing the size of node pools. The following are the steps to scale up/down a cluster: -* Access the ‘Nodes’ view of the cluster. -* For the desired node pool, change the size directly from the nodes panel or edit node pool settings. -* After the node pool configuration is updated, the scale-up/down operation is initiated in a few minutes. -* Provisioning status is updated with the ongoing progress of the scale operation. - - -The master node pool is scaled from 1 to 3 or 3 to 5 nodes, etc. However, the scale-down operation is not supported for master nodes. - - -# Reconfiguring the Cluster Nodes - -The following are the steps to reconfigure worker pool nodes: -* Access the 'Nodes' view for the cluster. -* Edit the settings of the desired node pool. -* Change the number of nodes, rolling update setting, availability zones, flavor, and Disk size to the desired settings. -* Save the node pool settings. After the node pool settings are updated, the node pool reconfiguration begins within a few minutes. The older nodes in the node pool are deleted and replaced by new nodes launched with a new instance type configured. -* Provisioning status is updated with the ongoing progress of nodes being deleted and added. - -# Adding a New Worker Pool - -The following are the steps to add a new worker node pool to a cluster: -* Invoke the option to ‘Add Node Pool’ from the cluster’s node information page. -* Provide node pool settings as follows: - * A descriptive name for the node pool - * The number of nodes in the node pool - * Rolling update setting, availability zones, flavor, and Disk size settings - * Save the node pool settings - -The new worker pool settings are updated, and cluster updates begin within a few minutes. Provisioning status updates will be available with the ongoing progress of tasks related to adding new nodes. - - -# Removing a Worker Pool -The following steps need to be performed to remove a worker pool from the cluster: -* Access the ‘Nodes’ view of the cluster -* Delete the desired worker pool and confirm the deletion -* Upon confirmation, the worker node deletion begins in a few minutes - - - - Support of reconfiguration is not available for existing clusters imported into Palette for any cloud type. - diff --git a/content/docs/04-clusters/06-cluster-management/0-ssh-keys.md b/content/docs/04-clusters/06-cluster-management/0-ssh-keys.md deleted file mode 100644 index c187b0279a..0000000000 --- a/content/docs/04-clusters/06-cluster-management/0-ssh-keys.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: "SSH Keys" -metaTitle: "SSH Keys" -metaDescription: "Learn how to create and manage SSH keys in Palette." 
-hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -Palette supports SSH (Secure Shell) to establish, administer, and communicate with remote clusters. This section describes creating and managing SSH Keys in the Palette Management Console. - -## Scope of SSH Key - -Palette groups clusters for logical separation into [Projects](/projects). Users and teams can be assigned roles within a project for granular control over permissions within the project scope. SSH key authentication is scoped to a project. Multiple users can gain access to a single project. To access a cluster with SSH, you need a public SSH key registered in Palette. - -# Prerequisites - -* Access to a terminal window. - - -* The utility ssh-keygen or similar SSH key generator software. - - -# Create and Upload an SSH Key - -Follow these steps to create an SSH key using the terminal and upload it to Palette: - -1. Open the terminal on your computer. - - -2. Check for existing SSH keys by invoking the following command. - -
-
-   ```shell
-   ls -la ~/.ssh
-   ```
-   If you see files named **id_rsa** and **id_rsa.pub**, you already have an SSH key pair and can skip to step 6. If not, proceed to step 3.
-
-
-3. Generate a new SSH key pair by issuing the following command.
-
- - ```shell - ssh-keygen -t rsa -b 4096 -C "your_email@example.com" - ``` - - Replace `your_email@example.com` with your actual email address. - - -4. Press Enter to accept the default file location for the key pair. - - - -5. Enter a passphrase (optional) and confirm it. We recommend using a strong passphrase for added security. - - -6. Copy the public SSH key value. Use the `cat` command to display the public key. - -
- - ```shell - cat ~/.ssh/id_rsa.pub - ``` - Copy the entire key, including the `ssh-rsa` prefix and your email address at the end. - - -7. Log in to [Palette](https://console.spectrocloud.com). - - -8. Navigate to the left **Main Menu**, select **Project Settings**, and then the **SSH Keys** tab. - - -9. Open the **Add New SSH Key** tab and complete the **Add Key** input form: - * **Name**: Provide a unique name for the SSH key. - - - * **SSH Key**: Paste the SSH public key contents from the key pair generated earlier. - - -10. Click **Confirm** to complete the wizard. - -
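When you copy the public key in step 6, you can pipe it straight to your clipboard to avoid truncating or mangling the key. The commands below are a sketch; `xclip` and `pbcopy` availability depends on your workstation.

```shell
# Linux (requires xclip):
xclip -selection clipboard < ~/.ssh/id_rsa.pub

# macOS:
pbcopy < ~/.ssh/id_rsa.pub
```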
- - - -You can edit or delete SSH keys later by using the **three-dot Menu** to the right of each key. - - - -During cluster creation, assign your SSH key to a cluster. You can use multiple keys to a project, but only one key can be assigned to an individual cluster. - -# Validate - -You can validate the SSH public key is available in Palette by attempting to deploy a host cluster. During the host cluster creation wizard, you will be able to assign the SSH key to the cluster. Refer to the [Deploy a Cluster](/clusters/public-cloud/deploy-k8s-cluster) tutorial for additional guidance. - diff --git a/content/docs/04-clusters/06-cluster-management/01-cluster-updates.md b/content/docs/04-clusters/06-cluster-management/01-cluster-updates.md deleted file mode 100644 index c2ec96886f..0000000000 --- a/content/docs/04-clusters/06-cluster-management/01-cluster-updates.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -title: "Cluster Updates" -metaTitle: "Managing Cluster Update Events on Palette" -metaDescription: "Events and Notifications on Cluster Updates" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - -# Overview - -Palette supports various kinds of updates to running clusters, such as upgrade of Kubernetes version, upgrade of add-on versions, the addition of new add-ons, removing existing ones, etc. Based on the nature of the change, one of the following two mechanisms can be used to apply cluster updates to the cluster. - -# Cluster profile based updates - - - -**Kubernetes Version Upgrades:** We recommend our users to go for single-step upgrades of Kubernetes minor versions. E.g., Kubernetes version 1.18.x is to be updated to 1.19.x, not a direct upgrade to 1.20.x - -**Kubernetes Version Downgrades:** We do not recommend downgrading the Kubernetes versions. - - - - -Fundamental changes to the cluster’s definition, such as upgrading Kubernetes versions, installing new packs, uninstalling previously installed packs, and updating default pack configuration, are initiated through the cluster profile. These changes result in update notifications on all the clusters that are instantiated from the cluster profile. Update notifications consist of detailed information about all the changes applied to the profile since the initial installation or the previous update. In addition, users can update the clusters individually at an appropriate time. - - - -**Note:** Cluster Profile Changes will not be shown or allowed on clusters when the cluster is in a provisioning state until worker node additions are completed. This is done to prevent the Kubernetes clusters from going into an unrecoverable state due to the changes in core components. - - - -![Cluster Notification - Update Available](/cluster_list_update_available.png) - -Updates to pack configuration might result in a conflict if the configuration was previously overridden within the cluster. These conflicts are presented to the user and need to be resolved before changes can be applied to the cluster. - - -![Cluster Update Details](/cluster_update_available_detail.png) - - - -## Instructions: -* Navigate to the cluster profiles page and choose the profile to be updated. -* Make the desired changes. These include add/delete layers, change pack version, change pack values, etc. Save your changes. 
-* On the Clusters page, observe the ‘Updates Available’ tag on every previously launched cluster that uses the updated cluster profile.
-* Click on one of the clusters to be updated to open the cluster details page.
-* An update notification in the form of a button called ‘Updates Available’ appears at the top right of the screen. Click the button to open the update notifications dialog.
-* A notification is created for each change made to the profile. Review all notifications. Depending on the nature of the change, additional action might be required for certain notifications. There are typical scenarios where the settings or attached manifests for a pack are directly updated on the cluster, resulting in a conflict with the new incoming changes from the profile. The updated profile settings and modified cluster settings are shown side by side for such cases, with the differences highlighted. Resolve all of the conflicts. When there has been no update to the pack settings or manifests, the incoming changes from the profile are automatically merged. A side-by-side comparison between the original and merged cluster settings is still displayed in such cases for review purposes. However, users may choose to customize settings from this dialog further.
-* Once all the notifications are reviewed and conflicts, if any, are resolved, confirm the updates to apply the changes to the cluster.
-* The system starts the update process in a few seconds. Depending upon the nature of the change, a rolling update of the cluster's nodes may take place. The detailed status of the upgrade is available in the UI.
-* Repeat this process for other clusters to be upgraded.
-
-
-## Examples - Update Notifications
-
-|Update Type |Description|Notification Example |
-|:---------------|:---------|:-----------------------|
-|Pack Version Upgrade |The existing pack version is upgraded to a different version in the cluster profile |Kubernetes version is updated 1.18.16 > 1.20.0|
-|Pack Values Update |The existing pack values are updated in the cluster profile |Kubernetes 1.20.0 values are updated|
-|Add Pack|Add a new pack to the cluster profile |New Kibana 7.2.4 layer is added|
-|Delete Pack|Delete the existing pack from the cluster profile |Kibana 7.2.4 layer is deleted|
-|Attach Pack Manifest|A new manifest is attached to a pack in the cluster profile |Manifest security is attached to the pack Kubernetes|
-|Update Pack Manifest|The attached pack manifest content is updated in the cluster profile|Manifest security is updated in the pack Kubernetes|
-|Delete Pack Manifest |The attached pack manifest is deleted from the cluster profile|Manifest security is deleted in the pack Kubernetes|
-
-**Note:**
-Prior to applying the notifications resulting from a profile update, the notification is automatically cleared if the corresponding changes are reverted.
-
-## Examples - Notification settings
-
-As described above, notifications originate from changes to pack settings or manifests. They are accompanied by a settings dialog with a split pane showing differences in values.
Following are a few examples of such scenarios: - -|Values Updated |Values overridden in Clusters |Settings displayed (LHS) |Settings displayed (RHS) |Auto Merged | Action | -|:---------------|:---------|:--------------------|:--------|:-------|:--------| -|Pack Values|No|Original pack settings| Updated pack settings| Yes| Review and/or modify if desired| -|Attached Manifests|No|Original Manifests| Updated Manifests| Yes| Review and/or modify if desired| -|Pack Values|Yes|Updated settings from Cluster Profile| Current settings from cluster| No| Resolve all conflicts| -|Attached Manifests|Yes|Updated settings from Cluster Profile| Current settings from cluster| No| Resolve all conflicts| -|Pack Version Changed|No|Original pack settings| Updated pack settings| Yes| Review and/or modify if desired| -|Pack Version Changed|Yes|Updated settings from Cluster Profile| Current settings from cluster| No| Resolve all conflicts| - -# Configuration overrides - -Every pack installed via cluster profile provides a set of out-of-the-box default settings. These can be overridden at the time of launching a new cluster or any time afterward for a running cluster. Besides basic defaults, Palette also provides useful presets. Presets are preconfigured configuration blocks logically grouped. Can leverage these to turn a feature on/off quickly. For example, enabling ingress for a Prometheus/Grafana pack requires many settings to be added. However, the Ingres preset for the Prometheus pack makes it easy to make this change. - -![Cluster Update Details](/cluster_config_override.png) - - - - Supported for all cluster types (new, imported) and cloud types (public, private, managed) - diff --git a/content/docs/04-clusters/06-cluster-management/01.9-monitoring.md b/content/docs/04-clusters/06-cluster-management/01.9-monitoring.md deleted file mode 100644 index e116ae31ff..0000000000 --- a/content/docs/04-clusters/06-cluster-management/01.9-monitoring.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: 'Cluster Monitoring' -metaTitle: 'Palette Cluster Monitoring' -metaDescription: 'Learn how to set up cluster monitoring with Prometheus' -hiddenFromNav: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Cluster Monitoring - -Palette exposes a set of [workload metrics](/clusters/cluster-management/workloads) out-of-the-box to help cluster administrators better understand the resource utilization of the cluster. The workload metrics Palette exposes are a snapshot in time and offer a limited ability to review past values. Administrators who want more information or a better understanding of their cluster metrics should consider using a dedicated monitoring system. - -Several Packs are available in the [monitoring](/integrations) category that you can use to add additional monitoring capabilities to your cluster and help you get answers to questions. For a more robust and scalable solution, we recommend creating a dedicated monitoring stack for your environment. You can deploy a monitoring stack that uses [Prometheus](https://prometheus.io/) to collect metrics from all clusters in your environment. - -To help you get started with deploying a monitoring stack to your Palette environment, check out the [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) and the [Enable Monitoring on Host Cluster](/clusters/cluster-management/monitoring/deploy-agent) guide. 
- -# Resources - -- [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) - - -- [Enable Monitoring on Host Cluster](/clusters/cluster-management/monitoring/deploy-agent) - - -- [Prometheus Operator Pack](/integrations/prometheus-operator/) - - -- [Prometheus Agent Pack](/integrations/prometheus-agent/) - - -- [Prometheus Cluster Metrics](/integrations/prometheus-cluster-metrics) - - -- [Spectro Cloud Grafana Dashboards](/integrations/grafana-spectrocloud-dashboards) \ No newline at end of file diff --git a/content/docs/04-clusters/06-cluster-management/01.9-monitoring/10-deploy-monitor-stack.md b/content/docs/04-clusters/06-cluster-management/01.9-monitoring/10-deploy-monitor-stack.md deleted file mode 100644 index dd7f584534..0000000000 --- a/content/docs/04-clusters/06-cluster-management/01.9-monitoring/10-deploy-monitor-stack.md +++ /dev/null @@ -1,544 +0,0 @@ ---- -title: 'Deploy Monitoring Stack' -metaTitle: 'Deploy Monitoring Stack' -metaDescription: 'Learn how to deploy a monitoring stack in your Palette environment.' -hiddenFromNav: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -The monitoring stack you will deploy uses the open-source tool, [Prometheus](https://prometheus.io/docs/introduction/overview/), to support your environment's monitoring requirements. The monitoring stack is a centralized server or aggregation spot to which all other clusters will forward metrics. The monitoring stack is a dedicated Kubernetes cluster for monitoring and metrics aggregation in your Palette environment. - -The monitoring stack uses a server-client architecture. The monitoring stack uses the [Prometheus Operator](/integrations/prometheus-operator) pack to deploy all the dependencies the Prometheus server requires. The server exposes an API endpoint for all other clients to forward metrics. The clients are Kubernetes clusters with the [Prometheus Agent](/integrations/prometheus-agent) pack installed and configured. - -Use the following steps to deploy a monitoring stack, and learn how to configure a host cluster to forward metrics to the monitoring stack. - -
- - - -We recommend you avoid installing applications in your monitoring stack. The monitoring stack will require all the allocated resources to support Prometheus and incoming metrics from all other clusters. - - - -# Deploy a Monitoring Stack - -The steps below will deploy a new host cluster with the Prometheus Operator pack. You can add the Prometheus Operator pack to an existing cluster if you already have a host cluster deployed in your environment. - -The Prometheus Operator pack will install an unsecured Prometheus server by default. Use the **With Authentication and Encryption** tab for guidance on how to enable authentication. - -
- - - - - -## Prerequisites - -- An infrastructure provider environment registered in Palette. Refer to the [Clusters](/clusters) documentation for guidance on how to register your infrastructure provider environment in Palette. - - -- The minimum required size for the Prometheus server is 4 CPU, 8 GB Memory, and 10 GB Storage. We recommend the monitoring stack have 1.5x to 2x the minimum required size: - - Recommended size: - - 8 CPU - - 16 GB Memory - - 20 GB Storage. - - - As new clusters with the Prometheus agent are added to your environment, review the resource utilization and consider increasing resources if needed. As the Prometheus documentation recommends, each additional agent requires the following resources from the monitoring stack: - - Each added agent: - - 0.1 CPU - - 250 MiB Memory - - 1 GB Storage. - -
- - Refer to the [Prometheus Operational aspects](https://prometheus.io/docs/prometheus/latest/storage/#operational-aspects) documentation for additional guidance. - -## Create Cluster Profile and Deploy - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Profiles**. - - -3. Click on **Add Cluster Profile** to create a new cluster profile. - - -4. Provide the cluster profile a name and select the type **Full**. Click on **Next**. - - -5. Select the infrastructure provider and continue. - - -6. Go ahead and select the desired operating system, Kubernetes distribution, container network interface (CNI), and container storage interface (CSI). Click on **Next Layer** after each selection. When you have completed selecting core infrastructure for the profile, click **Confirm**. - - -7. In the next screen that displays, select **Add New Pack**. - - -8. Use the following information to find the Prometheus Operator pack. - - Pack Type: Monitoring - - Registry: Public Repo - - Pack Name: Prometheus Grafana - - Pack Version: 44.25.X or newer.
- - - -9. Review the YAML configuration on the right. Scroll down in the file until you find the parameter `adminPassword`. Input the password value for the admin user. The default admin user name is `admin`. - - -10. Next, click on the **Presets** button to expand the options drawer. - - -11. Scroll down the presets option menu and enable **Remote Monitoring**. Confirm your changes. You can enable several options to expand the functionality of the monitoring stack. Review the [Prometheus Operator](/integrations/prometheus-operator) pack documentation to learn more about the available options. - - - -12. Confirm your changes by selecting **Confirm & Create**. - - -13. Click on **Add New Pack**. - - -14. Use the following information to find the Spectro Cluster Metrics pack. - - **Pack Type**: Monitoring - - **Registry**: Public Repo - - **Pack Name**: Spectro Cluster Metrics - - **Pack Version**: 3.3.X or newer - - -15. Use the default values. Confirm your changes by selecting **Confirm & Create**. - - -16. Click on **Add New Pack**. - - -17. Use the following information to find the Spectrocloud Grafana Dashboards pack. - - **Pack Type**: Monitoring - - **Registry**: Public Repo - - **Pack Name**: Spectrocloud Grafana Dashboards - - **Pack Version**: 1.0.X or newer - - -18. Use the default values. Confirm your changes by selecting **Confirm & Create**. - - -19. Click on **Next** to review the cluster profile and save it. - - -20. Navigate to the left **Main Menu** and select **Clusters**. - - -21. Click on **Add New Cluster**. Select **Deploy New Cluster**. - - -22. Choose the infrastructure provider you selected for the cluster profile you created earlier. - - -23. Assign a name to the host cluster and select the registered account you will deploy it to. Click on **Next**. - - -24. Choose the cluster profile you created earlier and complete the remainder of the cluster creation process. - -When you deploy the cluster, a host cluster with Prometheus will be installed and ready to receive information from Prometheus agents. - - -## Validate - -To validate the monitoring stack is successfully deployed and ready to receive Prometheus agent requests, use the following steps. - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the monitoring stack cluster to review the details page. - - -4. Ensure the cluster is in **Running** state. - - -5. Click on the exposed service URL for the service **prometheus-operator-kube-prometheus-stack-grafana**. - - -6. Log in to the Grafana dashboard using the user `admin` and the password you specified in the cluster profile. - - -
- - - -To enable Hypertext Transfer Protocol Secure (HTTPS), you must make several architectural decisions and decide on various options for setting up the environment. These options range from choosing what will generate the Secure Socket Layer (SSL) certificates to how incoming requests for Grafana or Prometheus are handled. - -The approach presented in this guide is one pattern you can use. However, we encourage you to discuss this pattern with your system administrator or platform engineering team before changing your infrastructure and Domain Name System (DNS) resources. - -The following diagram represents the infrastructure pattern you will use in this guide to enable HTTPS. - -![An architecture diagram that displays the network flow and infrastructure components](/clusters_monitoring_deploy-monitor-stack_https-architecture.png) - -In this guide, the following domains are used to expose the monitoring stack: - -| Domain | Description| -|---|---| -|`metrics.example.com`| The endpoint that all host clusters will forward Prometheus metrics. | -| `monitoring.example.com` | The Grafana dashboard.| - -## Prerequisites - - -- Experience with DNS and setting up custom domains that use SSL certificates are required for this guide. In addition, the following actions are needed. - - - Ability to create and update DNS record names. - - - Two custom domain names. One domain is for the Grafana dashboard, and another is for host clusters to forward metrics to the monitoring stack. - - - Ability to create a public certificate for each domain. - - -- An infrastructure provider environment registered in Palette. Refer to the [Clusters](/clusters) documentation for guidance on how to register your infrastructure provider environment in Palette. - - -- [htpasswd](https://httpd.apache.org/docs/2.4/programs/htpasswd.html) or similar basic auth password file generator tool. - - -- The minimum required size for the Prometheus server is 4 CPU, 8 GB Memory, and 10 GB Storage. We recommend the monitoring stack have 1.5x to 2x the minimum required size: - - Recommended size: - - 8 CPU - - 16 GB Memory - - 20 GB Storage. - - - As new clusters with the Prometheus agent are added to your environment, review the resource utilization and consider increasing resources if needed. As the Prometheus documentation recommends, each additional agent requires the following resources from the monitoring stack: - - Each added agent: - - 0.1 CPU - - 250 MiB Memory - - 1 GB Storage. - - Refer to the [Prometheus Operational aspects](https://prometheus.io/docs/prometheus/latest/storage/#operational-aspects) documentation for additional guidance. - - -## Create Cluster Profile and Deploy - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Profiles**. - - -3. Click on **Add Cluster Profile** to create a new cluster profile. - - -4. Provide the cluster profile a name and select the type **Full**. Click on **Next**. - - -5. Select the infrastructure provider and continue. - - -6. Go ahead and select the desired operating system, Kubernetes distribution, container network interface (CNI), and container storage interface (CSI). Click on **Next Layer** after each selection. When you have completed selecting core infrastructure for the profile, click **Confirm**. - - -7. In the next screen that displays, select **Add New Pack**. - - -8. Use the following information to add the Nginx ingress controller pack. 
- - Pack Type: Ingress - - Registry: Public Repo - - Pack Name: Nginx - - Pack Version: 1.5.X or newer.
- - -9. Review the YAML configuration on the right and add the following changes: - -
- - ```yaml - charts: - ingress-nginx: - controller: - extraArgs: - enable-ssl-passthrough: true - ``` - -10. Click on **Confirm & Create**. - - -11. Select **Add New Pack**. - - -12. Use the following information to find the Prometheus Operator pack. - - - Pack Type: Monitoring - - Registry: Public Repo - - Pack Name: Prometheus Grafana - - Pack Version: 44.3.X or newer.
- - -13. Next, click on the **Presets** button to expand the options drawer. - - -14. Scroll down the presets option menu and enable **Remote Monitoring**. - - -15. Review the YAML configuration on the right. Scroll down in the file until you find the parameter `grafana.adminPassword`. Input the password value for the admin user. The default admin user name is `admin`. - -
- - ```yaml - charts: - kube-prometheus-stack: - grafana: - adminPassword: "YourPassword" - ``` - -16. Next, update the `prometheus.service.type` parameter to `ClusterIP`. - -
- - ```yaml - charts: - kube-prometheus-stack: - prometheus: - service: - type: ClusterIP - ``` - -17. Confirm your changes by selecting **Confirm & Create**. You can enable several options to expand the functionality of the monitoring stack. Review the [Prometheus Operator](/integrations/prometheus-operator) pack documentation to learn more about the available options. - - -18. Click on **Add New Pack**. - - -19. Use the following information to find the Spectro Cluster Metrics pack. - - **Pack Type**: Monitoring - - **Registry**: Public Repo - - **Pack Name**: Spectro Cluster Metrics - - **Pack Version**: 3.3.X or newer - - -20. Use the default values. Confirm your changes by selecting **Confirm & Create**. - - -21. Click on **Add New Pack**. - - -22. Use the following information to find the Spectrocloud Grafana Dashboards pack. - - **Pack Type**: Monitoring - - **Registry**: Public Repo - - **Pack Name**: Spectrocloud Grafana Dashboards - - **Pack Version**: 1.0.X or newer - - -23. Use the default values. Confirm your changes by selecting **Confirm & Create**. - - -24. Click on **Next** to review the cluster profile and save it. - - -25. Navigate to the left **Main Menu** and select **Clusters**. - - -26. Click on **Add New Cluster**. Select **Deploy New Cluster**. - - -27. Choose the infrastructure provider you selected for the cluster profile you created earlier. - - -28. Assign a name to the host cluster and select the registered account you will deploy it to. Click on **Next**. - - -29. Choose the cluster profile you created earlier and complete the remainder of the cluster creation process. - - -30. Once the host cluster is deployed, navigate to the left **Main Menu** and select **Clusters**. Click on your cluster to display the details page and ensure its status is **Running**. - - - -31. Download the Kubernetes configuration file. Click on the URL that has the name of your cluster followed by a period and the word *kubeconfig*. Refer to the [Access Cluster with CLI](/clusters/cluster-management/palette-webctl#accessclusterwithcli) for additional guidance. - - -32. Open a terminal window and set the environment variable `KUBECONFIG` to point to kubeconfig file you downloaded. - -
- - ```shell - export KUBECONFIG=~/Downloads/dev-monitoring-stack.config - ``` - -33. Create an htpasswd file for the user `agent` and assign a password. You can choose a different username if you prefer. - -
- - ```shell - htpasswd -c auth agent - ``` - - Output: - ```shell - New password: [agent_password_here] - Re-type new password: - Adding password for user agent - ``` - -34. Convert the htpasswd file into a Kubernetes secret. - 
- - ```shell - kubectl create secret generic basic-auth --from-file=auth --namespace monitoring - ``` - - Output: - ```shell - secret "basic-auth" created - ``` - -35. Navigate back to Palette, and review the cluster profile you created for the monitoring stack. From the left **Main Menu** > **Profiles** > select your cluster profile. Click on the Prometheus operator layer to edit the YAML. - - -36. Locate the `prometheus.ingress` section near the end of the file. Update the ingress configuration with the values provided below. Replace the `hosts` parameter with your custom domain. - -
- - ```yaml - ingress: - enabled: true - ingressClassName: nginx - annotations: - nginx.ingress.kubernetes.io/auth-type: basic - nginx.ingress.kubernetes.io/auth-secret: basic-auth - nginx.ingress.kubernetes.io/auth-realm: "Authentication Required" - hosts: - - metrics.example.com - ``` - -37. Confirm your updates on the next screen that displays. - - - -38. From the left **Main Menu**, select **Clusters** and access the monitoring stack host cluster. - - - -39. Click on the **Updates Available** button to review the changes. - - - -40. Accept the changes and select **Confirm Updates**. - - - - - - The following steps can be complex, depending on your environment and your access. Discuss the remaining step with your network administrator team if you need additional guidance. - - - - -41. Create a Canonical Name (CNAME) record for each of the following services and add the load balancer hostname to the CNAME's record value. Use the table below to identify which mapping to use between the domain and each load balancer hostname. - - | Service | Domain| CNAME Value Example | - |---|---|---| - |`nginx-ingress-controller`| `metrics.example.com` | `a57b622a0c0a148189ed00df614481c9-1803006767.us-east-1.elb.amazonaws.com` - |`prometheus-operator-kube-prometheus-stack-grafana` | `monitoring.example.com` | `a702f8a14b9684a30b18b875d2cca997-1676466159.us-east-1.elb.amazonaws.com` | - - ![A screenshot of the Palette user interface with two boxes highlighting the load balancers that you need to add to your CNAME.](/clusters_monitoring_deploy-monitor-stack_loadbalancers.png) - - - - - - You can also use `kubectl` to retrieve the load balancer hostname. - - Grafana: - - ```shell - kubectl get service prometheus-operator-kube-prometheus-stack-grafana -n monitoring -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' - ``` - - Prometheus: - ```shell - kubectl get service nginx-ingress-controller --namespace nginx -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' - ``` - - - -42. Create a public certificate for each domain. If you are using a public cloud provider, use the native certificate manager service to generate a public certificate that you can attach to the load balancers with minimal overhead. On-prem, use the default certificate manager used by your organization. - - - -43. Update the network rules for each of the load balancers to allow inbound port 443. - -
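How you open port 443 depends on where the load balancers run. As one possible approach, if they were provisioned in AWS (as in the example CNAME values above), you could allow inbound HTTPS on a load balancer's security group with the AWS CLI. This is only a sketch; the security group ID below is a placeholder you must replace with the group attached to your load balancer.

```shell
# Allow inbound HTTPS (port 443) on the security group attached to the load balancer.
aws ec2 authorize-security-group-ingress \
  --group-id sg-0123456789abcdef0 \
  --protocol tcp \
  --port 443 \
  --cidr 0.0.0.0/0
```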
- - -44. Next, update the load balancer listeners to forward requests from port 443 to the respective target port on the monitoring stack. The following table will map the service's load balancer listener with the respective configuration. Refer to the architecture diagram from the introduction to help you visualize the mapping. - - | Inbound Load Balancer Port | Domain | Monitoring Stack Port | Service | - |---|---|---|---| - |443| `monitoring.example.com` | Use the same instance port the original entry for port 80 is using. | `prometheus-operator-kube-prometheus-stack-grafana` | - |443| `metrics.example.com`| Use the same instance port the original entry for port 80 is using.| `nginx-ingress-controller` | - - -45. Wait for the DNS changes to propagate. This can take up to five minutes. - - -Your monitoring stack is now enabled with authentication and network encryption. - - -## Validate - -To validate the monitoring stack is successfully deployed and ready to receive Prometheus agent requests, use the following steps. - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the monitoring stack cluster to review the details page. - - -4. Ensure the cluster is in **Running** state. - - -5. Next, open up your web browser and visit the domain name you specified for the Grafana dashboard. Example: `https://monitoring.example.com`. - -
- -6. Log in to the Grafana dashboard using the user `admin` and the password you specified in the cluster profile. - - -7. After you have verified you can log in to the Grafana dashboard, open a new tab and visit the Prometheus endpoint. Example: `https://metrics.example.com` - - - -8. Log in with the user `agent` and use the password you specified in the htpasswd file. - - - - - A common error is not updating the network rules to allow inbound connections for port 443 to the load balancers. Ensure you have updated all the required network rules to allow inbound network requests for port 443. - - - - -
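As an optional command-line check, you can confirm the metrics endpoint is reachable over HTTPS and protected by basic authentication. The following is only a sketch that assumes the example domain `metrics.example.com` and the `agent` user created earlier, and it probes Prometheus's `/-/healthy` endpoint.

```shell
# Without credentials, expect the request to be rejected (HTTP 401).
curl --silent --output /dev/null --write-out "%{http_code}\n" https://metrics.example.com/-/healthy

# With valid credentials, expect a successful response (HTTP 200).
curl --silent --output /dev/null --write-out "%{http_code}\n" \
  --user agent:'[agent_password_here]' https://metrics.example.com/-/healthy
```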
- -
- - -# Next Steps - -Now that you have a monitoring stack deployed and available in your environment, start adding the Prometheus agent to new and existing clusters. Check out the [Enable Monitoring on Host Cluster](/clusters/cluster-management/monitoring/deploy-agent) to get started. \ No newline at end of file diff --git a/content/docs/04-clusters/06-cluster-management/01.9-monitoring/20-deploy-agent.md b/content/docs/04-clusters/06-cluster-management/01.9-monitoring/20-deploy-agent.md deleted file mode 100644 index c40ec12382..0000000000 --- a/content/docs/04-clusters/06-cluster-management/01.9-monitoring/20-deploy-agent.md +++ /dev/null @@ -1,284 +0,0 @@ ---- -title: 'Enable Monitoring on Host Cluster' -metaTitle: 'Enable Monitoring on Host Cluster' -metaDescription: 'Learn how to configure your host cluster to forward metrics to a Prometheus server.' -hiddenFromNav: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -Observability (O11y) of Kubernetes clusters and their metrics is an important operational capability. Palette provides a pack that collects metrics from host clusters, which can be scraped by a monitoring stack. - -The steps below will teach you how to create a Prometheus agent cluster add-on profile to deploy on the host clusters you would like to monitor. Creating an add-on profile makes it easier for you to deploy the Prometheus agent to other host clusters in the future. You will use this add-on profile when deploying a new host cluster, but you can also apply the add-on profile to an existing cluster to send metrics to the monitoring stack. - -# Prerequisites - - -- A monitoring stack. Check out the [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) guide to learn how to deploy a monitoring stack in your Palette environment. - - -- An infrastructure provider environment registered in Palette. Refer to the [Clusters](/clusters) documentation for guidance on how to register your infrastructure provider environment in Palette. - -# Create Add-on Profile and Deploy Cluster - - - - - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Profiles**. - - -3. Click on **Add Cluster Profile** to create a new cluster profile. - - -4. Provide the cluster profile a name and select the type **Add-on**. Click on **Next**. - - -5. In the following screen, select **Add New Pack**. - - -6. Use the following information to find the Prometheus Agent pack. - - **Pack Type**: Monitoring - - **Registry**: Public Repo - - **Pack Name**: Prometheus Agent - - **Pack Version**: 19.0.X or newer. - - -7. Review the YAML configuration on the right. Navigate down the file until you find the parameter `url` in the `remoteWrite` section. The `remoteWrite.url` is exposed by the monitoring stack. The Prometheus server URL can be found by reviewing the details of the monitoring stack. Use the URL exposed by the Prometheus service. - - The following image displays the cluster details page of a monitoring stack. Use the URL exposed for port 9090 to populate the `remoteWrite.url` parameter. - -![A view of the cluster details page with a highlighted box around the Prometheus service URL](/integrations_prometheus-agent_cluster-detail-view.png) - -
- - - -The Prometheus server URL must be in the format of `http://HOST:PORT/api/v1/write`. -Example: `http://a2c938972938b4f0daee5f56edbd40af-1690032247.us-east-1.elb.amazonaws.com:9090/api/v1/write` - - - -
- -```yaml -charts: - prometheus: - server: - remoteWrite: - - url: "http://a2c938972938b4f0daee5f56edbd40af-1690032247.us-east-1.elb.amazonaws.com:9090/api/v1/write" -``` - -8. Confirm your changes by selecting **Confirm & Create**. - - -9. Click on **Add New Pack**. - - -10. Use the following information to find the Spectro Cluster Metrics pack. - - **Pack Type**: Monitoring - - **Registry**: Public Repo - - **Pack Name**: Spectro Cluster Metrics - - **Pack Version**: 3.3.X or newer - - -11. Use the default values. Confirm your changes by selecting **Confirm & Create**. - - -12. Click on **Next** to review the cluster profile. Save the cluster profile. - - -13. Navigate to the left **Main Menu** and select **Clusters**. - - -14. Click on **Add New Cluster**. Select **Deploy New Cluster**. - - -15. Pick the infrastructure provider you selected for the cluster profile you created earlier. - - -16. Assign a name to the host cluster and select the registered account that will deploy it. Click on **Next**. - - -17. Select a cluster profile to apply to your cluster. Click on **Next**. - - -18. The next screen displays all the layers of your cluster profile. You need to apply your add-on profile to this cluster profile. Click on the **+** button above the cluster profile layers. - - -19. Select the add-on profile you created earlier. Selecting the add-on profile ensures the Prometheus agent is installed with the correct configuration. Click on **Next** to proceed. - - -20. Complete the remainder of the cluster creation process. - -After the cluster deployment process, you will have a host cluster with the Prometheus agent installed and ready to send metrics to the monitoring stack. - -
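If you would like a command-line confirmation that the agent is forwarding data, you can query the monitoring stack's Prometheus API for the `up` series. This is only a sketch that reuses the example Prometheus service URL from the `remoteWrite` configuration above and assumes `jq` is installed; a non-zero count indicates that metrics are being received.

```shell
# Count the `up` series currently stored by the monitoring stack.
curl --silent "http://a2c938972938b4f0daee5f56edbd40af-1690032247.us-east-1.elb.amazonaws.com:9090/api/v1/query?query=up" | jq '.data.result | length'
```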
- - - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Profiles**. - - -3. Click on **Add Cluster Profile** to create a new cluster profile. - - -4. Provide the cluster profile a name and select the type **Add-on**. Click on **Next**. - - -5. In the following screen, select **Add New Pack**. - - -6. Use the following information to find the Prometheus Agent pack. - - **Pack Type**: Monitoring - - **Registry**: Public Repo - - **Pack Name**: Prometheus Agent - - **Pack Version**: 19.0.X or newer. - - -7. Review the YAML configuration on the right. Scroll down in the file until you find the parameter `url` in the `remoteWrite` section. The `remoteWrite.url` is exposed by the monitoring stack. You can find the Prometheus server URL by reviewing the details of the monitoring stack. Use the URL exposed by the Prometheus service. - - The following image displays the cluster details page of a monitoring stack. Use the URL exposed for port 9090 to populate the `remoteWrite.url` parameter. - -![A view of the cluster details page with a highlighted box around the Prometheus service URL](/integrations_prometheus-agent_cluster-detail-view.png) - -
- - - -The Prometheus server URL must be in the format `http://HOST:PORT/api/v1/write`. -Example: `https://metrics.example.com:9090/api/v1/write` - - - - ```yaml - charts: - prometheus: - server: - remoteWrite: - - url: "https://metrics.example.com:9090/api/v1/write" - ``` - -8. Add the `basic_auth` parameters shown below. Replace `<USERNAME>` and `<PASSWORD>` with the actual credential values. Use the username you created to authenticate with the Prometheus API server. If you followed the [Deploy a Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack#deployamonitoringstack) with authentication guide, then the username is `agent`. - 
- -```yaml -charts: - prometheus: - server: - remoteWrite: - - url: "https://metrics.example.com:9090/api/v1/write" - remote_timeout: "5s" - basic_auth: - username: "<USERNAME>" - password: <PASSWORD> -``` - -9. Confirm your changes. - - -10. Click on **Add New Pack**. - - -11. Use the following information to find the Spectro Cluster Metrics pack. - - **Pack Type**: Monitoring - - **Registry**: Public Repo - - **Pack Name**: Spectro Cluster Metrics - - **Pack Version**: 3.3.X or newer - - -12. Use the default values. Confirm your changes by selecting **Confirm & Create**. - - -13. Click on **Next** to review the cluster profile. Save the cluster profile. - - -14. Navigate to the left **Main Menu** and select **Clusters**. - - -15. Click on **Add New Cluster**. Select **Deploy New Cluster**. - - -16. Pick the infrastructure provider you selected for the cluster profile you created earlier. - - -17. Assign a name to the host cluster and select the registered account that will deploy it. Click on **Next**. - - -18. Select a cluster profile to apply to your cluster. Click on **Next**. - - -19. The next screen displays all the layers of your cluster profile. You need to apply your add-on profile to this cluster profile. Click on the **+** button above the cluster profile layers. - - -20. Select the add-on profile you created earlier. Selecting the add-on profile ensures the Prometheus agent is installed with the correct configuration. Click on **Next** to proceed. - - -21. Complete the remainder of the cluster creation process. - -When the cluster deployment completes, you will have a host cluster with the Prometheus agent installed and ready to send metrics to the monitoring stack. - - 
- -
- -# Validate - -To validate that the Prometheus agent is successfully installed and sending metrics to the monitoring stack, use the following steps. - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the monitoring stack cluster to review the details page. - - -4. Ensure the cluster is in the **Running** state. - - -5. Click on the exposed service URL for the service **prometheus-operator-kube-prometheus-stack-grafana**. - - -6. Log in to the Grafana dashboard using the user `admin` and the password you specified in the cluster profile. - - -7. In the Grafana dashboard, click on the left **Main Menu** and click on **Dashboards**. Palette exposes a set of Grafana dashboards by default. - - -8. Select the **Spectro Cloud/ Spectro Clusters** dashboard. - - -9. Use the **cluster** filter and review the list of available clusters. Select your newly deployed cluster to review its metrics. - - -![A grafana dashboard view of the cluster metric displaying pack status](/clusters_cluster-management_grafana_spectro_metrics.png) - -
- - - -Pods without the defined attributes `request` and `limit` will display no metrics data in the Grafana out-of-the-box Kubernetes Pods dashboard. - - - - -Use the other dashboard created by Palette to learn more about your environment. - - -# Next Steps - -Visit your Grafana dashboard and explore the Palette-created dashboard to learn more about your environment. To learn how to create dashboards tailored to your environment, check out the [Grafana tutorials](https://grafana.com/tutorials/). diff --git a/content/docs/04-clusters/06-cluster-management/02-health-alerts.md b/content/docs/04-clusters/06-cluster-management/02-health-alerts.md deleted file mode 100644 index c04940d10b..0000000000 --- a/content/docs/04-clusters/06-cluster-management/02-health-alerts.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: "Cluster Health Alerts" -metaTitle: "Cluster Health Alerts on Palette" -metaDescription: "Cluster Health Alerts" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - -# Overview -Palette monitors the health of all Workload Clusters and raises an alert when the Cluster goes to an unhealthy state. Besides displaying the alert on the User Interface (UI) console, Palette provides the ability to have these alerts pushed out to a variety of channels. Users can set up simple email alerts to receive a notice when the Health Status of their cluster changes. Additionally, they can set up Webhooks to integrate alerts with a variety of IT Service Management (ITSM) tools such as ServiceNow, Slack, or Microsoft Teams. These alerts are set up at the Project level and apply to all Clusters within the Project. - -The Palette Management Server relies on the following to trigger Cluster-health Alerts: - -* Node and resource metrics pushed by Spectro agent from clusters. - - -* Machines' info and heartbeat from the agent. - - Management server has a timeout of 10 mins for heartbeat detection. An alert is triggered if agent heartbeat is not received within the fixed timeout. Cluster will be marked as "unhealthy" when the agent is down/paused for troubleshooting. This behavior is applicable for: - * Both workload clusters and management cluster of public cloud. - * Management clusters of on-premises, enterprise infrastructure. - -# Email Alerts -1. As the Project Administrator, navigate to Project Settings. - - -2. Click **Alerts** to access the **Manage Alerts** page. - - -3. Enable **ClusterHealth**. - - -4. Select **Email all project members** option if the alert needs to be received by every Project Member or specify the email Ids of members who are supposed to receive the alerts. - - -5. Save the settings to start receiving the health alerts from your workload cluster. - -# Webhook Alerts - -1. As **Project Administrator**, navigate to **Project Settings**. - - -2. Click **Alerts** to access the **Manage Alerts** page. - - -3. Click on **Add New Webhook**. - - -4. Follow the Webhook creation wizard with the following details: - * **Alert type** - ClusterHealth - * **Method** - POST to Post the alert message to the hooked target - * **URL** - URL of the target to be hooked to receive alerts - * **Body** - JSON formatted alert message - * **Headers** - Optional header as key-value pair depending on the target - * **Active** - Enable to Disable the Webhook - - -5. 
Confirm the details provided to receive the health alerts for your workload clusters in your ITSM tools. - - - - diff --git a/content/docs/04-clusters/06-cluster-management/02.5-certificate-management.md b/content/docs/04-clusters/06-cluster-management/02.5-certificate-management.md deleted file mode 100644 index d942a2564e..0000000000 --- a/content/docs/04-clusters/06-cluster-management/02.5-certificate-management.md +++ /dev/null @@ -1,421 +0,0 @@ ---- -title: "Certificate Management" -metaTitle: "Certificate Management" -metaDescription: "Learn how to manage the cluster certificates through Palette." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -Palette installs Kubernetes through the tool, [kubeadm](https://kubernetes.io/docs/reference/setup-tools/kubeadm). As a result, all deployed clusters include auto-generated Public Key Infrastructure (PKI) certificates created by kubeadm. We recommend you review the [PKI certificates and requirement](https://kubernetes.io/docs/setup/best-practices/certificates) Kubernetes documentation to learn more about the auto-generated certificates and to better understand their purpose. - -This reference page focuses on how to renew the PKI certificates through Palette. You have two options for how you can renew the cluster PKI certificates: - -
- - * Automatic Certificate Renewal - - - * Manual Certificate Renewal - - - - - -Certificates created by kubeadm expire after 365 days. The Root Certificate Authority (CA) is valid for 3652 days or 10 years. - - - - -You can learn more about each option in the following sections. - - - -# Automatic Certificate Renewal - -When you upgrade the control plane on a cluster, the PKI certificates are automatically updated. Upgrading a cluster's Kubernetes version, whether a minor patch or a major release, results in renewed PKI certificates. This is the method that requires the least user actions when it comes to renewing PKI certificates. We recommend regularly updating your clusters to stay current with security fixes and best practices. By keeping your host cluster updated, you prevent the scenario of PKI certificates from reaching their one-year expiration date. - -
- - - -You can upgrade the Kubernetes version of a cluster by updating the Kubernetes layer of the [cluster profile](/cluster-profiles) and applying the cluster profile update to the cluster. For guidelines on updating pack versions, review [Update the Pack Version](/cluster-profiles/task-update-profile#updatethepackversion). - - - - -# Manual Certificate Renewal - -You can renew the cluster PKI certificates on-demand using the Palette API or the Palette User Interface (UI). When you manually trigger a certificate renewal action, it results in cluster nodes becoming repaved. Palette will scale up the cluster control plane nodes count and deploy new nodes with new PKI certificates auto-generated by kubeadm. Once the new control plane nodes are active and available, Palette will remove the previous control plane nodes. The worker nodes renew once the control plane nodes are updated and available. - -
- - - -A manual renewal of cluster PKI certificates results in all cluster nodes becoming repaved. - - - - -Review the following sections to learn more about manually renewing the cluster PKI certificates. Choose your preferred update method, using the Palette UI or the API. - - - -
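Whichever method you choose, if you have SSH access to a control plane node, you can optionally inspect the current certificate expiration dates directly with kubeadm before and after a renewal. This is only a sketch and assumes such node access is available in your environment.

```shell
# Run on a control plane node. Lists each kubeadm-managed certificate and its expiration date.
sudo kubeadm certs check-expiration
```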
- - - - - - -Use the following steps to update the cluster PKI certificates through the Palette UI. - - -## Prerequisites - -- A deployed host cluster. - - -- Access to the host cluster. - -## Renew Cluster Certificate - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the host cluster to renew its PKI certificates. - - -4. From the cluster details page, click on **View K8s Certificates**. - -
- - ![A view of the Palette UI with an arrow pointing to the **View K8s Certificates** button.](/clusters_cluster-management_certificate-management_cluster-details-page.png) - - -5. Next, select **Renew All** to start the renewal process. - -
- - ![A view of the cluster certificates displaying the expiration date](/clusters_cluster-management_certificate-management_certificate-renew-page.png) - -The renewal process may take several minutes, depending on the number of cluster nodes. - -## Validate - -Using the following steps, you can validate that the cluster's PKI certificates were renewed. - -
- -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the host cluster with the renewed PKI certificates. - - -4. From the cluster details page, click on **View K8s Certificates**. - - -5. Review the expiration date for each component. Each component's expiration date will have a status of **365d** with a date that is one year away. - - -
- - - - -Use the following steps to update the cluster PKI certificates through the Palette API. - - -## Prerequisites - -- A Palette API key. You can learn how to create a Palette API key by reviewing the [API Key](/user-management/user-authentication/) documentation. - - -- A deployed host cluster. - - -- Access to the host cluster. - - -## Renew Cluster Certificate - - -1. Set your Palette API key as an environment variable. Add your actual API key in place of `REPLACE_ME`. - -
- - ```shell - export API_KEY=REPLACE_ME - ``` - - -2. Set the project ID as an environment variable. Add your project ID in place of `REPLACE_ME`. You can find the project ID on the Palette landing page. The project ID is displayed in the top right corner of the page. - -
- - ```shell - export PROJECT_ID=REPLACE_ME - ``` - - - - -3. Set the cluster ID as an environment variable. Add your cluster's ID in place of `REPLACE_ME`. You can get the cluster ID from the cluster detail's page URL. The value after `clusters/` is the cluster ID. - -
- - ```shell - export CLUSTER_ID=REPLACE_ME - ``` - - - -4. Use the Palette API endpoint `https://api.spectrocloud.com/v1/spectroclusters/{uid}/k8certificates/renew` to renew a cluster's PKI certificates. The endpoint accepts the HTTP method `PATCH`, and the only required parameter is the cluster ID. - -
- - ```shell - curl --request PATCH \ - --url "https://api.spectrocloud.com/v1/spectroclusters/$CLUSTER_ID/k8certificates/renew" \ - --header 'Content-Type: application/json' \ - --header 'Accept: application/json' \ - --header "ApiKey: $API_KEY" \ - --header "ProjectUid: $PROJECT_ID" - ``` - -5. No output is returned and an HTTP status `204` is expected. - - - - -The renewal process may take several minutes, depending on the number of cluster nodes. - -## Validate - - -Using the following steps, you can validate that the cluster's PKI certificates were renewed. - 
- - -1. Set your Palette API key as an environment variable. Add your actual API key in place of `REPLACE_ME`. - -
- - ```shell - export API_KEY=REPLACE_ME - ``` - - -2. Set the project ID as an environment variable. Add your project ID in place of `REPLACE_ME`. - -
- - ```shell - export PROJECT_ID=REPLACE_ME - ``` - - - -3. Set the cluster ID as an environment variable. Add your cluster's ID in place of `REPLACE_ME`. - -
- - ```shell - export CLUSTER_ID=REPLACE_ME - ``` - - - -4. Retrieve the cluster's certificate information from Palette by using the `https://api.spectrocloud.com/v1/spectroclusters/{uid}/k8certificates` endpoint. - -
- - ```shell - curl \ - --url "https://api.spectrocloud.com/v1/spectroclusters/$CLUSTER_ID/k8certificates" \ - --header 'Content-Type: application/json' \ - --header 'Accept: application/json' \ - --header "ApiKey: $API_KEY" \ - --header "ProjectUid: $PROJECT_ID" - ``` - - -5. Validate the output and confirm the expiration date is one year away. - 
- - ```json - { - "machineCertificates": [ - { - "certificateAuthorities": [ - { - "certificates": [ - { - "expiry": "2024-05-23T16:51:05.000Z", - "name": "front-proxy-client" - } - ], - "expiry": "2033-05-23T16:45:22.209Z", - "name": "front-proxy-ca" - }, - { - "certificates": [ - { - "expiry": "2024-05-23T16:51:05.000Z", - "name": "kube-apiserver" - }, - { - "expiry": "2024-05-23T16:51:05.000Z", - "name": "kube-apiserver-kubelet-client" - } - ], - "expiry": "2033-05-23T16:45:22.209Z", - "name": "ca" - }, - { - "certificates": [ - { - "expiry": "2024-05-23T16:51:05.000Z", - "name": "kube-apiserver-etcd-client" - }, - { - "expiry": "2024-05-23T16:51:05.000Z", - "name": "kube-etcd-healthcheck-client" - }, - { - "expiry": "2024-05-23T16:51:05.000Z", - "name": "kube-etcd-peer" - }, - { - "expiry": "2024-05-23T16:51:05.000Z", - "name": "kube-etcd-server" - } - ], - "expiry": "2033-05-23T16:45:22.209Z", - "name": "etcd-ca" - } - ], - "name": "ip-10-0-1-120.ec2.internal" - } - ] - } - ``` - - -
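If you prefer a condensed view of this response, you can filter it with `jq`. The following is only a sketch that assumes `jq` is installed and reuses the environment variables set earlier.

```shell
# List every certificate name together with its expiry date.
curl --silent \
  --url "https://api.spectrocloud.com/v1/spectroclusters/$CLUSTER_ID/k8certificates" \
  --header "Accept: application/json" \
  --header "ApiKey: $API_KEY" \
  --header "ProjectUid: $PROJECT_ID" | \
  jq -r '.machineCertificates[].certificateAuthorities[].certificates[] | "\(.name)\t\(.expiry)"'
```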
- -
- -
- -# Advanced - Only Renew Control Plane Nodes - -You can configure Palette to only renew the PKI certificates for the control plane nodes. You can achieve this by using the annotation `spectrocloud.com/cert-renew-controlplane-only` and setting the value to `true`. To enable this behavior, you must use `kubectl` and apply the update to a Custom Resource Definition (CRD) created by Palette during the cluster deployment process. - -Use the following steps to configure Palette only to renew the certificates for control plane nodes. - - -## Prerequisites - -- Kubectl is installed in your system. - - -- A host cluster deployed. - - -- Access to the host cluster's kubeconfig file. Refer to the [Access Cluster with CLI](/clusters/cluster-management/palette-webctl) guide to learn how to use your cluster's kubeconfig file. - - -## Configure Cluster - - -1. Set your cluster name as an environment variable. Add your cluster's name in place of `REPLACE_ME`. - -
- - ```shell - export CLUSTER_NAME=REPLACE_ME - ``` - -2. Use the following command to retrieve the namespace of the CRD Palette created in your cluster. - 
- - ```shell - namespace=$(kubectl get spc --all-namespaces --output jsonpath='{range .items[?(@.metadata.name=="'"$CLUSTER_NAME"'")]}{.metadata.namespace}{"\n"}{end}') - ``` - - -3. Use `kubectl` to update the CRD to include the `spectrocloud.com/cert-renew-controlplane-only` annotation. - 
- - ```shell - kubectl annotate spc/$CLUSTER_NAME --namespace $namespace spectrocloud.com/cert-renew-controlplane-only="true" - ``` - -4. Verify the annotation was set correctly with the command below. The expected output is `true`. - 
- - ```shell - kubectl get spc/$CLUSTER_NAME --namespace $(kubectl get spc --all-namespaces --output jsonpath='{range .items[?(@.metadata.name=="'"$CLUSTER_NAME"'")]}{.metadata.namespace}{"\n"}{end}') --output jsonpath='{.metadata.annotations.spectrocloud\.com/cert-renew-controlplane-only}' - ``` - - Output - ``` - true - ``` - - -5. Next, trigger a certificate renewal using either [Automatic Certificate Renewal](/clusters/cluster-management/certificate-management#automaticcertificaterenewal) or [Manual Certificate Renewal](/clusters/cluster-management/certificate-management#manualcertificaterenewal). - - - -The renewal process may take several minutes, depending on the number of cluster nodes. - -## Validate - -Using the following steps, you can validate that the cluster's PKI certificates are renewed only for the control plane nodes. - - 
- -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the host cluster to renew its PKI certificates. - - -4. From the cluster details page, click on **View K8s Certificates**. - - -5. Review the expiration date for each component. Each component's expiration date will have a status of **365d** with a date that is one year away. - - -6. Navigate to the **Nodes** tab and verify the **Worker Pool** nodes' **Age** is not updated recently. - -
- - ![View of the cluster nodes where only the control plane nodes are modified](/clusters_cluster-management_certificate-management_control-plane-only-change.png) - - -
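You can also check the node ages from the command line. The sketch below assumes the cluster's kubeconfig is still configured from the earlier steps; only the control plane nodes should show a recent creation time.

```shell
# List nodes sorted by creation time. Worker nodes should keep their original age.
kubectl get nodes --sort-by=.metadata.creationTimestamp
```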
\ No newline at end of file diff --git a/content/docs/04-clusters/06-cluster-management/03-compliance-scan.md b/content/docs/04-clusters/06-cluster-management/03-compliance-scan.md deleted file mode 100644 index eb7d030ce6..0000000000 --- a/content/docs/04-clusters/06-cluster-management/03-compliance-scan.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: "Compliance Scan" -metaTitle: "Managing Cluster Update Events on Palette" -metaDescription: "Events and Notifications on Cluster Updates" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - # Overview - -Palette provides a way to run compliance, security, conformance, and software bill of materials (SBOM) scans on tenant clusters. These scans ensure cluster adherence to specific compliance and security standards. The scans also detect potential vulnerabilities by performing penetration tests. - -Palette supports four types of scans. Each scan generates reports with details specific to the type of scan. You can initiate multiple scans of each type over time. In addition, Palette keeps a history of previous scans for comparison purposes. To learn more about each scan type, refer to the following sections. - -
- - - -Scans may not work as expected when a node is in maintenance mode. Before scheduling a scan, we recommend you turn off maintenance mode if enabled. To verify if a node is in maintenance mode, navigate to **Clusters** > **Nodes** and check the **Health** column for a **Maintenance mode** icon. To turn off maintenance mode, click on the **three-dot Menu** in the row of the node you want to scan, and select **Turn off maintenance mode**. - - - -# Configuration Security - -This scan examines the compliance of deployed Kubernetes security features against the CIS Kubernetes Benchmarks. CIS Kubernetes Benchmarks are consensus-driven security guidelines for Kubernetes. Different releases of the CIS benchmark cover different releases of Kubernetes. By default, the configuration security scan determines the test set based on the Kubernetes version running on the cluster being scanned. Internally, Palette leverages an open-source tool called KubeBench from Aqua Security to perform this scan. Scans are run against the master and worker nodes of the Kubernetes cluster, and a combined report is made available on the UI. You can filter the report to view only the master or worker results if required. - -All the tests in the report are marked as Scored or Not Scored. Tests marked Not Scored cannot be run automatically and should be verified manually. - -![kcs.png](/kcs.png) - -# Penetration Testing - -Kubernetes penetration testing scans Kubernetes-related open ports for configuration issues that can leave tenant clusters exposed to attackers. It hunts for security issues in your Kubernetes clusters and increases awareness and visibility of the security controls in Kubernetes environments. The scan produces a full report of the cluster's security concerns. Internally, Palette leverages an open-source tool called KubeHunter from Aqua Security to perform this scan. Scans run in two modes, internal and external. In internal mode, tests are run against the internal endpoint of the API server, whereas in external mode, the external public-facing endpoint is used for testing. A combined report of vulnerabilities found in both modes is shown on the Palette UI. You can filter the report to view just the internal or external results if required. - -![kpt.png](/kpt.png) - -# Conformance Testing - -Kubernetes conformance testing validates your Kubernetes configuration to ensure it conforms to CNCF specifications. Palette leverages an open-source tool called Sonobuoy to perform this scan. Palette automatically selects a subset of relevant tests for execution based on the type of cloud (public, private) and the type of deployment infrastructure (IaaS, managed cloud service). Each test can take up to two hours to complete. If a cluster has a single worker node, a few tests may fail due to insufficient resources. For an accurate conformance assessment of a Kubernetes distribution, set up a cluster with at least two worker nodes. These tests are not destructive. However, they do launch several workloads in test namespaces as part of the tests. As a result, cluster resource consumption increases for the duration of the test run and may impact other workloads running on the cluster. - -A summary of the total passed and failed tests is displayed while the test is in progress. In addition, a complete overview of the tests that were run is displayed after the report is complete. 
- -![conformance.png](/conformance.png) - -# SBOM: Dependencies & Vulnerabilities - -## What is an SBOM? -An SBOM is a comprehensive list of the components, libraries, and other assets that make up a software application. It details the various third-party components and dependencies used in the software and helps to manage security and compliance risks associated with those components. - -The SBOM provides metadata about each component such as version, origin, license, and more. Reviewing the SBOM enables organizations to track vulnerabilities, perform regular software maintenance, and ensure compliance with regulatory requirements such as the European Union's General Data Protection Regulation (GDPR) and the Payment Card Industry Data Security Standard (PCI DSS). - -![sbom_scan.png](/sbom_scan.png) - -## Configure an SBOM Scan -To initiate an SBOM scan, navigate to **Clusters** and select the cluster to scan. On the **Cluster Overview** page, click the **Scans** tab, and expand the **Software Bill of Materials (SBOM)** drop-down menu. Select **Configure Scan** and choose the desired SBOM format, scan scope, and an optional backup location. Confirm your changes. - -Palette will identify every unique container image within your chosen scope and generate an SBOM for that image. Palette also runs the SBOM through a vulnerability scanner to flag any Common Vulnerabilities and Exposures (CVEs). Palette leverages two open-source tools from Anchore: [Syft](https://github.com/anchore/syft) for SBOM generation and [Grype](https://github.com/anchore/grype) for vulnerability detection. - -Suppose you specify a [backup location](/clusters/cluster-management/backup-restore). In that case, the SBOM for each image will be uploaded to your backup location, and you can subsequently download the SBOMs with the click of a button or using the Palette API. - -If a backup location is not provided, Palette will preserve all of the identified dependencies and vulnerabilities, but the raw SBOMs will not be available for download. The report results are available for review regardless of their backup location setting. - -
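If you download the raw SBOMs from your backup location, you can inspect or re-scan them locally with the same open-source tooling Palette uses. The following is only a sketch; the file name is a placeholder and it assumes Grype is installed and that the SBOM was generated in Syft JSON format.

```shell
# Re-scan a downloaded SBOM for known vulnerabilities without pulling the image again.
grype sbom:./my-image-sbom.syft.json
```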
- -#### SBOM Scan Format -* [SPDX](https://github.com/spdx/spdx-spec/blob/v2.2/schemas/spdx-schema.json): A standard SBOM format widely used by organizations and governments. The SPDX format has been around longer than any other SBOM format. - -* [CycloneDX](https://cyclonedx.org/specification/overview/): An open-source XML-based SBOM format that provides a standard representation of software components and their metadata. - -* Syft JSON: Syft's custom SBOM format. The Syft SBOM format contains the most metadata compared to the other SBOM formats. - -#### SBOM Scan Scopes -* Cluster: Scans all the images in your Kubernetes cluster. - -* Namespace: Scans all images in a particular Kubernetes namespace. - -* Label Selector: Scans all images used by all the Pods matching a label selector within a particular Kubernetes namespace. - -* Pod: Scans all images used by a single Pod. - -## Review SBOM Results -To review a completed scan, expand the **Software Bill of Materials (SBOM)** row. The expanded row displays the completed report containing detailed information about every scanned image. The context column indicates every unique use of each image, broken out by container name, namespace, and pod name. Each image may be used by various containers within a given scope. The vulnerability summary column provides a condensed view of the vulnerability report, which can be viewed in greater detail by clicking on any row in the scan report. - -![sbom_results.png](/sbom_results.png) - -Each identified image has its own detailed results page containing dependency and vulnerability reports. To review an image's result page, select the **>** button. Regardless of the selected SBOM format, each dependency’s name, version, and type is displayed, and each vulnerability's name, severity, code, impacted version, and fixed version is displayed. - -Additional metadata will be included in the SBOM. Exactly what additional metadata is included depends on the selected SBOM format. - -![sbom_dependencies.png](/sbom_dependencies.png) - -For each identified vulnerability, you can view the name, severity level, vulnerability code, installed or impacted version, and the fix version (if a fix is available). Any CVEs documented in the [NIST National Vulnerability Database](https://nvd.nist.gov/vuln) (NVD) will render as a hyperlink to the NVD detail page for that particular vulnerability. - -![sbom_vulnerabilities.png](/sbom_vulnerabilities.png) - -# Scan Options - -The following options are available for running cluster scans: - -## On Demand -A cluster scan of any type can be started by navigating to the **Scans** tab of a cluster in Palette. Scan progress displays as 'Initiated' and transitions to 'Completed' when the scan is complete. - -|__On Demand Scan__| -|------------------| -|Select the cluster to scan -> Scan(top panel) -> Run Scan.| - -## Scheduled -You can set a schedule for each scan type when you deploy the cluster, and you can change the schedule at a later time. - -|__During Cluster Deployment__| -|-----------------------------| -|Add New Cluster -> Settings -> Schedule scans -> Enable and schedule desired scans.| - -|__Running Cluster__| -|----------------------| -|Select the cluster to scan -> Settings -> Cluster Settings -> Scan Policies -> Enable and schedule scans of your choice.| - -### Schedule Options Available - -Schedule your compliance scan for month, day, hour, or minute. For example: -* Every week on Sunday at midnight. -* Every two weeks at midnight. 
-* Every month on the first day of the month at midnight. -* Every two months on the first day of the month at midnight - - - This operation can be performed on all cluster types across all clouds. - diff --git a/content/docs/04-clusters/06-cluster-management/04-os-patching.md b/content/docs/04-clusters/06-cluster-management/04-os-patching.md deleted file mode 100644 index fb01154c03..0000000000 --- a/content/docs/04-clusters/06-cluster-management/04-os-patching.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: "OS Patching" -metaTitle: "Managing Cluster OS patching through Palette" -metaDescription: "Managing Cluster OS patching through Palette" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - -# Overview - -Palette deploys Kubernetes clusters using pre-built VM images. The operating system (OS) on these images is the latest patch version when building the image for the supported major-minor streams. For example, if Ubuntu 18.04 is selected for the OS layer during provisioning, the OS on the cluster nodes might be using 18.04.3 LTE, assuming that was the latest version available at the time the VM image was built. However, newer versions continue to be published in the upstream repositories as improvements, bug fixes, and security patches are released. - -OS Patching allows the operating system on the running cluster nodes to be updated to the latest patch version so that it is up-to-date with the latest fixes. Assume Ubuntu 18.04.4 LTE and 18.04.5 LTE are released over time to fix important security issues. OS Patching operation will identify 18.04.5 as the latest version and upgrade it on the cluster nodes. The following choices are available for patching the operating system to the latest version. - -# Patch on Boot - -During the cluster creation, while configuring the cluster, you can select **Patch OS on boot**. In this case, the operating system on all cluster nodes will be updated to the latest when the cluster VMs are initially deployed. - -To enable **Patch OS on boot**, ensure you are in the **Settings** step of the cluster creation wizard. Next, click on the **Manage machines** tab, and select the check box for **Patch OS on boot**. -# Reboot if Required - -Palette supports the **Reboot if Required** feature to control the system reboot as part of cluster upgrades. Some system upgrades will require a reboot to apply the changes to the cluster. You need to check the **Reboot if Required** checkbox to allow the reboot. If this option is unchecked, the system reboot will be restricted. - -To enable **Reboot if Required**, ensure you are in the **Settings** step of the cluster creation wizard. Next, click on the **Manage machines** tab, and select the check box for **Reboot if Required**. - -# Scheduled -Palette also supports OS patching through a schedule. The patching schedule can be set initially when creating a cluster as well as at any given point later. The following scheduling options are available. - - -* Never -* Every week on Sunday at midnight -* Every two weeks at midnight -* Every month on the 1st at midnight -* Every two months on the 1st at midnight -* Custom OS patch for an exact month, day, hour and minute of your choice - - -To enable **OS Patching Schedule**, ensure you are in the **Settings** step of the cluster creation wizard. 
Next, click on the **Manage machines** tab, and select the drop-down input for **OS Patching Schedule**. - - -To enable **OS Patching Schedule** for an active cluster, navigate to the **Main Menu** and click on **Clusters**. In the cluster view page, find the row for the cluster you want to configure OS patching for. Click on the three dots at the end of the row to access the cluster settings. Next, click on the **Machine Management** tab, and select the drop-down input for **OS Patching Schedule**. - -# On-Demand -This option provides a way for you to perform an immediate update. - -To perform an **On-Demand** update for an active cluster, navigate to the **Main Menu** and click on **Clusters**. In the cluster view page, find the row for the cluster you want to configure OS patching for. Click on the three dots at the end of the row to access the cluster settings. Next, click on the **Machine Management** tab, and select the drop-down input for **OS Patching Schedule**. - 
- - - This operation is not available for existing Kubernetes clusters imported into Palette. - This operation is not available for managed Kubernetes Services such as EKS, AKS, etc. For EKS clusters, an OS update can be triggered from Palette. This would initiate a request on AWS to update cluster node groups to the latest patch version. - - -# Monitoring - -The clusters' OS patching status can be monitored through the **Node** tab of the cluster details page. The following patch details are available to monitor: - -| **Field** | **Description** | -|---|---| -| Last Applied Patch Time | The date and time of the last OS patch.| -| Patched Version| The latest patched version.| diff --git a/content/docs/04-clusters/06-cluster-management/05-backup-restore.md b/content/docs/04-clusters/06-cluster-management/05-backup-restore.md deleted file mode 100644 index c2df6de23a..0000000000 --- a/content/docs/04-clusters/06-cluster-management/05-backup-restore.md +++ /dev/null @@ -1,330 +0,0 @@ ---- -title: "BackUp and Restore" -metaTitle: "Managing Cluster Update Events on Palette" -metaDescription: "Events and Notifications on Cluster Updates" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -Palette provides two ways to back up and restore Kubernetes clusters: - -* Cluster Backup and Restore for a single cluster which is managed from within the cluster. -* [Workspace](/workspace/workload-features#workspaceoperator) Backup and Restore for multiple clusters managed from workspaces. - -# Cluster Backup and Restore - -Palette provides a convenient backup option to back up the Kubernetes cluster state into object storage and restore it at a later point in time, if required, to the same or a different cluster. Besides backing up Kubernetes native objects such as Pods, DaemonSets, and Services, persistent volumes can also be snapshotted and maintained as part of the backup. Internally, Palette leverages an open-source tool called Velero to provide these capabilities. In addition, multiple backups of a cluster can be maintained simultaneously. - -Palette supports backups to the following locations: -
- -#### Amazon Web Services (AWS) S3 Buckets: [Prerequisites](/clusters/cluster-management/backup-restore#foranamazonwebservices(aws)bucketasbackuplocation), [Configure your Backup](/clusters/cluster-management/backup-restore#configureyourbackupinawss3) - -#### Google Cloud Platform (GCP) Buckets: [Prerequisites](/clusters/cluster-management/backup-restore#foragooglecloudplatform(gcp)backuplocation), [Configure your Backup](/clusters/cluster-management/backup-restore#configureyourbackupingcpbucket) - -#### MinIO S3 Buckets: [Prerequisites](/clusters/cluster-management/backup-restore#forminios3backup), [Configure your Backup](/clusters/cluster-management/backup-restore#configureyourbackupinminio) - -#### Azure Blob:[Prerequisites](/clusters/cluster-management/backup-restore#forazureblobbackup),[Configure your Backup](/clusters/cluster-management/backup-restore#configureyourbackupinazure:azureblob) - -# Prerequisites - -## For an Amazon Web Services (AWS) Bucket as Backup Location - -* The Amazon Simple Storage Service (S3) permissions listed in the next section need to be configured in the AWS account to provision Backup through Palette. - -* Pre-created bucket at the AWS Console. - -## For a Google Cloud Platform (GCP) Backup Location - -* GCP service account with a `storage admin` role. - -* Pre-created bucket at the GCP object storage. - -## For MinIO S3 Backup - -* S3 bucket with Read/Write Access - -* A unique access key (username) and corresponding secret key (password) from MinIO Console. - -* Service provider certificate (Optional) - -## For Azure Blob Backup - -* An active Azure cloud account with the following pieces of information noted down: - * Tenant Id - * Client Id - * Subscription Id - * Client Secret created - - -* An [Azure storage account](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-create?tabs=azure-portal) created with the following information to be noted down for Palette use: - * Storage Name: Custom name given to the Azure storage created. - * Stock-keeping unit - - -* A container to be created in the Azure Storage account - -# Backup Locations - -Creating the backup location is identical for both cluster and workspace backup. AWS S3 and other S3 compliant object stores such as MinIO and GCP Buckets are currently supported as backup locations. These locations can be configured and managed under the **Project** > **Settings** option and can be selected as a backup location, while backing up any cluster in the project. - -## Configure your Backup in AWS S3 - -The following details are required to configure a backup location in AWS: - -1. **Location Name**: Name of your choice. - - -2. **Location Provider**: AWS - - -3. **Certificate**: Required for MinIO. - - -4. **S3 Bucket**: S3 bucket name must be pre-created on the object-store. - - -5. **Configuration**: region={region-name},s3ForcePathStyle={true/false},s3Url={S3 URL}. S3 URL need not be provided for AWS S3. - - -6. **Account Information** - Details of the account which hosts the S3 bucket to be specified as Credentials or STS. - * Credentials - Provide access key and secret key. - * STS - Provide the ARN and External ID of the IAM role that has permission to perform all S3 operations. The STS role provided in the backup location should have a trust set up with the account used to launch the cluster itself and should have the permission to assume the role. - - -7. Palette mandates the AWS S3 Permissions while users use the static role to provision worker nodes. 
- -#### AWS S3 Permissions - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeVolumes", - "ec2:DescribeSnapshots", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:CreateSnapshot", - "ec2:DeleteSnapshot" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:PutObject", - "s3:AbortMultipartUpload", - "s3:ListMultipartUploadParts" - ], - "Resource": [ - "arn:aws:s3:::BUCKET-NAME/*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "s3:ListBucket" - ], - "Resource": [ - "arn:aws:s3:::BUCKET-NAME" - ] - } - ] - } - ``` - -#### Trust Setup Example - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::141912899XX99:root" - }, - "Action": "sts:AssumeRole", - "Condition": {} - } - ] - } - ``` - -## Configure your Backup in GCP Bucket - -These locations can be configured and managed from the 'Settings' option under 'Project' and can be selected as a backup location while backing up any cluster in the project. - -The following details are required to configure a backup location in GCP: - -1. **Location Name**: Name of your choice. - - -2. **Location Provider**: Google Cloud (Choose this option when backing up to the GCP bucket object store). - - -3. **Bucket**: The name of the bucket name pre-created on the object store. - - -4. **JSON Credentials**: For external authentication of the GCP storage. - - -5. Click Create to complete the location creation wizard. - -## Configure your Backup in MinIO - -The following details are required to configure a backup location in MinIO: - -1. **Location Name**: Name of your choice. - - -2. **Location Provider**: Minio - - -3. **Certificate**: Optionally required for MinIO. - - -4. **S3 Bucket**: S3 bucket name must be pre-created on the MinIO object-store. - - -5. **Region**: Region in which Minio server is configured. Example: us-east-1 - - -6. **S3 URL**: Url of the MinIO object storage console. Example: `http://12.123.234.567:0000' - - -7. **Force S3 path style** : To force S3 path style addressing or else the url will be converted to virtual-hosted style addressing with bucket name appended to the url.This is an optional setting. - - -8. Provide the MiniIO unique **Access Key** and **Secret Key**. An unique access key (username) and corresponding secret key (password) can be obtained for every MinIO user account from MinIO console. - - -9. Click **Create** to complete the location creation wizard. - - -## Configure your Backup in Azure: Azure Blob - -The following details are required to configure a backup location in Azure: - -1. **Location Name**: A custom name for the storage location getting created. - - -2. **Location Provider:** Select **Azure** from the drop-down. - - -3. **Container Name:** The container created in Azure Storage. - - -4. **Storage Name**: Name of the Azure storage created. - - -5. **Stock-Keeping Unit**: Information from the Azure storage. - - -6. **Resource Group:** Azure Resource Group name - - -7. **Tenant ID:** Azure Account Credential. - - -8. **Client ID:** Azure Account Credential. - - -9. **Subscription ID**: Azure Account Credential. - - -10. **Client Secret:** Secret created in the Azure console needs to be validated. - - -11. Click **Create** to complete the location creation wizard. - - -## Add a Backup Location - -Go to **Project Settings** > **Backup locations** > **Add a New Backup location**. 
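Because Palette uses Velero under the hood, the backup location settings described above map roughly onto a Velero `BackupStorageLocation` resource. The sketch below is illustrative only; Palette creates the equivalent object for you, and the bucket name, region, and MinIO endpoint shown here are placeholders:

```yaml
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: minio-backup-location          # placeholder name
  namespace: velero
spec:
  provider: aws                        # MinIO is addressed through the S3-compatible AWS provider
  objectStorage:
    bucket: my-backup-bucket           # pre-created bucket (placeholder)
  config:
    region: us-east-1                  # region configured on the MinIO server
    s3ForcePathStyle: "true"           # force path-style addressing, as described above
    s3Url: "http://minio.example.com:9000"   # MinIO endpoint (placeholder)
```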
- - -# Create a Cluster Backup - -Backups can be scheduled or initiated on demand during cluster creation. Backups can also be scheduled for a running cluster. The following information is required to configure a cluster backup: - -1. **Backup Prefix / Backup Name**: - * For scheduled backup, a name will be generated internally, add a prefix of our choice to append with the generated name. - * For an on demand Backup, a name of user choice can be used. - - -2. Select the **Backup location**. - - -3. **Backup Schedule**: Create a backup schedule of your choice from the drop-down, applicable only to scheduled backups. - - -4. **Expiry Date**: Select an expiry date for the backups. The backup will be automatically removed on the expiry date. - - -5. **Include all disks**: Optionally backup persistent disks as part of the backup. - - -6. **Include Cluster Resources**: Select or deselect on your choice. - - -7. **Namespaces**: Provide namespaces that need to be backed up. If left empty then all the Namespaces will be backed up. - -|On Demand Backup | -|-------------------| -|Select the cluster from **Backup** > **Settings** > **Cluster Settings** > **Schedule Backups**| - -|Scheduled Backup | -|-----------------| -|**Cluster Creation** > **Policies** > **Backup Policies**| - - -### Backup Scheduling Options - -Both the cluster and workspace backup support the following scheduling options: - -* Customize your backup for the exact month, day, hour, and minute of the user's choice -* Every week on Sunday at midnight -* Every two weeks at midnight -* Every month on the 1st at midnight -* Every two months on the 1st at midnight - -# Restore a Backup - -Backups created manually or as part of the schedule are listed under the Backup/Restore page of the cluster. - -1. Restore operation can be initiated by selecting the restore option for a specific backup. - - -2. Next, you would be prompted to select a target cluster where you would like the backup to be restored. The progress of the restore operation can be tracked from the target cluster's backup/restore page. - - -3. Finally, restore operation can be done to the cluster running on the same project. - - - -Some manual steps might be required, when restoring backups to a cluster running on a cloud different from the source cluster. For example, you might need to pre-create a storage class on the cluster before initiating restore procedures: - For EKS, please specify gp2 storage class. - For other cloud environments, please specify spectro-storage-class. - - - -When restoring your backup to a cluster launched using a cloud account different from the one used for the source account, permissions need to be granted before restoration is initiated to the new cluster. - - - - This operation can be performed on all cluster types across all clouds. 
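As noted above, restoring to a cluster running on a different cloud may require pre-creating a storage class before the restore is initiated. The following is a minimal sketch for the EKS (`gp2`) case; on EKS this class usually exists already, and for other environments the class name (for example, `spectro-storage-class`) and the provisioner must match the target cluster's storage driver:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gp2                            # use spectro-storage-class for non-EKS targets
provisioner: kubernetes.io/aws-ebs     # in-tree EBS provisioner used by the default EKS gp2 class
parameters:
  type: gp2
  fsType: ext4
volumeBindingMode: WaitForFirstConsumer
```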
- diff --git a/content/docs/04-clusters/06-cluster-management/06-cloud-cost.md b/content/docs/04-clusters/06-cluster-management/06-cloud-cost.md deleted file mode 100644 index 025b4b2b48..0000000000 --- a/content/docs/04-clusters/06-cluster-management/06-cloud-cost.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: "Cost Visibility" -metaTitle: "Calculate Cloud Cost in Spectro Cloud" -metaDescription: "Calculate Cloud Cost in Palette" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -Palette calculates estimated cloud cost for workload clusters based on the rate of the instance type used for cluster node pools and usage cost breakdown by namespaces based on actual resource utilization within the namespace. - -# Cloud Cost - -Cluster cloud cost is the sum of the estimated cost of all the nodes launched in the cluster. The cost calculation is done based on the instance type and storage type selected for each machine pool. - -| |**FORMULAS FOR CALCULATION**| -|--|--------------| -| |Machine Pool Cost = ( Number of Nodes X Instance Price ) + ( Storage Size X Storage Price )| -| |Cluster Cloud Cost = Master Pool Cost + Worker Pool Cost| - -**Example 1:** - -Let's assume that a cluster ‘demo’ is launched with two machine pools with the following configuration: - -|MACHINE POOL|SIZE | INSTANCE TYPE WITH COST|ROOT DISK WITH COST| -|--|-----|---|----| -|MASTER POOL|3|AWS t2.medium($0.0496/hour)|60GB - gp2($0.00014/GB/hour)| -|WORKER POOL|3|AWS t2.large($0.0992/hour)|60GB - gp2($0.00014/GB/hour)| - -|Calculation for the above scenario| -|----------| -|master-pool cost = ( 3 X $0.0496 ) + ( 60 X $0.00014 ) = $0.1572/hour| -|worker-pool cost = ( 3 X $0.0992 ) + ( 60 X $0.00014 ) = $0.306/hour| -|Cluster Cloud Cost = $0.1572 + $0.306 = $0.4632/hour| - - - For private clouds like VMware, OpenStack, MaaS, etc., the unit rate for CPU and Memory can be configured as an administrative setting. These rates are used in place of instance-type rates for cost calculation. - - - -# Usage Cost -Usage cost is calculated based on the pods' actual CPU & Memory usage, including the claimed PVC storage size. The pod cost calculation is done by dividing the instance type rate into CPU and memory rates proportional to the instance type category. 
- -|Instance Type Category| CPU: Memory | -|--|--| -|General Purpose|65% : 35%| -|Compute Optimized|65% : 35%| -|Memory-Optimized|25% : 75%| - -|**FORMULAS FOR CALCULATION** || -|--|--------------| -|Pod CPU Cost = (CPU Proportion x Instance Rate ) x Pod CPU Usage| -|Pod Memory Cost = (Memory Proportion x Instance Rate) x Pod Memory Usage| -|Pod Storage Cost = PVC Storage Size x Storage Rate| -|Pod Cost = Pod CPU Cost + Pod Memory Cost + Pod Storage Cost| - -**Example 2** - -For the cluster configuration of master-pool & worker-pool considers in example 1, - -|Calculation for the example scenario| -|----------| -|Pod CPU usage = 200m, Pod Memory Usage = 200MB, Pod Storage Size = 10GB| -|Pod CPU Cost = ( 65% * $0.0992 ) * 200m = 0.06448 * 0.2 = $0.012896/hour| -|Pod Memory Cost = ( 35% * $0.0992 ) * 200MB = 0.03472 * 0.2GB = $0.006944/hour| -|Pod Storage Cost = 10GB * $0.00014 = $0.0014/hour| -|Pod Cost = $0.012896 + $0.006944 + $0.0014 = $0.02124/hour| - - - - Cluster costs are calculated for all cluster types (new and existing) across all cloud types (public and private) - - diff --git a/content/docs/04-clusters/06-cluster-management/07-workloads.md b/content/docs/04-clusters/06-cluster-management/07-workloads.md deleted file mode 100644 index f8d95a2620..0000000000 --- a/content/docs/04-clusters/06-cluster-management/07-workloads.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: "Workload Visibility" -metaTitle: "View cluster workloads" -metaDescription: "Browse all cluster resources such as pods, deployment sets, etc." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -Palette provides visibility into the resources running inside workload clusters. These resources are displayed on the cluster details page. Following is the list of resources shown in the workload browser: - -
- -* Namespaces - - -* Pods - - -* DeploymentSets - - -* DaemonSets - - -* StatefulSets - - -* Jobs - - -* CronJobs - - -* Role Bindings - - -* Cluster Role Bindings - - - - Workloads are displayed for all cloud types (new, existing) (public, private) across all cloud types. - diff --git a/content/docs/04-clusters/06-cluster-management/08-taints.md b/content/docs/04-clusters/06-cluster-management/08-taints.md deleted file mode 100644 index 7c43ef70b8..0000000000 --- a/content/docs/04-clusters/06-cluster-management/08-taints.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: "Labels and Taints" -metaTitle: "Labels and Taints" -metaDescription: "Learn how to apply labels and taints to nodes in a cluster, and how to specify Namespace labels and annotations to Add-on packs and packs for Container Storage Interface (CSI) and Container Network Interface (CNI) drivers." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Taints - -Node affinity is a property of Pods that attracts them to a set of nodes (either as a preference or a hard requirement. Taints are the opposite -- they allow a node to repel a set of pods. - -Tolerations are applied to pods and allow (but do not require) the pods to schedule onto nodes with matching taints. - -Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. One or more taints are applied to a node; this marks that the node should not accept any pods that do not tolerate the taints. - -Palette enables Taints to be applied to a node pool to restrict a set of intolerant pods getting scheduled to a Palette node pool. Taints can be applied during initial provisioning of the cluster and modified later. - -## Apply Taints to nodes - -Taints can be applied to worker pools while creation of a new cluster from the node pool configuration page as follows: - -* Enable the “Taint” select button. -* To apply the Taint, set the following parameters: - * Key: Custom key for the Taint - * Value: Custom value for the Taint key - * Effect: The effects define what will happen to the pods that do not tolerate a Taint. There are 3 Taint effects: - * NoSchedule: A pod that cannot tolerate the node Taint, should not be scheduled to the node. - * PreferNoSchedule: The system will avoid placing a non-tolerant pod to the tainted node but is not guaranteed. - * NoExecute: New pods will not be scheduled on the node, and existing pods on the node, if any will be evicted if they do not tolerate the Taint. - -Eg: Key = key1; - Value = value1; - Effect = NoSchedule - -Taints can also be updated on a running cluster by editing a worker node pool from the 'Nodes' tab of the cluster details page. - -# Labels - -You can constrain a Pod to only run on a particular set of Node(s). There are several ways to do this and the recommended approaches such as, nodeSelector, node affinity, etc all use label selectors to facilitate the selection. Generally, such constraints are unnecessary, as the scheduler will automatically do a reasonable placement (e.g. spread your pods across nodes so as not place the pod on a node with insufficient free resources, etc.) 
but there are some circumstances where you may want to control which node the pod deploys to - for example to ensure that a pod ends up on a machine with an SSD attached to it, or to co-locate pods from two different services that communicate a lot into the same availability zone. - -Palette enables our users to Label the nodes of a master and worker pool by using key/value pairs. These labels do not directly imply anything to the semantics of the core system but are intended to be used by users to drive use cases where pod affinity to specific nodes is desired. Labels can be attached to node pools in a cluster during creation and can be subsequently added and modified at any time. Each node pool can have a set of key/value labels defined. The key must be unique across all node pools for a given cluster. - -## Apply Labels to nodes - -Labels are optional and can be specified in the **Additional Labels** field of the node pool configuration form. Specify one or more values as 'key:value'. You can specify labels initially during cluster provisioning and update them any time by editing a node pool from the **Nodes** tab of the cluster details page. \ No newline at end of file diff --git a/content/docs/04-clusters/06-cluster-management/09-cluster-rbac.md b/content/docs/04-clusters/06-cluster-management/09-cluster-rbac.md deleted file mode 100644 index fe79a6406e..0000000000 --- a/content/docs/04-clusters/06-cluster-management/09-cluster-rbac.md +++ /dev/null @@ -1,237 +0,0 @@ ---- -title: "RBAC and NS Support" -metaTitle: "Cluster Level RBAC and NS Support" -metaDescription: "Cluster Level RBAC and NS Support for Access Control" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -[*RoleBindings*](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) and *ClusterRoleBindings* are Role-Based Access Control (RBAC) concepts that allow granular control over cluster-wide resources. Palette provides you the ability to specify bindings to configure granular RBAC rules. - -You can configure namespaces and RBAC from within a cluster or from a [Palette Workspace](/workspace) that contains a collection of like clusters that need to be managed as a group. If a host cluster is part of a Palette workspace, then all roleBindings must occur at the namespace level. - -As you review RBAC support, use the following definitions: - -- **Role** An entity that is assigned a set of access permissions within a namespace. Roles require the assignment of a Kubernetes namespace. - -
- - - ```yaml - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - namespace: default - name: pod-reader - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "watch", "list"] - ``` - -- **Cluster Role** An entity that is assigned a set of access permissions scoped to the cluster and all of its Kubernetes namespaces. ClusterRoles do not have a namespace assigned. - -
- - ```yaml - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: secret-reader - rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "watch", "list"] - ``` - -- **RoleBinding** associates a subject with a role. A subject can be a user, a group, or a [*ServiceAccount*](https://kubernetes.io/docs/concepts/security/service-accounts/). Role binding is used to grant permissions to a subject. Role and RoleBinding are used to scope a subject to a specific Kubernetes namespace. - -
- - ```yaml - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: read-pods - namespace: default - subjects: - - kind: User - name: jane - apiGroup: rbac.authorization.k8s.io - roleRef: - kind: Role - name: pod-reader - apiGroup: rbac.authorization.k8s.io - ``` - -- **ClusterRoleBinding** associates a subject with a ClusterRole. A subject can be a user, a group, or a [*ServiceAccount*](https://kubernetes.io/docs/concepts/security/service-accounts/). A ClusterRoleBinding is used to grant permissions to a subject. ClusterRole and ClusterRoleBinding are used to scope a subject's access to the cluster which includes all the Kubernetes namespaces inside the cluster. - - -There are many reasons why you may want to create roles and assign permissions to different users or groups. Below are a few common scenarios. - -* Use Role and a RoleBinding to scope security to a single Kubernetes namespace. -* Use Role and a RoleBinding to scope security to several Kubernetes namespaces. -* Use ClusterRole and ClusterRoleBinding to scope security to all namespaces. - - -
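The definitions above include example YAML for a Role, a ClusterRole, and a RoleBinding. For completeness, a minimal ClusterRoleBinding that grants the `secret-reader` ClusterRole shown earlier across all namespaces might look like the following; the group name `manager` is a placeholder:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: read-secrets-global
subjects:
  - kind: Group
    name: manager                      # placeholder group
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: secret-reader                  # ClusterRole defined above
  apiGroup: rbac.authorization.k8s.io
```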
- - - -Palette does not provide a way for roles to be configured natively through its platform. You can create roles by using a manifest layer in the cluster profile. RBAC management only allows you to specify role bindings. - - - - -Use the steps below to create a RoleBinding or ClusterRoleBinding for your host clusters. - - -
- -# Palette Roles and Kubernetes Roles - -Palette offers a set of [default roles](/user-management/palette-rbac#palettespecific(default)roles:) you can assign to your users. The Palette roles are only in scope at the platform level. This means you can manage the permissions for users' actions in Palette, such as creating or deleting clusters, creating projects, creating users, and more. - -The Kubernetes roles are used to control the actions users are allowed to do inside the cluster. For example, a user in Palette could have the *Cluster Profile Viewer* role, which grants them the ability to view cluster profiles for a specific project. In all the clusters in this project, the user could be assigned a role binding to a custom role that grants them administrative access in all the clusters. - -In summary, using Palette roles allows you to control what actions users can do in Palette. Use Kubernetes roles to control users' actions inside a host cluster. - -
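To make that split concrete, the sketch below grants a group from your identity provider edit rights inside a single namespace by binding it to the built-in `edit` ClusterRole. The namespace and group name are placeholders, and the example assumes the group claim is passed through the cluster's OIDC configuration as described in the next section:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: dev-team-edit
  namespace: dev                       # placeholder namespace
subjects:
  - kind: Group
    name: dev-team                     # group claim from the OIDC provider (placeholder)
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: edit                           # default Kubernetes user-facing ClusterRole
  apiGroup: rbac.authorization.k8s.io
```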
- - - -Palette roles do not automatically map to a Kubernetes role. You must create a role binding for a specific user or group of users. - - - -# Create Role Bindings - -## Prerequisites - -To create a role binding the role must exist inside the host cluster. You can use any of the [default cluster roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) provided by Kubernetes. The alternative to default cluster roles is to create a role by using a manifest in the cluster profile. - -If you have OpenID Connect (OIDC) configured at the Kubernetes layer of your cluster profile, you can create a role binding that maps individual users or groups assigned within the OIDC provider's configuration to a role. To learn more, review [Use RBAC with OIDC](/integrations/kubernetes#userbacwithoidc). - -## Enablement - -You can create role bindings during the cluster creation process or after the host cluster is deployed. - -For a new cluster, you can modify the cluster settings at the end of the cluster creation process. RBAC is one of the cluster settings you can modify. Select **RBAC** from the left **Settings Menu**. - - -![A view of the cluster settings page when creating a cluster](/clusters_cluster-management_cluster-rbac_cluster-creation-settings.png) - -To create or modify a role binding for an active cluster. Navigate to the cluster details page and click on **Settings**. Select **RBAC** from the left **Settings Menu**. - -![A view of the cluster settings page for an active cluster](/clusters_cluster-management_cluster-rbac_cluster-settings.png) - - -The RBAC settings view contains two tabs: - -* **Cluster**: Use this tab to create a ClusterRoleBinding. -* **Namespaces**: Use this tab to create a RoleBinding within Kubernetes namespaces. - -Select the tab for your specific role scope to learn how to create the appropriate role binding. - - - - -1. From the cluster settings view, select the **RBAC** tab. - - -2. Click on **Add New Binding**. - - -3. Fill out the following details: - * Role Name: Define a custom role name to identify the cluster role. - * Subjects: Subjects are a group of users, services, or teams using the Kubernetes API. - * Subject Name: Custom name to identify a subject. - - - - -In Kubernetes, a role binding connects a user or group with a set of permissions called a Role. The Role can be in the same namespace as the RoleBinding. If you want to give a role access to all the namespaces in your cluster, use a ClusterRoleBinding. - - - -4. Click on **Confirm** to save your changes. - -A ClusterRoleBinding will be created in your host cluster. Keep in mind that you can assign multiple subjects to a ClusterRoleBinding. - - - - - -1. From the cluster settings view, select the **RBAC** tab. - - -2. Click on **Add New Binding**. - - -3. Add the namespace name or provide a regular expression to automatically apply the following settings to other namespaces in the future. Example: `/^web-app/`. Click on **Add To List**. - - -4. Allocate resources to the selected namespace. You can allocate the maximum CPU and Memory the role is allowed to consume from the listed namespaces. - - -5. Click on **Add New Binding**. - - -6. Fill out the following details: - * Namespace: Select the namespace. - * Role Type: The type of role. You can specify either a role or a cluster role. - * Role Name: Define a custom role name to identify the cluster role. - * Subjects: Subjects are a group of users, services, or teams using the Kubernetes API. 
- * Subject Name: Custom name to identify a subject. - - - - -In Kubernetes, a role binding connects a user or group with a set of permissions called a Role. The Role can be in the same namespace as the RoleBinding. If you want to give a role access to all the namespaces in your cluster, use a ClusterRoleBinding. - - - - -A role binding will be created in the listed namespaces. Keep in mind that you can assign multiple subjects to a RoleBinding or ClusterRoleBinding. - - - - - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the cluster you created the role binding in to view its details page. - - -4. Download the **kubeconfig** file for the cluster or use the web shell to access the host cluster. - - -5. Use the following commands to review details about the role and to ensure the role binding was successful. - - -#### Cluster Role: - -```shell -kubectl get clusterrole --output yaml -``` - - -#### Role - -```shell -kubectl get role --namespace --show-kind --export -``` - diff --git a/content/docs/04-clusters/06-cluster-management/09.5-namespace-management.md b/content/docs/04-clusters/06-cluster-management/09.5-namespace-management.md deleted file mode 100644 index 8599198001..0000000000 --- a/content/docs/04-clusters/06-cluster-management/09.5-namespace-management.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: "Namespace Management" -metaTitle: "Namespace Management" -metaDescription: "Learn how to create and delete a namespace and assign resource quotas." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - - -# Overview - -In Kubernetes, namespaces provide a way to isolate groups of resources within a single cluster. Some of the benefits of namespaces are: - -
- -- They can be used to partition resources among multiple users via resource quota – where each namespace has its own set of resources – without having to set up multiple physical clusters. - - -- You can configure Role-Based Access Control (RBAC) based on namespaces. For information about configuring namespaces and RBAC, check out [RBAC and NS Support](/clusters/cluster-management/cluster-rbac#createrolebindings). - - -- Namespaces can be used for different purposes such as testing, development, and production. - - -- You can use namespaces to help prevent resource naming conflicts. Resource names must be unique within a namespace but not across namespaces. - - -- In environments that have hybrid containerized and virtualized applications, a separate namespace can be used to isolate virtual machines (VMs). For information about a VM environment in Palette, check out [Virtual Machine Management](/vm-management). - - - -# Create a Namespace - -The following steps will guide you on how to create a namespace. - - - -## Prerequisites - -- An active cluster. - - -- Permission to create a namespace. - - -- A unique namespace name. - - - -## Create a Namespace in a Cluster - -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Navigate to the left **Main Menu** and click on **Clusters**. - - -3. Select the cluster in which you want to create a namespace. - - -4. Navigate to the **Workloads** > **Namespaces** tab, and click the **Manage Namespaces** button. - -
- - The **Settings** pane displays with **RBAC** preselected and the **Namespaces** tab opened by default. - - ![Cluster Settings pane with three arrows that point respectively to Namespace Name field, Add to List button, and the location where the namespace is listed](/clusters_cluster-management_namespace-create.png) - - -5. Type a unique namespace name in the **Namespace name or Regex** field and click **Add to List** at right. - - - -6. You can assign resource quotas now or at a later time. To learn how, check out [Assign Resource Quotas](/clusters/cluster-management/namespace-management#assignresourcequotas). - -
- - For details on how to configure RBAC for namespaces, check out the [RBAC and NS Support](/clusters/cluster-management/cluster-rbac) guide. - - - -## Validate - -Validate that the namespace was successfully created. - -
- -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Navigate to the left **Main Menu** and click on **Clusters**. - - -3. Select the cluster that contains the namespace you created and view its details. - - -4. In the **Settings** pane, click **RBAC** > **Namespaces** tab. - -
- - The namespace you created will be listed under **Workspace Quota**. - - - -# Assign Resource Quotas - -You can assign resource quotas for resource sharing among multiple users who have access to a namespace. - - - -## Prerequisites - -- A running cluster with at least one namespace. - - - -## Assign Resource Quotas to a Namespace - -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Navigate to the left **Main Menu** and click on **Clusters**. - - -3. Select the cluster with the namespace to which you will assign workspace quotas. - - -4. Navigate to the **Workloads** > **Namespaces** tab, and click the **Manage Namespaces** button. - - -5. The **Settings** pane displays with **RBAC** > **Namespaces** preselected. - - -6. Select the namespace listed in the **Workspace Quota** section. - - -![Cluster Settings pane displaying Workspace Quota section of Namespaces tab](/clusters_cluster-management_ns-resource-quota.png) - -
-7. Enter the amount of CPU and memory to allocate to the namespace, and save your changes. - - - -# Delete a Namespace - -When you delete a namespace, all the resources that were created within the namespace will also be deleted, such as pods, services, endpoints, config maps, and more. - - - -## Prerequisites - -- Ensure that no other resources depend on the namespace being deleted. - -## Delete a Namespace from a Cluster - -1. Navigate to the left **Main Menu** and click on **Clusters**. - - -2. Select the cluster from which you want to delete a namespace. - - -3. Navigate to the **Workloads** > **Namespaces** tab, and click the **Manage Namespaces** button. -
- - The **Settings** pane displays with **RBAC** preselected and the **Namespaces** tab opened by default. - - -4. Select the namespace you want to delete, which is listed in the **Workspace Quota** section, and click the trash can icon. - - -## Validate - -Validate that the namespace was successfully deleted. - -
- -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Navigate to the left **Main Menu** and click on **Clusters**. - - -3. Select the cluster that contains the namespace you want to delete and view its details. - - -4. In the **Settings** pane, click **RBAC** > **Namespaces** tab. - -
- - The namespace you created is no longer listed under **Workspace Quota**. - - - - diff --git a/content/docs/04-clusters/06-cluster-management/10-macros.md b/content/docs/04-clusters/06-cluster-management/10-macros.md deleted file mode 100644 index 6207c87abd..0000000000 --- a/content/docs/04-clusters/06-cluster-management/10-macros.md +++ /dev/null @@ -1,221 +0,0 @@ ---- -title: "Palette Macros" -metaTitle: "Palette Macros Support" -metaDescription: "Apply Palette Macros to Cluster Profiles." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -Palette supports placeholder variables as Macros in our cluster profile layers. These macros make regression and update of variables, across multiple-running clusters, easier. We encourage creating these macros and using them within any of our cluster profile layers. Hence, changes to the existing Macros get updated to the corresponding cluster profile and the clusters with these profiles attached. - -# Scope of Palette Macros - -Palette users can declare the Macros under three different scopes: - -1. **Project Scope**: Create `Macros` from the project dashboard with project privileges. - - -2. **Tenant Admin Scope**: Create macros from the `Tenant Admin Dashboard` with administrative privileges. - - -3. **System Scope**: Includes the default system macros and user-created system macros. - -The Macros must have unique names within a given application, but Macros with a different Scope can have a unique name. In such cases, the precedence followed is in decreasing order (the highest precedence being Project Scope). - - - **Project Scope** > **Tenant Scope** > **System Scope** - - -# Create your Macro - -Palette users can use Macros in three different Scopes. Following the user preferences and privileges, log in as a Tenant Admin or Project Admin, to create macros under Tenant Admin scope and Project Scope, respectively. System Scope Macros can be created via API's. The steps to create a macro are as below: - -
-
- - - - -
- -1. Log in to the Palette Management Console as a **Tenant Admin**. - - -2. From the menu on the left-hand side, click on **Tenant Settings** and select the **Macros** tab. - - -3. Click on **+Add Macro**. - - -4. Complete the following details for the same: - - **Name**: A custom name for the Macro. - - **Value**: The value to be assigned to the placeholder variable. - - -5. Click the **Save changes** button to complete the wizard. - -
- - - -1. Log in to the Palette Management Console as a **Project Admin**. - - -2. From the menu on the left-hand side, click on **Project Settings** and select the **Macros** tab. - - -3. Click on **+Add Macro**. - - -4. Complete the following details for the same: - * **Name**: A custom name for the Macro - * **Value**: The value to be assigned to the placeholder variable. - - -5. Click the **Save changes** button to complete the wizard. - - - - - -Create and list your System Level macros via an API. - - - -
- -## Example - -```yaml -manifests: - aws_ebs: - #Storage type should be one of io1, gp2, sc1, st1 types #Checkhttps://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html for more details - storageType: "gp2" - #Allowed reclaim policies are Delete, Retain - reclaimPolicy: "Delete" - #Toggle for Volume expansion - allowVolumeExpansion: "true" - #Toggle for Default class - isDefaultClass: "true" - #Supported binding modes are Immediate, WaitForFirstConsumer - #Setting this to WaitForFirstConsumer for AWS, so that the volumes gets created in the same AZ as that of the pods - volumeBindingMode: "{{.spectro.macro.volumeBindingMode}}" -``` -# Use your Macros - -The Macros are overridden into the Cluster Profile layers: -* During a Cluster Profile creation. - - -* For a Cluster Profile used by a running cluster or during cluster provisioning. - -
- - - -
- -## Add a Macro to a Cluster Profile Pack: - -1. Log in to the Palette console and navigate to **Profiles**. - - -2. From the **Cluster Profiles** tab, select the **Cluster Profile** to which the Macro is to be added. - - **Note:** A macro can be attached to any Infrastructure or Add-on layers of a Profile. - - -3. Add the macro name to the desired layer of the profile in the format: - - `{{.spectro.macro.macro-name}}`, where the *macro-name* is the **Custom name**, created by the user. - - -4. Save the changes to the **Cluster Profile**. This Macro can be replaced or edited later. - - -
- - - -## Replace or Add a Macro to a running Cluster: - -1. ​​Log in to Palette Console and go to the **Clusters** tab. - - -2. Select the **Cluster Name** to which the Macro is to be updated and navigate to the **Cluster Details** page. - - -3. Go to the **Profiles** tab to select the layer to which the Macro is to be added. - - -4. In the desired existing pack, replace the value with the Macro name as: - - `{{.spectro.macro.macro-name}}` - - -5. Save the changes to the **Cluster Profile**. - - - - - -
- -# Delete Macros - - - - -1. Log in to Palette Management Console as **Tenant Admin**. - - -2. From the menu on the left-hand side, go to **Tenant Settings** and select the **Macros** tab. - - -3. Click the **Delete** button to remove the macro. - - -4. Click the **Save changes** button to complete the wizard. - - - - - -1. Log in to Palette Management Console as **Project Admin**. - - -2. From the menu on the left-hand side, go to **Project Settings** and select the **Macros** tab. - - -3. Click on the **Delete** button to remove the macro. - - -4. Click the **Save changes** button to complete the wizard. - - - - - -
-Delete your system level macros via an API. - -
- -
- -
- -When a Macro is deleted from the UI, it needs to be cleared from the cluster profile to avoid Macro deletion anomalies in the running cluster. - - - - - - diff --git a/content/docs/04-clusters/06-cluster-management/11-pack-monitoring.md b/content/docs/04-clusters/06-cluster-management/11-pack-monitoring.md deleted file mode 100644 index c10d560bfb..0000000000 --- a/content/docs/04-clusters/06-cluster-management/11-pack-monitoring.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "Pack Monitoring" -metaTitle: "Monitoring Packs in Palette" -metaDescription: "How to monitor the status of packs in Palette" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Pack Monitoring - -Palette provides a color scheme to help you monitor pack installation progress during Palette Workload Cluster deployment. Different colors represent stages of pack installation so you can track the progress of packs as they are added to a cluster. - -The Cluster Profile page displays the list of packs associated with the cluster you are monitoring. In addition, the page also includes information on the status and the installation progress of the installed packs. The following are the possible pack statuses. - -
-
- -| **Indicator Status** | **Description** | -| ------------------------------------ | ------------------------------------------------------------------- | -|

**Gray**

| The pack is onboarding, and it's right before the deployment stage. | -|

**Blue**

| The pack is in processing mode. | -|

**Green**

| The pack installation is successful. | -|

**Red**

| The pack installation has failed. | - - -
-
- -#### Cluster Profiles Pack Status - -![Pack_Status](/pack_status.png) - - -
-
diff --git a/content/docs/04-clusters/06-cluster-management/11.5-kubeconfig.md b/content/docs/04-clusters/06-cluster-management/11.5-kubeconfig.md deleted file mode 100644 index fbd42f88e3..0000000000 --- a/content/docs/04-clusters/06-cluster-management/11.5-kubeconfig.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: "Kubeconfig" -metaTitle: "Kubeconfig" -metaDescription: "Learn how to find the kubeconfig file for your cluster and how permissions are managed." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -A [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file is a configuration file used to access a Kubernetes cluster. It contains information such as the cluster's API server address, authentication credentials, and cluster-specific settings. The kubeconfig file allows you to authenticate and interact with the cluster using the kubectl CLI or other Kubernetes client tools. - - -The kubeconfig file is crucial in enabling you and other users to issue kubectl commands against the host cluster. It ensures you have the necessary permissions and access to the cluster's resources. Using the kubeconfig file, you can validate your access to the host cluster and perform various operations, such as deploying applications, managing resources, and monitoring the cluster. - -Overall, the kubeconfig file serves as a key component in connecting and interacting with a Kubernetes cluster, providing you with the necessary configuration and credentials to access the cluster's API server. - - -You can download the kubeconfig file from the cluster details page in Palette. Check out the [Access Cluster with CLI](/clusters/cluster-management/palette-webctl) guide for steps on how to download your cluster's kubeconfig file and connect to your host cluster with the kubectl CLI. - -# Kubeconfig Files - -Palette exposes two kubeconfig files for each cluster deployed through Palette. - -
-* Kubeconfig - This kubeconfig contains OIDC and Spectro Proxy configurations. The kubeconfig file is available for all users with proper access to the cluster. The kubeconfig file can be used to access the cluster's resources and perform operations on the cluster. Refer to the [Kubeconfig Access Permissions](#kubeconfigaccesspermissions) section to learn more about access permissions for the kubeconfig file. - - -* Admin Kubeconfig - The admin kubeconfig is created without OIDC configurations. This file is ideal for users who need to access the cluster through an intermediate host, such as a jump host. Refer to the [Kubeconfig Access Permissions](#kubeconfigaccesspermissions) section to learn more about access permissions for the admin kubeconfig file. - -![The cluster details page with the two Kubeconfig files elements highlighted](/clusters_cluster--management_kubeconfig_cluster-details-kubeconfig-files.png) - -# Kubeconfig Access Permissions - -Palette exposes kubeconfig files for each cluster deployed through the platform. Depending on the cluster's configuration, the kubeconfig file may contain different settings, such as the cluster's API server address and authentication credentials. - -Your assigned [Palette permissions](/user-management/palette-rbac/project-scope-roles-permissions) determine which clusters you can access and what operations you can perform on the cluster. They also determine whether you can download and access the kubeconfig files for a cluster. - -As a rule of thumb, users with the Palette role [*Cluster Admin*](/user-management/palette-rbac/project-scope-roles-permissions#cluster) can access both kubeconfig files for all clusters in the project. Users with lower-level project roles such as the *Cluster Editor* or the *Cluster Viewer* may not be able to access the kubeconfig file of the cluster. -
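For reference, the OIDC integration in the non-admin kubeconfig is typically wired through an OIDC exec plugin such as kubelogin. The trimmed user entry below is illustrative only; Palette generates this stanza for you, and the issuer URL and client ID are placeholders:

```yaml
users:
  - name: example-user
    user:
      exec:
        apiVersion: client.authentication.k8s.io/v1beta1
        command: kubectl
        args:
          - oidc-login
          - get-token
          - --oidc-issuer-url=https://idp.example.com   # placeholder issuer
          - --oidc-client-id=example-client-id          # placeholder client ID
```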
- - - -Palette has its own RBAC system that is separate from the [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) system in the host cluster. The permissions assigned to you in Palette determine what operations you can perform on the cluster. The permissions assigned to you in the host cluster through the Kubernetes RBAC system determine what operations you can perform inside the cluster and on its resources. Refer to the [Palette Roles and Kubernetes Roles](/clusters/cluster-management/cluster-rbac#paletterolesandkubernetesroles) for additional information. - - - - -The access you have as a user to the kubeconfig files for a cluster depends on the following factors: - -
- -* Is OIDC configured for the cluster? OIDC is configured in the Kubernetes pack YAML file of the cluster profile. Refer to the respective Kubernetes distribution on the [Packs List](/integrations) page to learn more about OIDC configuration. - -
- - - - When enabling OIDC, ensure the parameter `oidc-issuer-url` and the `clientConfig` block are properly configured in the Kubernetes Pack YAML. Properly configuring both parameters ensures the kubeconfig file is available for all project users. Otherwise, the kubeconfig file will only be available for Cluster Admins or custom roles that have the *delete* permission for the resource key *cluster*. - - - - -* Is the [Spectro Proxy](/integrations/frp) enabled for the cluster? - - - -Use the tables below to help you identify which project role you need to access the kubeconfig file for a cluster. - -
- -#### Cluster Admin - -The following table shows the *Cluster Admin* role or equivalent provides access to both the Kubeconfig file and the Admin Kubeconfig file whether OIDC and the Spectro Proxy are configured or not. If you use a custom Palette resource role instead of the Palette role Cluster Admin, ensure the custom Palette resource role has the *delete* permissions for the resource key *cluster* to access both kubeconfig files for a cluster. - - - **Is OIDC Configured?** | **Is Spectro Proxy Enabled?** | **Access to Kubeconfig File** | **Access to Admin Kubeconfig File** | - --- | --- | --- | --- | -Yes | Yes | ✅ | ✅ | -No | Yes| ✅ | ✅ | -Yes | No | ✅ | ✅ | - -#### Non-Cluster Admin - -The table shows that lower-level project roles, such as the *Cluster Editor* or the *Cluster Viewer*, or custom Palette resource roles that do not have the *delete* permissions for the resource key *cluster* may have access to the kubeconfig file. - -If a cluster has OIDC and the Spectro Proxy enabled then the kubeconfig file is available. Or, if the cluster has OIDC enabled and the Spectro Proxy disabled, the kubeconfig file is available. - - - **Is OIDC Configured?** | **Is Spectro Proxy Enabled?** | **Access to Kubeconfig File** | **Access to Admin Kubeconfig File** | -| --- | --- | --- | --- |---| -|Yes | Yes | ✅ | ❌ -| No | Yes | ❌ | ❌ -| Yes | No | ✅ | ❌ - - -# API Access - -Palette exposes two API endpoints that you can use to access the kubeconfig file for a host cluster. The endpoints are: - -
- -* `GET https://api.spectrocloud.com/v1/spectroclusters/{clusterId}/assets/kubeconfig` - Returns the kubeconfig file for the cluster. The kubeconfig file is returned as a text string. Access to the kubeconfig file is determined by the permissions assigned to you in Palette. For additional information, refer to the [Kubeconfig Access Permissions](#kubeconfigaccesspermissions) section. - - -* `GET https://api.spectrocloud.com/v1/spectroclusters/{clusterId}/assets/adminkubeconfig` - Returns the admin kubeconfig file for the cluster. The admin kubeconfig file is returned as a text string. Only users with the Palette project role *Cluster Admin* or with a custom Palette resource role with the resource key *cluster* and the *delete* permission can access the admin kubeconfig file for a cluster. \ No newline at end of file diff --git a/content/docs/04-clusters/06-cluster-management/12-palette-webctl.md b/content/docs/04-clusters/06-cluster-management/12-palette-webctl.md deleted file mode 100644 index 2ad53eeaa7..0000000000 --- a/content/docs/04-clusters/06-cluster-management/12-palette-webctl.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -title: "Kubectl" -metaTitle: "Web kubectl CLI on Palette" -metaDescription: "Web kubectl CLI on Palette for cluster access" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Overview - -You can access your Kubernetes cluster by using the [kubectl CLI](https://kubernetes.io/docs/reference/kubectl/). Palette automatically generates a **kubeconfig** file for your cluster that you can download and use to connect with your host cluster. - - -# Access Cluster with CLI - -Use the following steps to connect to your host cluster with the kubectl CLI. - -
- - - -If you are using Palette Virtual Machine (VM) Management, you can find steps on how to connect to your virtual machines with the [virtctl CLI](https://kubevirt.io/user-guide/operations/virtctl_client_tool/) in the [Access VM Cluster with virtctl](/vm-management/create-manage-vm/access-cluster-with-virtctl) guide. The virtctl CLI facilitates some of the VM operations you will perform, such as copying, pasting, or transferring files to and from a virtual machine using Secure Copy Protocol (SCP). - - - -# Prerequisites - -- Kubectl installed locally. Use the Kubernetes [Install Tools](https://kubernetes.io/docs/tasks/tools/) for additional guidance. - - -- A host cluster that is either publicly accessible OR a private host cluster that has the [Spectro Proxy](/integrations/frp) installed. - - - - -If you are using [OIDC](/clusters/cluster-management/cluster-rbac#userbacwithoidc) with your host cluster, you will need the kubelogin plugin. Refer to the kubelogin GitHub repository [README](https://github.com/int128/kubelogin#setup) for installation guidance. - - - - -# Set up Kubectl - -1. Log in to [Palette](https://spectrocloud.com). - - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - - -3. Select the host cluster you want to access. - - -4. From the cluster overview page, navigate to the middle column containing cluster details and locate the **Kubernetes Config File** row. - - -5. Click on the kubeconfig link to download the file. - -![Arrow pointing to the kubeconfig file](/clusters_cluster-management_palette-webctl_cluster-details-overview.png) - -
- -6. Open a terminal window and set the `KUBECONFIG` environment variable to the file path of the **kubeconfig** file. - - Example: - ```shell - export KUBECONFIG=~/Downloads/dev-cluster.kubeconfig - ``` - - -You can now issue kubectl commands against your host cluster. - - -# Validate - -Verify you have access to your host cluster by issuing kubectl commands against it. - - - - diff --git a/content/docs/04-clusters/06-cluster-management/13-palette-lock-cluster.md b/content/docs/04-clusters/06-cluster-management/13-palette-lock-cluster.md deleted file mode 100644 index b6f67c95b2..0000000000 --- a/content/docs/04-clusters/06-cluster-management/13-palette-lock-cluster.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: "Platform Settings" -metaTitle: "Platform Settings" -metaDescription: "Platform Settings on Palette" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Manage Platform Settings - -* [Pause Platform Updates](/clusters/cluster-management/palette-lock-cluster#pauseplatformupdates) -* [Auto Remediation](/clusters/cluster-management/palette-lock-cluster#autoremediation) - -# Pause Platform Updates - -Palette supports the **Pause Platform Updates** feature to exclude a cluster or a group of clusters from getting upgraded when Palette is upgraded. The use cases of this feature are: - -
- -* Pause Updates for Single Cluster -* Pause Updates for all the Clusters within the Project Scope -* Pause Updates for all Clusters within the Tenant Scope - -
-
-## Pause Updates for Single Cluster
-
-You can pause updates for an individual cluster under either the Project or the Tenant scope to exclude it from Palette upgrades. To pause updates for a cluster, follow the steps below:
-
-
-1. Log in to the Palette console as a Tenant or Project administrator.
-
-
-2. Go to the `Clusters` page from the left ribbon menu and select the cluster for which you want to pause updates.
-
-
-3. From the cluster details page, click `Settings -> Cluster Settings`.
-
-
-4. Toggle the `Pause Platform Updates` button to pause updates for the cluster so that its cluster management services are not upgraded when Palette is upgraded.
-
-
-5. To resume updates for the cluster, toggle the `Pause Platform Updates` button back off.
-
-
-## Pause Updates for all the Clusters within the Project Scope
-
-You can pause updates for all the clusters under a project to exclude them from Palette upgrades. To pause updates for all clusters under the Project scope:
-
-1. Log in to the Palette console as a Project administrator.
-
-
-2. Select `Project Settings` from the left ribbon menu.
-
-
-3. From the `Project Settings` page, select `Platform Updates` and toggle the `Pause Platform Updates` button. This prevents the cluster management services of all clusters under that project scope from being upgraded when Palette is upgraded.
-
-
-4. To resume updates for the clusters, toggle the `Pause Platform Updates` button back off.
-
-## Pause Updates for all Clusters within the Tenant Scope
-
-
-You can pause updates for all the clusters under a tenant to exclude them from Palette upgrades. To pause updates for all clusters under the Tenant scope:
-
-
-1. Log in to the Palette console as a `Tenant administrator`.
-
-
-2. Select `Tenant Settings` from the left ribbon menu.
-
-
-3. From `Tenant Settings`, select `Platform Updates` and toggle the `Pause Platform Updates` button. This prevents the cluster management services of all clusters under that tenant scope from being upgraded when Palette is upgraded.
-
-
-4. To resume updates for the clusters, toggle the `Pause Platform Updates` button back off.
-
-
-# Auto Remediation
-
-Palette provides Cluster Auto Remediation as a node reconciliation operation. When Cluster Auto Remediation is on, unhealthy nodes in all Palette-provisioned clusters are automatically replaced with new nodes. Turning off this feature disables auto remediation.
-This feature can work under the scope of:
-
-* Tenant
-
-* Project
-
-To enable auto remediation:
-
-* Log in to the Palette console as a Tenant or Project admin.
-
-* Go to `Tenant Settings` or `Project Settings`, depending on your scope.
-
-* Select `Platform Settings` from the left menu and toggle the `Cluster Auto Remediation` button.
-
-
-This does not apply to EKS, AKS, or TKE clusters.
-
 diff --git a/content/docs/04-clusters/06-cluster-management/14-noc-ui.md b/content/docs/04-clusters/06-cluster-management/14-noc-ui.md deleted file mode 100644 index c29d53e687..0000000000 --- a/content/docs/04-clusters/06-cluster-management/14-noc-ui.md +++ /dev/null @@ -1,120 +0,0 @@
----
-title: "NOC-UI"
-metaTitle: "Clusters Location view on Map - NOC UI"
-metaDescription: "Clusters Location view on Map - NOC UI"
-hideToC: false
-fullWidth: false
----
-
-import Tabs from 'shared/components/ui/Tabs';
-import WarningBox from 'shared/components/WarningBox';
-import InfoBox from 'shared/components/InfoBox';
-import PointsOfInterest from 'shared/components/common/PointOfInterest';
-import Tooltip from "shared/components/ui/Tooltip";
-
-
-# Overview
-
-Palette provides an intuitive, map-based user interface (UI) for monitoring clusters running in multiple locations. For public cloud clusters, Palette displays on the UI map the region set during the cluster creation process. You can set the location for private cloud clusters through the Palette UI. You can also monitor the location details of all the clusters running under a specific scope.
-
-# Set the Cluster Location
-
-For private cloud clusters, you must set the location explicitly. To set the location:
-
-
-* Log in to [Palette](https://console.spectrocloud.com).
-
-
-* Select the cluster whose location you want to update and go to its **Cluster details** page.
-
-
-* Open **Settings** and then navigate to **Cluster Settings**.
-
-
-* Select **Location** from the left menu, set the cluster's location, and save the changes.
-
-
-* The location is then displayed on the UI map.
-
-
-# Monitor your Cluster Location
-
-
-To monitor the cluster location, follow the steps below:
-
-
-* Log in to Palette and select **Clusters** from the left **Main Menu**.
-
-
-* Select the **Map View Icon** below the **Clusters** tab.
-
-
-The map displays all the cluster locations under that user’s scope.
-
-# Map Filters
-
-Palette map filters let you narrow the map down to specific clusters using built-in or custom filters. This is useful when clusters are dispersed geographically, across multiple scopes, or across different cloud providers. There are two types of filters: **Built-in Filters** and **Custom Filters**.
-
-## Built-In Filters
-
-Built-in filters are available in the Palette console by default and can be selected from the **Add Filter** drop-down menu. You can use the following built-in filters.
-
-
-|**Built-In Filters** |Description|
-|---------------------|-----------|
-|Deleted Only| Displays the clusters deleted in the last 72 hours|
-|Imported Only| Displays the brownfield (imported) clusters|
-|Updates Pending| Displays the clusters with pending updates|
-
-
-## Custom Filters
-
-Palette supports a wide range of custom filters in a fixed format. To add a custom filter:
-
-
-* Log in to Palette and select **Clusters** from the left **Main Menu**.
-
-
-* Click the **+Add Filter** button on the top menu and select **+ Add custom filter**.
-
-
-* The format for adding a custom filter is as follows:
-
-   `Conjunction - Condition - Operator - Value`
-
-
-* You can add more than one custom filter simultaneously, and they work together with the chosen conjunction.
-
-
-You can apply these filters in both the map view and the cluster listing view.
-
-
-|Conjunction| Condition |Operator|Value|
-|--|--|--|---|
-|and/or|Cloud Account|[operator](/clusters/cluster-management/noc-ui#operators) |Custom value |
-|and/or|Name|[operator](/clusters/cluster-management/noc-ui#operators) | Custom value|
-|and/or|Profiles|[operator](/clusters/cluster-management/noc-ui#operators) |Custom value |
-|and/or|Status|[operator](/clusters/cluster-management/noc-ui#operators) |Custom value|
-|and/or|Environment|[operator](/clusters/cluster-management/noc-ui#operators) |Custom value|
-|and/or|Health Status|[operator](/clusters/cluster-management/noc-ui#operators) |Custom value|
-|and/or|Deleted|[operator](/clusters/cluster-management/noc-ui#operators) |Custom value|
-|and/or|Read Only Import| [operator](/clusters/cluster-management/noc-ui#operators)|Custom value|
-|and/or|Imported|[operator](/clusters/cluster-management/noc-ui#operators) |Custom value|
-|and/or|Updates Pending|[operator](/clusters/cluster-management/noc-ui#operators) |Custom value|
-|and/or|Tags|[operator](/clusters/cluster-management/noc-ui#operators) |Custom value|
-|and/or|Region| [operator](/clusters/cluster-management/noc-ui#operators)|Custom value|
-
-For example, the expression `and - Status - is - Running` narrows the view to clusters in the **Running** state.
-
-## Operators
-
-* is
-* is not
-* contains
-* does not contain
-* begins with
-* does not begin
- - diff --git a/content/docs/04-clusters/06-cluster-management/15-node-pool.md b/content/docs/04-clusters/06-cluster-management/15-node-pool.md deleted file mode 100644 index 5b766c698e..0000000000 --- a/content/docs/04-clusters/06-cluster-management/15-node-pool.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: "Node Pools" -metaTitle: "Node Pools" -metaDescription: "Learn about the node pools and applying changes to a node pool." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Node Pools - - -A node pool is a group of nodes within a cluster that all have the same configuration. Node pools allow you to create pools of nodes that can be used for different workloads. For example, you can create a node pool for your production workloads and another node pool for your development workloads. You can update node pools for active clusters or create a new node pool for the cluster. - -
-
-
-
-Exercise caution when modifying node pools. We recommend creating a [backup](/clusters/cluster-management/backup-restore) before you make a change, in case a configuration change causes an issue.
-
-
-
-## Repave Behavior and Configuration
-
-In Kubernetes, the term "repave" refers to the process of replacing a node with a new node. [Repaving](/glossary-all#repavement) is a common practice in Kubernetes to ensure that nodes are deployed with the latest version of the operating system and Kubernetes. Repaving is also used to replace nodes that are unhealthy or have failed. You can configure the repave time interval for a node pool.
-
-You can configure the repave time interval for all node pools except the master pool. The default repave time interval is 900 seconds (15 minutes). You can configure the node repave time interval during the cluster creation process or after the cluster is created. To modify the repave time interval after the cluster is created, follow the [Change a Node Pool](#changeanodepool) instructions below.
-
-
- -## Node Pool Configuration Settings - -The following tables contain the configuration settings for node pools. Depending on the type of node pool, some of the settings may not be available. - -
-
-### Master Node Pool
-
-| **Property** | **Description** |
-|-----------|-------------|
-| **Node pool name** | A descriptive name for the node pool. |
-| **Number of nodes in the pool** | Number of nodes to be provisioned for the node pool. For the master pool, this number can be 1, 3, or 5. |
-| **Allow worker capability** | Select this option to allow workloads to be provisioned on master nodes. |
-| **Additional Labels** | Optional labels that apply placement constraints on a pod. For example, you can add a label to make a node eligible to receive the workload. To learn more, refer to the [Overview on Labels](/clusters/cluster-management/taints#overviewonlabels). |
-| **Taints** | Sets a toleration on pods and allows (but does not require) them to schedule onto nodes with matching taints. To learn more, refer to the [Overview on Taints](/clusters/cluster-management/taints#overviewontaints).|
-| **Availability Zones** | The Availability Zones from which to select available servers for deployment. If you select multiple zones, Palette will deploy servers evenly across them as long as sufficient servers are available to do so. |
-| **Disk Size** | The storage size to allocate for each node. |
-
-
-### Worker Node Pool
-
-| **Property** | **Description** |
-|-----------|-------------|
-| **Node pool name** | A descriptive name for the worker pool. |
-| **Number of nodes in the pool** | Number of nodes to be provisioned for the node pool. |
-| **Node repave interval** | The time interval in seconds between repaves. The default value is 900 seconds (15 minutes). |
-| **Additional Labels** | Optional labels that apply placement constraints on a pod. For example, you can add a label to make a node eligible to receive the workload. To learn more, refer to the [Overview on Labels](/clusters/cluster-management/taints#overviewonlabels). |
-| **Taints** | Sets a toleration on pods and allows (but does not require) them to schedule onto nodes with matching taints. To learn more, refer to the [Overview on Taints](/clusters/cluster-management/taints#overviewontaints).|
-| **Rolling update** | The update policy to apply. **Expand first** launches new nodes and then terminates old nodes. **Contract first** terminates old nodes and then launches new ones. |
-| **Instance Option** | AWS options for compute capacity. **On Demand** gives you full control over the instance lifecycle without long-term commitment. **Spot** allows the use of spare EC2 capacity at a discount, but the capacity can be reclaimed if needed. |
-| **Instance Type** | The compute size. |
-| **Availability Zones** | The Availability Zones from which to select available servers for deployment. If you select multiple zones, Palette will deploy servers evenly across them as long as sufficient servers are available to do so. If you select public subnets, ensure those subnets have automatic public IP addresses assigned. Otherwise, node deployment errors will occur. Automatic public IP address assignment is typically handled by the infrastructure provider Palette is deploying a cluster to. Discuss this with your network team for additional guidance. |
-| **Disk Size** | The storage size to allocate for each node. |
-
-
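-
-The labels and taints you define in a node pool are applied to the underlying Kubernetes nodes. As an optional cross-check once the nodes are provisioned, you can confirm the values with kubectl and the cluster's kubeconfig file. The following is a minimal sketch, and `example-worker-node` is a placeholder for one of your node names.
-
-```shell
-# List the nodes and the labels assigned through the node pool configuration.
-kubectl get nodes --show-labels
-
-# Review the taints applied to a specific node.
-# Replace example-worker-node with one of your node names.
-kubectl describe node example-worker-node | grep -i taints
-```
-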
- - - - -Some features may not be available for all infrastructure providers. Review each infrastructure provider's node pool configuration settings to learn more. - - - -
- - -## Create a New Node Pool - - -### Prerequisites - - -* A Palette-deployed cluster. - - -* Sufficient permissions to edit the cluster. - - -### Create Node Pool - - -You can create a new node pool for an active cluster. To create a new node pool follow the steps below. - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and click on **Clusters**. - - -3. Click on the row of the cluster you want to edit the node pool. - - -4. Click on the **Nodes** tab. - - -5. Click on **New Node Pool**. - - -6. Fill out the input fields in the **Add node pool** page. Refer to the [Node Pool Configuration Settings](#nodepoolconfigurationsettings) tables for more information on each field. - - - - -
- -7. Click on **Confirm** to create the new node pool. - - -### Validate - -After you create a new node pool, you can validate the node pool by following the steps below. - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and click on **Clusters**. - - -3. Click on the row of the cluster you added the new node pool. - - -4. Click on the **Nodes** tab. - - -5. Ensure the new node pool is listed in the **Node Pools** section and that all compute instances are in the healthy status. - -## Change a Node Pool - - -You can apply changes to a node pool after a cluster is created and deployed. You can change the node pool's taints label, node repavement interval, number of compute instances in the node pool and more. To make changes to an active cluster's node pools, follow the steps below. - -### Prerequisites - - -* A Palette deployed cluster. - - -* Sufficient permissions to edit the cluster. - - -### Edit Node Pool - - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and click on **Clusters**. - - -3. Select a cluster to edit the node pool. - - -4. Click on the **Nodes** tab. - - -5. The nodes details page is where you can review the existing node pools and their configuration. You can also add a new node pool from this page. Click on the **Edit** button to make changes to the node pool. - - -6. Make the changes as needed. Refer to the [Node Pool Configuration Settings](#nodepoolconfigurationsettings) tables for more information on each field. - - -7. Click on **Confirm** to update the node pool. - -### Validate - -After you have modified a new node pool, you can validate the node pool by following the steps below. - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and click on **Clusters**. - - -3. Select the cluster with the new node pool. - - -4. Click on the **Nodes** tab. - - -5. Ensure the new node pool is listed in the **Node Pools** section and that all compute instances are in the healthy status. - -
\ No newline at end of file diff --git a/content/docs/04-clusters/06-cluster-management/16-cluster-tag-filter.md b/content/docs/04-clusters/06-cluster-management/16-cluster-tag-filter.md deleted file mode 100644 index 6f9e08d995..0000000000 --- a/content/docs/04-clusters/06-cluster-management/16-cluster-tag-filter.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: "Cluster Access Control" -metaTitle: "Cluster Access Control" -metaDescription: "Learn how to manage and administer access control to clusters through tags." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Overview - -Palette provides the ability to manage user and role access privileges through tags. This feature helps you reduce the overhead in managing users' and roles' access to clusters by assigning tags. Tags can be used to group clusters, allowing you to apply access controls to the tag rather than to each cluster, user, or role, reducing the overhead of managing access controls for individual users and clusters. - -To get started with an attribute access control through tags, check out the [Create Resource Filter](/clusters/cluster-management/cluster-tag-filter/create-add-filter) guide. - - -# Resources - -* [Cluster Resource Filter](/clusters/cluster-management/cluster-tag-filter/create-add-filter) - - * [Create Resource Filter](/clusters/cluster-management/cluster-tag-filter/create-add-filter#createresourcefilter) - - * [Add Resource Role](/clusters/cluster-management/cluster-tag-filter/create-add-filter#addresourcerole) - - -* [Palette Resource Roles](/user-management/palette-rbac/resource-scope-roles-permissions) - - * [Palette Global Resource Roles](/user-management/palette-rbac/resource-scope-roles-permissions#paletteglobalresourceroles) - - * [Palette Custom Resource Roles](/user-management/palette-rbac/resource-scope-roles-permissions#palettecustomresourceroles) - - * [Create Custom Role](/user-management/new-user#createcustomrole) - - -* [Create New User in Palette](/user-management/new-user#createanewuser) - - -
diff --git a/content/docs/04-clusters/06-cluster-management/16-cluster-tag-filter/00-create-add-filter.md b/content/docs/04-clusters/06-cluster-management/16-cluster-tag-filter/00-create-add-filter.md deleted file mode 100644 index ea93394453..0000000000 --- a/content/docs/04-clusters/06-cluster-management/16-cluster-tag-filter/00-create-add-filter.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: "Cluster Resource Filter" -metaTitle: "Create and Add Cluster Resource Filter " -metaDescription: "Create and Add Cluster Resource Filter" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Overview - -The page guides you on how to create a Palette Resource Filter and add these filters to the users to establish cluster access restrictions. - -# Create Resource Filter - -You must create a Resource Filter in Palette to establish user-based access restrictions to clusters across multiple projects. The resource filters are created under the scope of Tenant Admin. To create a resource filter, follow the steps below: - -
-
-1. Log in to Palette as **Tenant Admin** and go to **Tenant Settings** from the left **Main Menu**.
-
-
-2. Select the **Filters** tab and click **+New Resource Filter**.
-
-
-3. In the **Create New Filter** wizard, provide the following information:
-    * Filter Name: A custom name for the tag filter.
-    * A filter expression. Use the following table to familiarize yourself with the filter expression format:
-
-    |Conjunction| Property| Operator| Tag-Value|
-    |-------|-----|---------|------------------|
-    |and    | Tag | is | Custom tag value|
-    |or     | Tag | is | Custom tag value|
-    |and    | Tag | is not | Custom tag value|
-    |or     | Tag | is not | Custom tag value|
-
-4. Click the **Confirm** button to complete the filter creation wizard.
-
-**Note:** The tags are case-sensitive.
-
-## Validate
-
-Upon creating a filter, a display message will pop up to confirm the successful creation of the filter. You can also use the following steps to verify the filter is available for use.
-
-1. Navigate to the left **Main Menu** and click on **Tenant Settings**.
-
-2. Access the **Manage Filters** page to find the filter name listed.
-
-3. You can **Edit** and **Delete** filters by clicking on the three-dot menu at the end of the row.
-
-# Add Resource Role
-
-You can assign the resource filter you created, in combination with roles, to a [user](/user-management/new-user#createanewuser) to enforce access restrictions. Palette provisions two types of roles:
-
-* [Palette Global Roles](/user-management/palette-rbac/resource-scope-roles-permissions#paletteglobalresourceroles), the set of roles that are available in the Palette console.
-
-* [Custom Resource Roles](/user-management/palette-rbac/resource-scope-roles-permissions#palettecustomresourceroles), which you can generate according to your requirements from the available set of permissions and operations.
-
-## Prerequisites
-
-* A [Palette account](https://www.spectrocloud.com/get-started/) with Tenant scope privileges.
-
-* A [user created](/user-management/new-user#createanewuser) to assign the resource privileges.
-
-To assign resource roles and filters to a user, follow the steps below:
-
-
-1. Log in to Palette as Tenant Admin.
-
-
-2. From the left **Main Menu**, select **Users & Teams** and choose the user you want to assign a role to. This takes you to the **User Details** page.
-
-
-3. From the user details wizard, select the **Resource Roles** tab and click **+ New Resource Role**.
-
-
-4. In the **Add Roles to User** wizard, enter the following details:
-    * **Projects**: The projects to which the user is assigned.
-    * **Filters**: Select the filters to be assigned from the drop-down. The filters you created are displayed in the drop-down menu.
-    * Select the **check box** for each role you want to assign to the user from the list displayed. These are Palette built-in roles.
-
-
-5. Click **Confirm** to complete the Add Role wizard.
-
-## Remove or Edit the Role
-
-To remove or edit an attached role:
-
-1. Log in to Palette as Tenant Admin.
-
-
-2. From the left **Main Menu**, click on **Users & Teams**. This will take you to the **User Details** page.
-
-
-3. From the **Resource Roles** tab, click the **three-dot** menu next to the role name.
-
-
-4. Click the **Edit** or **Remove** option from the drop-down menu.
-
-## Validate
-
-Upon assigning a role, a display message will pop up to confirm the successful role assignment. You can also use the following steps to review the assigned roles:
-
-1. Navigate to the left **Main Menu** and click on **Clusters**.
-
-2. This page lists all the clusters to which the user has access based on the filter created. Switch to each project to view the clusters accessible to the user.
-
-## Resource
-
-* [Create a New User](/user-management/new-user#createanewuser)
-
 diff --git a/content/docs/04-clusters/06-cluster-management/20-kubernetes-dashboard.md b/content/docs/04-clusters/06-cluster-management/20-kubernetes-dashboard.md deleted file mode 100644 index 8730686d3f..0000000000 --- a/content/docs/04-clusters/06-cluster-management/20-kubernetes-dashboard.md +++ /dev/null @@ -1,20 +0,0 @@
----
-title: 'Kubernetes Dashboard'
-metaTitle: 'Kubernetes Dashboard with Spectro Proxy'
-metaDescription: 'Kubernetes Dashboard with Spectro Proxy'
-hiddenFromNav: false
----
-
-import Tabs from 'shared/components/ui/Tabs';
-import WarningBox from 'shared/components/WarningBox';
-import InfoBox from 'shared/components/InfoBox';
-
-
-
-# Overview
-
-The [Kubernetes Dashboard](https://github.com/kubernetes/dashboard) is a general-purpose, web-based UI for Kubernetes clusters. You can use the dashboard to manage the cluster, deploy and manage applications, and troubleshoot issues.
-
-Use the [Spectro Kubernetes Dashboard](/integrations/spectro-k8s-dashboard) pack to add the Kubernetes dashboard to your cluster. The pack documentation page has instructions on how to use the pack.
-
-
\ No newline at end of file diff --git a/content/docs/04-clusters/06-cluster-management/40-remove-clusters.md b/content/docs/04-clusters/06-cluster-management/40-remove-clusters.md deleted file mode 100644 index ae204718dc..0000000000 --- a/content/docs/04-clusters/06-cluster-management/40-remove-clusters.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: "Cluster Removal" -metaTitle: "Remove Cluster" -metaDescription: "Learn how to remove a cluster deployed and managed by Palette." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - - -# Delete a Cluster - -When you delete a cluster it results in the removal of all compute instances and associated resources created for the cluster. Use the following steps to delete a cluster. - -# Prerequisites - -* A host cluster. - - - -# Removal - -1. Log in to [Palette](https://console.spectrocloud.com) and ensure you are in the correct project scope. - - -2. Navigate to the left **Main Menu** and click on **Clusters**. - - -3. Click on the cluster you want to delete. - - -4. Click on the **Settings drop-down Menu**. - - -5. Click on **Delete Cluster**. - - -6. Type the cluster name and click on **OK**. - -The cluster status is updated to **Deleting** while cluster resources are removed. When all resources are successfully deleted, the cluster status is updated to **Deleted**, and the cluster is removed from the cluster list. - - -# Validate - -To validate the host cluster is deleted, use the following steps. - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and click on **Cluster**. - - -4. Check the box labeled **Deleted only** to view all the clusters deleted in the last 72 hours. - -The cluster you deleted is now listed along with other previously deleted clusters. - - - -# Force Delete a Cluster - -If a cluster is stuck in the **Deleting** state for a minimum of 15 minutes, it becomes eligible for force deletion. You can force delete a cluster from the tenant and project admin scope. - -To force delete a cluster, follow the same steps outlined above. After 15 minutes, a **Force Delete Cluster** option is available in the **Settings drop-down Menu**. The drop-down menu will provide you with an estimated remaining time left before the force deletion becomes available. - -
- - -A force delete can result in Palette-provisioned resources being missed in the removal process. Verify there are no remaining resources. Use the following list to help you identify resources to remove. - -
- - - -Failure in removing provisioned resources can result in unexpected costs. - - - -
- -**Azure** - -- Virtual Network (VNet) -- Static Public IPs -- Virtual Network Interfaces -- Load Balancers -- VHD -- Managed Disks -- Virtual Network Gateway - - - -**AWS** - -- VPC -- Elastic IP -- Elastic Network Interfaces -- Internet Gateway -- Elastic Load Balancers -- EBS Volumes -- NAT Gateway - - -**GCP** - -- Virtual Private Cloud (VPC) Network -- Static External IP Address -- Network Interfaces -- Cloud NAT -- Cloud Load Balancing -- Persistent Disks -- Cloud Router - - - - diff --git a/content/docs/04-clusters/06-cluster-management/50-image-swap.md b/content/docs/04-clusters/06-cluster-management/50-image-swap.md deleted file mode 100644 index 8e787e951c..0000000000 --- a/content/docs/04-clusters/06-cluster-management/50-image-swap.md +++ /dev/null @@ -1,200 +0,0 @@ ---- -title: "Image Swap" -metaTitle: "Image Swap" -metaDescription: "Learn how to swap out images and registries through the image swap webhook exposed by Palette." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -Palette supports swapping out images and registries at the Kubernetes layer. Palette uses the *ImageSwap* webhook that is exposed by the [ImageSwap Mutating Admission Controller for Kubernetes](https://github.com/phenixblue/imageswap-webhook/blob/master/README.md). You can use this feature to override a specific number of container image registries or particular images. The following are some common use cases for image swapping:

- -- Avoid rate limit issues encountered with public images by pointing to an alternate image registry that caches public images. This is more common in an Enterprise setting. - - -- Changing the URL of an internal or external container registry. - - -- Support air-gapped environments by redirecting public image requests to an internal registry. - - - To use the image swap feature, specify an image swap configuration in the Kubernetes pack YAML. The `imageSwap` block must be its own node, meaning that it's a standalone block at the root level of the YAML. - -
- - ```yaml -imageSwap: - imageChange: |- - default: - # your custom configuration goes here - ``` - - - You can add the `imageSwap` section when you create the cluster profile or at cluster deployment. You can customize the image swap functionality several ways. We recommend you review the official [Image Swap configuration](https://github.com/phenixblue/imageswap-webhook/blob/master/README.md#configuration) documentation to learn more. To help you get started, the following are some common configuration patterns. - -
-
-
-
-  The `default::` entry specifies the default configuration for all images. The `::` delimiter is used to separate different elements of the configuration.
-
-
-
- # Configuration Examples
-
- ### Override a Specific Registry
-
- In this example, image swapping is disabled for all registries except for `example.private.io`. All image requests for `example.private.io` will be swapped for `harbor.internal.example.com`.
-
- - ```yaml -imageSwap: - imageChange: |- - default:: - example.private.io::harbor.internal.example.com - ``` - -### Apply a Global Swap with an Exception - -Enable image swapping for all registries except `example.private.io`. All image requests for `example.private.io` will not get swapped. All other image requests will get swapped to `harbor.internal.example.com`. - -
- -```yaml -imageSwap: - imageChange: |- - default::harbor.internal.example.com - example.private.io:: -``` - -### Swap a Specific Image - -Swap out a specific image. The image `example.private.io/demo:v1.0.0` will be swapped with `gcr.io/google-samples/hello-app:1.0`. The syntax format is `[EXACT]::`. - -
- - -```yaml -imageSwap: - imageChange: |- - default:: - [EXACT]example.private.io/demo:v1.0.0::gcr.io/google-samples/hello-app:1.0 -``` - - -### Replace Image Path - - -Replace an image path with a custom registry. All image requests that start with `ghcr.io/example*` will get swapped with `example.private.io`. - -
- - -```yaml -imageSwap: - imageChange: |- - default:: - [REPLACE]ghcr.io/example*::example.private.io -``` - - -
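-
-### Combine Multiple Rules
-
-The patterns above can be combined in a single `imageChange` configuration, with one rule per line. The following is a sketch that merges the previous examples; the registry and image names are the same illustrative values used throughout this page, not values you must use. Refer to the official [Image Swap configuration](https://github.com/phenixblue/imageswap-webhook/blob/master/README.md#configuration) documentation for the precedence rules between entries.
-
-```yaml
-# Illustrative values only. Replace the registries and images with your own.
-imageSwap:
-  imageChange: |-
-    default::
-    example.private.io::harbor.internal.example.com
-    [EXACT]example.private.io/demo:v1.0.0::gcr.io/google-samples/hello-app:1.0
-    [REPLACE]ghcr.io/example*::example.private.io
-```
-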
- - - - - If the registry or image mentioned in the image swap configuration cannot be located, Kubernetes will try to obtain the image from the source mentioned in the deployment configuration. - - - - -The examples provided are intended to help you get started. Refer to the official [Image Swap configuration](https://github.com/phenixblue/imageswap-webhook/blob/master/README.md#configuration) for more examples and information. - - -# Image Swap with Palette - -Use the following steps to learn how to use Palette's image swap functionality. - -## Prerequisites - -* Kubernetes 1.19.0 or greater. - - -* Palette v3.4.0 or greater. - - -## Swap Image - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Profiles**. - - -3. Click on **Add Cluster Profile**. - - -4. Fill out the input fields for **Name**, **Description**, **Type** and **Tags**. Select the type **Full** and click on **Next**. - - -5. Select your infrastructure provider and click on **Next**. - - -6. Complete the Operating System (OS) layer by selecting **Registry**, **Pack Name**, and **Pack Version**. Click on **Next layer** to continue. - - -7. Select a Kubernetes distribution and version. - - -8. Next, select the code editor button **** to edit the pack YAML configuration. Within the `pack` section's scope, add your `imageSwap` configuration block. Click on **Next layer** to continue. - -
- - ![A view of the Kubernetes layer YAML with an imageSwap configuration block.](/clusters_cluster-management_image-swap_kubernetes-layer-yaml.png) - - - - -9. Complete the remainder of the cluster profile creation wizard. - - - -10. Deploy a host cluster and use the cluster profile containing the image swap functionality. Check out the [Deploy a Cluster](/clusters/public-cloud/deploy-k8s-cluster) tutorial for additional guidance in deploying a host cluster. - - -## Validate - -You can validate that the image swap is functioning correctly by using the following steps. - - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the host cluster you deployed with the image swap functionality. - - -4. Download the kubeconfig file to access the cluster. Refer to the [Access Cluster with CLI](/clusters/cluster-management/palette-webctl) guide for detailed steps. - - -5. Review the deployment configuration of a workload using a registry or image impacted by the image swap configuration. Verify the image or registry is swapped to the expected configuration value you provided in the image swap configuration block. - -
- - You can use the following command to verify the correct image and registry of the deployment. Change the `REPLACE_ME` value with the correct values from your environment. - -
- - ```shell - kubectl get deployment REPLACE_ME --namespace REPLACE_ME -o=jsonpath='{.spec.template.spec.containers[0].image}' - ``` - -
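-
-   If you would rather spot-check every image running in the cluster instead of a single deployment, the following sketch lists the unique container images across all namespaces so you can confirm that swapped registries appear as expected. It assumes your kubeconfig already points at the cluster with image swapping enabled.
-
-   ```shell
-   # List every unique container image currently referenced by pods in the cluster.
-   kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" | tr ' ' '\n' | sort | uniq
-   ```
-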
\ No newline at end of file diff --git a/content/docs/04-clusters/20-cluster-groups.md b/content/docs/04-clusters/20-cluster-groups.md deleted file mode 100644 index da1aedaedb..0000000000 --- a/content/docs/04-clusters/20-cluster-groups.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "Cluster Groups" -metaTitle: "Palette Devx for Enterprise Developers" -metaDescription: "Explore Palette Devx as Free Developer" -hideToC: false -icon: "object-group" -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Overview - -A *Cluster Group* is a collection of one or more host clusters that together form a computing platform for you and your users to deploy Palette virtual clusters. Downstream consumers can use the cluster group when using Palette in [*App Mode*](/introduction/palette-modes#whatisappmode?). - -You can create a cluster group under the Palette [tenant](/glossary-all#tenant) scope. Alternatively, you can create a cluster group at the [project](/projects) scope. - -By default, Palette exposes a managed cluster group called *beehive* that is available for users in app mode. This cluster group is managed by Palette and falls under the free tier. The beehive cluster group is located in the eastern side of the U.S. - -You can create a cluster group that is made up of various types of host clusters. You could create a cluster group by similar cloud providers, Kubernetes versions, or by location. You have the flexibility to define the grouping criteria. The following image displays a cluster group comprised of various host clusters deployed in a public cloud, private cloud, and edge environment. - -
- - - -Cluster groups support two network endpoints: load balancer and ingress. All host clusters added to a cluster group must support the endpoint type configured for the cluster. Example: A host cluster configured for ingress as the endpoint type cannot be added to a cluster group configured for the endpoint type load balancer and vice versa. - - - -![An example cluster group made up of various clusters](/clusters_cluster-groups_index-page.png) - -Learn how to create a cluster group by reviewing the [Create and Manage Cluster Groups](/clusters/cluster-groups/create-cluster-group) guide. - -# Resources - -- [Create and Manage Cluster Groups](/clusters/cluster-groups/create-cluster-group) - -- [Enable Disk Backup on Virtual Clusters](/clusters/cluster-groups/cluster-group-backups) - -- [Set up Ingress for a Cluster Group](/clusters/cluster-groups/ingress-cluster-group) - -
\ No newline at end of file diff --git a/content/docs/04-clusters/20-cluster-groups/10-create-cluster-group.md b/content/docs/04-clusters/20-cluster-groups/10-create-cluster-group.md deleted file mode 100644 index c5b3c6a3c0..0000000000 --- a/content/docs/04-clusters/20-cluster-groups/10-create-cluster-group.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -title: "Create and Manage Cluster Groups" -metaTitle: "Create and Manage Palette Cluster Group" -metaDescription: "Learn how to create and manage Palette Cluster Groups" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Create a Cluster Group - -Use the instructions below to create a cluster group. - - - -## Prerequisites - -* To create a Palette Host Cluster Group, you need to deploy a healthy running [Palette host cluster](/clusters). - - -* The host clusters must match the network endpoint type of the cluster group. - - -## Enablement - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Cluster Groups**. - - -2. Click **+New Cluster Groups** to create a new cluster group and provide the following information to the creation wizard. - - - * **Basic Information: ** - - | Parameter | Description | - |-------------------------------|-----------------| - |Group Name | A name for the cluster group.| - |Description (optional) | Description of the group, if any. | - |Tag (optional) | Assign tags to the cluster group.| - - -3. Select **Next** to continue. - - - -4. Use the **Select clusters** drop-down menu to add available host clusters. - - - - Only host clusters created under the current scope are available to add to a cluster group. You can add host clusters created under the current project or at the tenant scope. You cannot add host clusters that were created in another project scope. - - - - -5. Click **Next** once you have added all the host clusters you wish to include. - - -6. Review the configuration options for **Host Clusters Config** and **Virtual Clusters Config**. - - -#### Cluster Group Configurations - - -|**Host Cluster Config** | **Description** | -|--------------------------------------|-------------------------------------------| -|Oversubscription (%): | The allowed oversubscription for cluster in terms of resources. Default is 120%.| -|Cluster endpoint type: | Load balancer or Ingress.| -|Host DNS: | If the selected cluster endpoint is **Ingress**, then for each selected host cluster provide the host DNS pattern. Ensure that a wildcard DNS record exists that maps the provided host pattern to the ingress controller load balancer for this cluster. Check out the [Setup Ingress](/clusters/cluster-groups/ingress-cluster-group) for additional guidance.| - - -#### Palette Virtual Cluster Configuration - -The configuration applied to all virtual clusters launched into the host clusters. Use the **Advanced Config** for further customization. The request sizing applies to the maximum amount of resources a virtual cluster is allowed to claim. - -
- -|**Palette Virtual Cluster Resource ** | **Default** |**Minimum Limit**| -|------------------------------|-------------------|-----------------| -|CPU (per request) | 6 | 4 | -| Memory (per request) | 8 GiB | 4 GiB | -| Storage (per request) | 10 GiB | 2 GiB | - - - - -A virtual cluster requires a minimum of 4 CPU, 4 GiB of memory, and 2 Gib of storage to launch successfully. The default settings in the cluster group virtual cluster configuration YAML file has the following values: - -```yaml -vcluster - resources: - limits: - cpu: 1000m - memory: 1Gi - ephemeral-storage: 1Gi - requests: - cpu: 200m - memory: 256Mi - ephemeral-storage: 128Mi -``` - -Increasing the limit and request values could result in a virtual cluster requiring more resources than the default values of 4 CPU, 4 GiB of memory, and 2 Gib of storage. - - - -To enable virtual clusters for OpenShift, review the OpenShit [instructions below](#enable-virtual-cluster-for-openshift). - - -7. Click **Next** to complete the cluster group creation process. - - -8. Click **Finish Configuration**. - -## Validate - -To review your cluster group, navigate to the left **Main Menu** and select **Cluster Groups**. Your newly created cluster group is now displayed and ready for use. - - -# Manage your Cluster Group - -Once the cluster group is created, the day two operations can be performed from the cluster group's **Settings** options. To access cluster group settings, navigate to the left **Main Menu** and select **Cluster Groups**. Select a cluster group, and click the **Settings** button. - - -## Add a Host Cluster to the Group - -You can add additional host clusters to a cluster group. Navigate to the left **Main Menu** and select **Cluster Groups**. Select the cluster group you want to add additional host clusters. Click on the **+ Add Host Cluster**. Select the desired host clusters and verify the oversubscription and cluster endpoint type settings. - -## Delete your Cluster Group - -To delete a cluster group, navigate to the left **Main Menu** and select **Cluster Groups**. Select the cluster group you want to review or modify the settings. Click on the **Settings** button. Select **Delete Cluster**, enter the cluster name, and confirm the delete operation. - - -# Enable Virtual Clusters for OpenShift - -To deploy a virtual cluster on OpenShift: - - -1. Create a new Cluster Group or edit an existing one and click **Settings**. - - -2. Select **Settings** in the **Cluster Group Settings** pane. - - -3. In the **Advanced Config** file, locate the securityContext section. - - -4. Comment out these lines: - - * ``fsGroup`` - * ``runAsGroup`` - * ``runAsUser`` - -4. Set `openshift.enable:` to `true`. - - -5. Verify these default parameter values are set as follows: - - * ``allowPrivilegeEscalation: false`` - * ``capabilities.drop: [all]`` - * ``runAsNonRoot: true`` - -The following example shows the required configuration for OpenShift. - -**Example** - - -```yaml -#fsGroup: 12345 -securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - all - - #runAsGroup: 12345 - #runAsUser: 12345 - runAsNonRoot: true - -openshift: - enable: true -``` - - - -
diff --git a/content/docs/04-clusters/20-cluster-groups/25-cluster-group-backups.md b/content/docs/04-clusters/20-cluster-groups/25-cluster-group-backups.md deleted file mode 100644 index 9f008c1b57..0000000000 --- a/content/docs/04-clusters/20-cluster-groups/25-cluster-group-backups.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -title: "Enable Disk Backup on Virtual Clusters" -metaTitle: "Enable Disk Backup on Virtual Clusters." -metaDescription: "Learn how to configure disk and volume backup for virtual clusters in a cluster group." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -Palette [Virtual Clusters](/clusters/palette-virtual-clusters) are a capability that cluster groups support and that you can enable when creating a cluster group. By default, the virtual cluster settings in a cluster group disable disk backups. You can back up all the volumes within a virtual cluster using the following steps. - -# Prerequisites - -* A project or tenant backup location. Refer to the [cluster backup and restore](/clusters/cluster-management/backup-restore#clusterbackupandrestore) document to learn how to configure a backup location. - -* Cluster group modification [permissions](/user-management/palette-rbac). - -* A cluster group. Review the [create a cluster group](/clusters/cluster-groups/create-cluster-group) for additional guidance. - - - - -You can also enable virtual cluster disk backup during the cluster group creation process. - - - - -# Enable Backup for Virtual Clusters - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Cluster Groups**. - - -3. Select a cluster group to enable virtual cluster disk backup. - - -4. Click **Settings** and expand the **Settings** Menu. - - -5. To enable disk backup you need to change the following configurations in the **Advanced Config** section. - - - Set `syncer.extraArgs.rewrite-host-paths` to `true` - ```yaml - syncer: - extraArgs: - - --rewrite-host-paths=true - ``` - - Set `hostpathMapper.enabled` to `true` - ```yaml - hostpathMapper: - enabled: true - ``` - - Set `podSecurityStandard` to `privileged` - ```yaml - isolation: - podSecurityStandard: privileged - ``` - - - -Setting the `podSecurityStandard` to `privileged` can introduce privilege escalations. We recommend you discuss this with your security system administrator. - - - -7. Save your changes. - - -All virtual clusters deployed in this cluster group will now include disk storage during backup operations. - -# Validate - - -You can validate the disk backups are occurring by deploying a virtual cluster and taking a backup. - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Deploy a virtual cluster in your cluster group that has the disk backup settings enabled. Refer to the [Deploy a Virtual Cluster to a Cluster Group](/clusters/palette-virtual-clusters/deploy-virtual-cluster) guide to learn how to deploy Palette Virtual clusters. - - -3. Create a backup of your virtual cluster and include all disks. Use the [Create a Cluster Backup](/clusters/cluster-management/backup-restore#createaclusterbackup) guide for additional guidance. - - -4. Access the backup location's blob storage and review the backup files. 
- -Example of a backup that includes the virtual cluster disks. -![Example image of a backup that includes disks](/clusters_cluster-groups_cluster-group-backups_backup-overview.png) \ No newline at end of file diff --git a/content/docs/04-clusters/20-cluster-groups/35-ingress-cluster-group.md b/content/docs/04-clusters/20-cluster-groups/35-ingress-cluster-group.md deleted file mode 100644 index f4e6cdcab2..0000000000 --- a/content/docs/04-clusters/20-cluster-groups/35-ingress-cluster-group.md +++ /dev/null @@ -1,165 +0,0 @@ ---- -title: "Set Up Ingress" -metaTitle: "Set Up Ingress for Cluster Groups" -metaDescription: "Learn how to configure Ingress for a Palette Cluster Group" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -Cluster Groups may have a cluster endpoint type of either Load Balancer or Ingress. The cluster endpoint type determines how Palette Virtual Clusters deployed in a Cluster Group are exposed. You specify the cluster endpoint in Cluster Group Settings. - -Using **Ingress** as the cluster endpoint type is a more cost effective way to access your Kubernetes workloads than using type **Load Balancer**, which requires a new cloud Load Balancer to be provisioned for each virtual cluster. - -When you enable **Ingress** as the endpoint for a Cluster Group, you must deploy an [Ingress Controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers) add-on profile, such as NGINX, on each host cluster in the Cluster Group. The Ingress Controller provides the necessary routing functionality for external traffic to reach the Kubernetes API server of each virtual cluster, as well as any apps each virtual cluster contains. - -# Prerequisites - -- At least one infrastructure or cloud-based cluster you’ve created. - - -- The Ingress Controller must have Secure Socket Layer (SSL) passthrough enabled so that Transport Layer Security (TLS) is not terminated at the ingress controller. Palette provides the ```nginx-ingress``` add-on profile with SSL passthrough already enabled. The following example shows how SSL-passthrough is enabled for the NGINX Ingress Controller. You would add an equivalent configuration to the profile of the add-on you are using.

- - ```yaml - charts: - ingress-nginx: - controller: - extraArgs: - enable-ssl-passthrough: true - ``` -
- - - Palette's ```nginx-ingress``` add-on profile automatically reroutes inbound requests from port 6443 to port 443 using a TCP service configuration. This is so that TLS termination on port 443 for all Apps can occur at the cloud load balancer while simultaneously allowing connections to the API servers of your Virtual Clusters on port 6443. - - If you are using an ingress controller other than the NGINX Ingress Controller and would like to terminate TLS at your ingress controller's cloud load balancer, an equivalent TCP service configuration would be required. Alternatively, you may handle all TLS termination inside the cluster by configuring Cert Manager to issue a certificate for each App's Ingress.
- - The following example shows how port rerouting is achieved for the NGINX Ingress Controller. You would add an equivalent Transmission Control Protocol (TCP) service configuration to the profile of the add-on you are using.

- - ```yaml - tcp: - 6443: "nginx/nginx-ingress-controller:443" - ``` - -# Set Up Ingress - -The following steps describe how to enable an Ingress Controller for a Cluster Group. You will use the `nginx-ingress` add-on profile, but you may choose another ingress controller. -
- -1. Log in to Palette as **Tenant Admin**. -
-2. Identify each host cluster that requires the addition of an NGINX Ingress Controller profile. - - This can be: - - - All the host clusters in an existing Cluster Group,
- or - - - Existing host clusters that you will add to a new Cluster Group.

- -3. Either add the ```nginx-ingress``` add-on profile to each host cluster, or manually configure your own ingress controller add-on profile with the customizations described in the -[Prerequisites](/clusters/cluster-groups/ingress-cluster-group/#prerequisites) section. - -
- - a. From the **Main Menu**, choose **Clusters** and select a cluster. - - b. In the **Profile** tab, click **Add add-on profile (+)** and select `nginx-ingress`. - - c. Confirm and save your changes. - - -4. For each host cluster with an ingress controller add-on profile deployed, follow these steps to open a web shell, identify the External-IP of the LoadBalancer Service, and copy the record you will need to create a canonical Name (CNAME) Domain Name System (DNS) record: - - a. From the **Main Menu**, select a cluster. The cluster **Overview** tab displays. - - b. In the **Details** section beneath **Metrics**, click the **Connect** button next to the Kubernetes config file to open a web shell. - - c. Invoke the following command to display the External-IP of the ```nginx-ingress``` LoadBalancer Service:

- - ``` - kubectl -n nginx get service nginx-ingress-controller - ``` - - d. Copy the record to your clipboard or to a text file. You will use the External-IP address to create a CNAME DNS record. -
- - e. Close the web shell. -
- -5. Use your DNS provider to create a wildcard CNAME record that maps to the External-IP for the NGINX Ingress Controller. Paste the External-IP you copied from the web shell to create the CNAME record. -
- - - -The CNAME record is also known as the host cluster DNS pattern. - - - -
- -6. Copy the CNAME record to your clipboard. - -
- -7. Ensure you are in Palette's Cluster Mode, under the Tenant Admin scope. From the **Main Menu**, select **Cluster Groups**, then select the Cluster Group that requires ingress.

- a. From the **Host Clusters** tab, select **Settings > Clusters**. - - b. Choose **Ingress** as the **Cluster endpoint type**. - - c. Paste the name of the wildcard CNAME record into the **Host DNS** field. - - -If you haven’t yet created a Cluster Group, you can configure each host cluster as described and add them to a new Cluster Group later. - - -# Validate - -To validate that ingress is functioning as expected, do the following: - -1. From the **User Menu**, switch to App Mode and deploy a new virtual cluster.
- To learn how to deploy a virtual cluster, check out the [Add Virtual Clusters to a Cluster Group](/clusters/palette-virtual-clusters/deploy-virtual-cluster) guide. - - -2. Use a web shell and type the following command to verify you can connect to the newly deployed virtual cluster: - - ```shell - kubectl get namespaces - ``` -This should display a list of namespaces as shown in the example: - -
- - ```shell - NAME STATUS AGE - default Active 4d11h - kube-system Active 4d11h - kube-public Active 4d11h - kube-node-lease Active 4d11h - cluster-63c91f359ae82b46c9bad615 Active 4d11h - app-gamebox-lb-spectro-gamebox Active 4d11h - ``` - -If an error message displays, it indicates something is wrong with the configuration. Verify the following: - -- Each host cluster is deployed with NGINX Ingress Controller. - -- The CNAME record correctly maps to the External-IP of the NGINX Ingress Controller’s LoadBalancer Service. - -- Cluster Group Settings specify the Cluster endpoint type as **Ingress**, and **Host DNS** specifies the CNAME record you created. - -# Resources - -- [Cluster Groups](/clusters/cluster-groups) - - - - - - diff --git a/content/docs/04.5-devx.md b/content/docs/04.5-devx.md deleted file mode 100644 index 606c4ee9d3..0000000000 --- a/content/docs/04.5-devx.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: "Palette Dev Engine" -metaTitle: "Palette Dev Engine " -metaDescription: "Explore Palette Dev Engine" -icon: "users" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -Palette provides two different modes for deploying and managing applications. The first mode is *Cluster Mode* - this mode enables you to create, deploy, and manage Kubernetes clusters and applications. The second mode is *App Mode* - a mode optimized for a simpler and streamlined developer experience that allows you to only focus on the building, maintenance, testing, deployment, and monitoring of your applications. - -You can leverage Spectro Cloud's complementary managed Kubernetes cluster when using App Mode. The complementary resources have a limit of 12 vCPU, 16 GiB of memory, and 20 GiB of free storage. Alternatively, you may deploy applications on Kubernetes clusters that belong to your organization and are managed by Palette. - -Check out the in-depth explanation of [App Mode and Cluster Mode](/introduction/palette-modes) to learn more about each mode. - - -
- - -# Get Started - -To get started with App Mode, give the tutorial [Deploy an Application using Palette Dev Engine](/devx/apps/deploy-app) a try so that you can learn how to use App Mode with Palette Dev Engine. - - -## Supported Platforms - -App Mode is available for the following Palette platforms. - -| Platform | Supported | Palette Version | -|---|----|---| -| SaaS | ✅| `v3.0.0` or greater. | -| Self-hosted | ✅ | `3.4.0` or greater. | -| Airgap Self-hosted | ❌| N/A. - - -## Manage Resources - -The PDE dashboard provides a snapshot of resource utilization in your PDE environment. You can keep track of the resource utilization in your PDE environment without having to navigate to different views. The dashboard displays the following information. -
- -* The number of apps deployed. - - -* The number of virtual clusters and their respective status. - - -* The aggregate resource utilization at both the tenant and system levels for these resources. - * Virtual clusters - * CPU - * Memory - * Storage - - -* The number of app profiles available. - - - -![A view of the PDE dashboard with resources in use.](/docs_devx_pde-dashboard-utilization.png) - - -## Automation Support - -You can manage PDE resources through the [Palette API](/api/introduction), [Spectro Cloud Terraform provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs), and the Palette CLI. Download the Palette CLI from the [Downloads](/spectro-downloads#palettecli) page to start programmatically using PDE. - -![A view of the Palette CLI menu from a terminal](/devx_devx_cli-display.png) - -Check out the [Palette CLI](/palette-cli/install-palette-cli) for installation guidance. - -
- -## PDE Visual Studio Code Extension - -You can create and manage lightweight Kubernetes clusters from within Visual Studio (VS) Code by using the PDE VS Code Extension. The plugin accelerates developing and testing your containerized applications and integrates with the Kubernetes Extension for VS Code. To learn about features of PDE VS Code Extension and how to install and activate it, check out [PDE Extension for Visual Studio Code](https://marketplace.visualstudio.com/items?itemName=SpectroCloud.extension-palette). - - -# Resources - -- [Use Cases](/devx/enterprise-user) - - -- [App Profiles](/devx/app-profile) - - -- [Apps](/devx/app-profile) - - -- [Palette Virtual Clusters](/devx/palette-virtual-clusters) - - - -- [Manage Dev Engine](/devx/manage-dev-engine) - - - -
\ No newline at end of file diff --git a/content/docs/04.5-devx/02-enterprise-user.md b/content/docs/04.5-devx/02-enterprise-user.md deleted file mode 100644 index 998c46aa5c..0000000000 --- a/content/docs/04.5-devx/02-enterprise-user.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: "Use Cases" -metaTitle: "PDE Use Cases" -metaDescription: "Explore Palette DevX use cases." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Overview - -You can use the Palette Developer Experience (PDE) to serve two common use cases: enterprise developers and individual application authors who want to deploy a containerized application without worrying about infrastructure overhead. - -Refer to the [Enterprise Users](#enterprise-users) section to learn more about enabling PDE for a large set of downstream users. If you are an individual application author, check out the [Individual Application Author](#application-authors) section to get started. - -# Enterprise Users - -To enable PDE for downstream users, start by reviewing the following resources. - - -1. Understand the difference between [Cluster Mode and App Mode](/introduction/palette-modes). - - -2. Create and Manage the [Cluster Group](/clusters/cluster-groups). - - -3. Allocate [User Quota](/devx/manage-dev-engine/resource-quota). - - -The next set of references are not required but good for Palette administrators to be aware of. - -- [Enable Disk Backup on Virtual Clusters](/clusters/cluster-groups/cluster-group-backups). - -- [Set Up Ingress for a Cluster Group](/clusters/cluster-groups/ingress-cluster-group). - -- [Pause and Resume Palette Virtual Clusters](/devx/palette-virtual-clusters/pause-restore-virtual-clusters). - -- [Resize Palette Virtual Clusters](/devx/palette-virtual-clusters/resize-virtual-clusters). - - -# Application Authors - -Use PDE to deploy your containerized applications to Palette. Leverage Palette's free-tier offering of PDE to get started. Create your application profiles and deploy your applications to Palette in no time. - - -Use the following resource to get started with PDE today. - -* Learn about [Palette's Free Tier Offering](/getting-started/palette-freemium). - -* [Quick Start with Palette App Mode](/devx#quickstartwithpaletteappmode). - -* Learn about [App Mode versus Cluster Mode](/introduction/palette-modes). - -* Familiarize yourself with [App Profiles](/devx/app-profile). - -* Review the supported [out-of-the-box-services](/devx/app-profile/services/service-listings). - -
- - - - - - - diff --git a/content/docs/04.5-devx/05-app-profile.md b/content/docs/04.5-devx/05-app-profile.md deleted file mode 100644 index cf67fc1879..0000000000 --- a/content/docs/04.5-devx/05-app-profile.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: "App Profiles" -metaTitle: "Palette Dev Engine for Enterprise Developers" -metaDescription: "Explore Palette Dev Engine as Free Developers" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# App Profiles - -App Profiles are templates created with pre-configured services required for Palette Virtual Cluster deployment. App Profiles provide a way to drive consistency across virtual clusters. - -You create App Profiles to meet specific types of workloads on your Palette [Virtual Clusters](/devx/palette-virtual-clusters). You can use containers, Helm Charts, custom manifest, containers, and other out-of-the-box services such as databases, message queue systems, and object storage. Check out the Palette Dev Engine [Services](/devx/app-profile/services) documentation to learn more about the available services. - -You can also review all the Palette Dev Engine services that offer an out-of-the-box experience by reviewing the [Service Listings](/devx/app-profile/services). - - -
-
-
-
-When adding a manifest-type layer to an App Profile, make sure to specify a namespace. Otherwise, the manifest is deployed to the `default` namespace.
-
-
-
-```yaml
-namespace: yourNameHere
-```
-
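To show where the namespace belongs in practice, the following is a minimal, hypothetical manifest for a manifest-type layer. The `ConfigMap` kind, the `my-app-config` name, and the `my-app` namespace are placeholder values, not requirements of Palette; the point is that the namespace is set under `metadata`.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-app-config
  # Set an explicit namespace so the manifest is not deployed to the default namespace.
  namespace: my-app
data:
  LOG_LEVEL: "info"
```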
- -# Next Steps - -Get started today by learning how to create your [App Profile](/devx/app-profile/create-app-profile). - -# Resources -- [Create an App Profile](/devx/app-profile/create-app-profile) -- [Container Deployment](/devx/app-profile/container-deployment) -- [App Profile Macros](/devx/app-profile/app-profile-macros) -- [App Profile Cloning](/devx/app-profile/app-profile-cloning) -- [App Profile Versioning](/devx/app-profile/versioning-app-profile) - diff --git a/content/docs/04.5-devx/05-app-profile/00-create-app-profile.md b/content/docs/04.5-devx/05-app-profile/00-create-app-profile.md deleted file mode 100644 index 0684871722..0000000000 --- a/content/docs/04.5-devx/05-app-profile/00-create-app-profile.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: "Create an App Profile" -metaTitle: "Learn how to create an App Profile" -metaDescription: "This document provides guidance on how to create a Palette App Profile" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Create an App Profile - -You can create as many App Profiles as needed to fit various types of workloads on your Palette Virtual Clusters. Each App Profile can contain multiple services, also called layers in the App Profile stack. You can also create multiple versions of an App Profile. For more information, visit [App Profile Versioning](/devx/app-profile/versioning-app-profile). - -Use the following steps to create an App Profile. - - -
-
-
-
-
- A tutorial is available to help you learn Palette Dev Engine by deploying an application. Check out [Deploy an Application using Palette Dev Engine](/devx/apps/deploy-app) to get started.
-
-
-
-
-# Prerequisites
-
-* A Spectro Cloud [account](https://www.spectrocloud.com/get-started/).
-
- -# App Profile Creation - -To create an App Profile: - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. In App Mode, select **App Profiles** from the **Main Menu**, and click the **New App Profile** button. - - -3. Provide the following basic information for your App Profile and click **Next**. - - -| Parameter | Description | -|-------------------------------|-----------------| -|Application Profile Name | A custom name for the App Profile| -|Version (optional) | The default value is 1.0.0. You can create multiple versions of an App Profile using the format **`major.minor.patch`**. -|Description (optional) | Description of the App Profile. | -|Tag (optional) | Assign tags to the app profile.| - - -4. Select one of the available services to start configuring your App Profile. Refer to [App Profiles](/devx/app-profile) for a list of available services. - - -5. Provide configuration information for the service. - - -6. You can add more services to the App Profile as needed. To do this, click the **Actions** button next to the **Configure tier** pane. To rearrange layers in the profile, select a service and drag it up or down in the pane. Each service becomes a layer in the App Profile stack in the order shown in this pane. - - -7. When you've provided the required configuration information for services, click **Review**. Your App Profile is now created and can be deployed. - -# Validate - -To validate your App Profile is available and ready for use, use the following steps. - -1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. - - -2. Navigate to the left **Main Menu** and click on **App Profiles**. - - -3. Select the cluster profile you created to review its details. - - -4. Hover your cursor over each app layer to learn more about the layers, including the pack name, version, and registry. - - - ![A view of a cursor triggering the info box for each app profile layer.](/devx_app-profile_create-app-profile_app-layer-infoboxes.png) - -
- - - - Use the pop-up information box for each layer to help you gather the required information when creating Terraform templates for [app profiles](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/application_profile). - - - - -5. Deploy your application to a virtual cluster to verify all the required configurations and dependencies are correct. Review the [Create and Manage Apps](/devx/apps/create-app) to learn how to deploy an app to a virtual cluster. Check out the [Deploy an Application using Palette Dev Engine](/devx/apps/deploy-app) tutorial for a more in-depth guide. - -# Next Steps - -Start exploring the various [out-of-the-box](/devx/app-profile/services) services Palette exposes to application authors. Use these services to quickly deploy applications without the overhead of managing and configuring the infrastructure required for common third-party services such as databases, message queues, and more. diff --git a/content/docs/04.5-devx/05-app-profile/02-container-deployment.md b/content/docs/04.5-devx/05-app-profile/02-container-deployment.md deleted file mode 100644 index 6cd0bf56d2..0000000000 --- a/content/docs/04.5-devx/05-app-profile/02-container-deployment.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: "Container Deployment" -metaTitle: "Palette Dev Engine App Profile Container Deployment" -metaDescription: "Palette Dev Engine App Profile Container Deployment" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Container Deployment - -Palette App Mode supports the use of containers, a standard unit of software that packages code and all its dependencies to run applications quickly and reliably from one computing environment to another. Containers contain all the required executables, binary codes, libraries, and configuration files. As a result, containers are lightweight and portable with less overhead. To add a container tier to Palette Dev Engine App Profile, follow the steps below. - -
- -# Prerequisite - -* [Spectro Cloud Palette account](https://www.spectrocloud.com/get-started/) - - -## Add Container to App Profile - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Select **App Profiles** from the left **Main Menu** and click on the **New App Profile** button at the top right-hand side of the main screen. - - -3. Provide the wizard with the following information and click on **Next** after you have filled out the following basic information. - - | Parameter | Description | - |-------------------------------|-----------------| - |**Application Profile Name** | A custom name for the App Profile| - |**Description (optional)** | Description of the App Profile, if any | - |**Tag (optional)** | Tags on a cluster group are propagated to the infrastructure environment environments.| - - -4. Next, select **Container Deployment** from the available services list. - - -5. Provide the following information to the wizard. - - **General Settings**: - - | Parameter | Description | - | ---------------- | ------------------------------------------------------------------------------------------------------ | - | **Container Name** | A unique name for the container deployment. | - | **Registry** | Select the registry from which the image will be downloaded. If specifying a non-Docker Hub registry, ensure you provide the full URL of the image. | - | **Image** | Image of the container to be deployed. | - | **Replicas** | The number of application instances to be deployed. This option follows the same behavior as a [*ReplicaSet*](https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/) in the Kubernetes configuration file. A max of 10 replicas is supported. - -
- - - - - - When adding a container image from a public [DockerHub registry](https://hub.docker.com/), you can skip the registry hostname. For instance, to download the Nginx image, specify `nginx` and it will be downloaded correctly during the provisioning process. - - - -
- - **Network Access**: - - | Parameter | Description | - | -------------- | ------------------------------------------------------------------------------------------------------------- | - | **Private** | To establish connectivity to a container service through a private network. | - | **Public** | To establish connectivity to a container service through the public network. | - | **Port number** | Exposes the container for external communication. | - -
- - **Environment Variables**: - - | Parameter | Description | - | ----------------------- | ------------------------------------------------------------------------------------------------------ | - | **Environment Variables** | Environment variables can be specified as **Key-Value** pairs during the container deployment. | - -
- - **Volume**: - - | Parameter | Description | - | ------------- | --------------------------------------------------------------- | - | **Volume** | To persist the data generated by and used by the container. | - | **Name** | Volume name. | - | **Size** | The size of the volume in GiB. | - | **Mount Path** | The path to the volume. | - - -
- - * **Runtime Settings**: The command and arguments you define here will override the default command and arguments provided by the container image. - - -6. Click the **Review** button when you have filled out the information and are ready to conclude the wizard. - -Once the container is added as a layer to the App Profile, continue with the remaining steps of the [App Profile creation](/devx/app-profile/create-app-profile) wizard. You can add more services as layers if needed. - - - - -## Validate - -1. Login to [Palette](/devx#quickstartwithpaletteappmode). - - -2. Select the **App Profiles** option from the left **Main Menu**. - - -3. In the App Profiles page, you will find your App Profile listed. Click the name of the App Profile to view the profile details. The app profile tier details will show the container added to the profile. - - - - diff --git a/content/docs/04.5-devx/05-app-profile/03-app-profile-macros.md b/content/docs/04.5-devx/05-app-profile/03-app-profile-macros.md deleted file mode 100644 index a69a077af8..0000000000 --- a/content/docs/04.5-devx/05-app-profile/03-app-profile-macros.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "Output Variables" -metaTitle: "Palette Dev Engine Output Variables" -metaDescription: "Explore Palette Dev Engine App Profile Macros" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Output Variables - -Palette Dev Engine output variables are defined in the [app profile](/glossary-all#appprofile) and are only resolved at cluster deployment time. The output variables have the following properties: - -* May be referenced by specifying them during app profile creation. - -* Output variables are inherited from the lower tiers of the app profile. - -* Each service type exposes a set of unique output variables. - - - -The variables are generated when the server layer is deployed. Output variables can be consumed by the higher layers in the app profile. - -Check out the [Services Connectivity](/devx/app-profile/services/connectivity) page to learn how to use output variables for establishing network connectivity between services. - - -
- - -``` -{{.spectro.app.$appdeploymentName..}} -``` - -# System Output Variables - -The following output variables are globally available for all services. - -| Output Variable | Description | -| --- | --- | -| `spectro.system.user.name` | The user name of the logged in user. | -| `spectro.system.user.uid` | The id of the logged in user.| -| `spectro.system.user.email` | The email address of the logged in user. | -| `spectro.system.tenant.uid `| The id of the current tenant or organization. | -| `spectro.system.project.uid` | The id of the current project. | -| `spectro.system.project.name` | The name of the current project. | -| `spectro.system.cluster.uid` | The id of the current cluster. | -| `spectro.system.cluster.name` | The name of the current cluster. | -| `spectro.system.kubernetes.version` | The current version of Kubernetes. | -| `spectro.system.reverseproxy.server` | The hostname of the Spectro Cloud reverse proxy server. This value is empty when not enabled. | -| `spectro.system.reverseproxy.port` | The port of the Spectro Cloud reverse proxy server. This value is empty when not enabled. | -| `spectro.system.reverseproxy.vhostport` | The port of the virtual host that is hosting the reverse proxy. | -| `spectro.system.reverseproxy.protocol` | The protocol used for the Spectro Cloud reverse proxy. | -| `spectro.system.cloud.type` | The type of cloud environment where the cluster is deployed, such as EKS, AKS, and GKE. | -| `spectro.system.cloud.region` | The cloud provider region where the cluster is deployed.| -| `spectro.system.apptier.name` | The name of the service layer from the context of the app profile. | -| `spectro.system.apptier.uid` | The id of the service layer. | -| `spectro.system.appprofile.name` | The name of the app profile. | -| `spectro.system.appprofile.uid` | The id of the app profile. | -| `spectro.system.appdeployment.uid` | The id of the app deployment. | -| `spectro.system.appdeployment.name` | The name of the app deployment. | -| `spectro.system.appdeployment.tiername` | The name of the service layer from the context of the app deployment. | -| `spectro.system.appdeployment.ingress.host` | The ingress host pattern for a cluster group with ingress enabled. This value is dynamically generated. | - -# Container Service Output Variables - -The container service type exposes the following output variables. Replace **[service-name]** with the respective name of the service layer. - -| Output Variable | Description | -| --- | --- | -| `.spectro.app.$appDeploymentName.[service-name].CONTAINER_NAMESPACE` | The Kubernetes namespace of the deployed container. | -|`.spectro.app.$appDeploymentName.[service-name].CONTAINER_SVC` | The Kubernetes DNS hostname of the service. | -|`.spectro.app.$appDeploymentName.[service-name].CONTAINER_SVC_PORT` | The exposed port of the service. | -| `spectro.app.$appDeploymentName.[service-name].CONTAINER_SVC_EXTERNALHOSTNAME`| The Kubernetes DNS hostname of the load balancer. This value is available if the service's to **Public** and deployed to a public cloud provider environment. | -|`spectro.app.$appDeploymentName.[service-name].CONTAINER_SVC_EXTERNALIP`| The public URL of the load balancer. This value is available if the service's access is set to **Public** and deployed to a private cloud provider environment.| - -# Database Service Output Variables - -Each database service exposes a set of output variables. Review each database service for more details. 
You can find information about each database service by checking out the [Available Services](/devx/app-profile/services/service-listings) resource. - -# Resources - -* [Palette System Macros](/registries-and-packs/pack-constraints#packmacros) - -* [Palette User Macros](/clusters/cluster-management/macros#overview) -
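As a closing illustration, the following is a minimal sketch of how a service layer placed above a container service might consume the container output variables listed earlier as environment variables. It assumes a hypothetical container layer named `api-1`; the variable names come from the table above, while the layer name and the environment variable keys are placeholders.

```yaml
env:
  - name: API_HOST
    # Kubernetes DNS hostname of the container service layer named api-1.
    value: "{{.spectro.app.$appDeploymentName.api-1.CONTAINER_SVC}}"
  - name: API_PORT
    # Port exposed by that service.
    value: "{{.spectro.app.$appDeploymentName.api-1.CONTAINER_SVC_PORT}}"
```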
diff --git a/content/docs/04.5-devx/05-app-profile/04-app-profile-cloning.md b/content/docs/04.5-devx/05-app-profile/04-app-profile-cloning.md deleted file mode 100644 index becc105d5e..0000000000 --- a/content/docs/04.5-devx/05-app-profile/04-app-profile-cloning.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: "App Profile Cloning" -metaTitle: "Palette Dev Engine App Profile Cloning" -metaDescription: "Palette Dev Engine App Profile Cloning" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Clone App Profiles - -Palette supports the cloning of App Profiles across multiple projects. For example, you can clone an app profile created under a specific project to another project within the same [tenant](/glossary-all#tenant). The ability to clone App Profiles can be useful for the following use cases. - -* Share system scope App Profiles to projects scope. - - -* Share App Profiles amongst different projects. - -# Prerequisites - -* An App Profile created in Palette. Check out the [Create an App Profile](/devx/app-profile/create-app-profile) for guidance. - -# Clone an App Profile - -To clone an App Profile follow the steps below: - -1. Login to [Palette](/devx#quickstartwithpaletteappmode) - - -2. Select **App Profiles** from the left **Main Menu**. Identify the App Profile you want to clone and click on the three dots at the right handside of the row. Click on the **Clone** button from the drop down. - - -4. You will be prompted to fill out the following information: - * **Name:** Name of the new app profile. - * **Profile Version:** Version number for the new app profile. - * **Source Profile Version:** The version number of the source app profile getting cloned. - * **Target Project:** The target project to which the profile is to be cloned. Select the project name from the drop-down menu. - - -5. Click **Confirm** to conclude the cloning of the App Profile. - -In the target project specified during the clone process, you can now use the App Profile for app deployments. - - -# Validate - -To validate the App Profile is cloned and available in the target project conduct the following steps: - - -1. Login to [Palette](/devx#quickstartwithpaletteappmode) - - -2. Select the **App Profiles** option from the left **Main Menu**. - - -3. This page will list all the App Profiles available to you. In addition, this should list all the cloned App Profiles as well. Use the cloned App Profile for App deployment under the target scope. - - - - - - - diff --git a/content/docs/04.5-devx/05-app-profile/05-versioning-app-profile.md b/content/docs/04.5-devx/05-app-profile/05-versioning-app-profile.md deleted file mode 100644 index a4a03dcfea..0000000000 --- a/content/docs/04.5-devx/05-app-profile/05-versioning-app-profile.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -title: "App Profile Versioning" -metaTitle: "Palette Dev Engine App Profile Versioning" -metaDescription: "Learn about App Profile Versioning, what it is, how to create a version, and how to manage a version." 
-hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - - - - -# App Profile Versioning - -Palette enables users to create multiple versions of an App Profile within the scope of a single profile name. The **Version** field of the app profile takes a semantic versioning format (only numbers supported) as below: - - **`major.minor.patch`** represented as: Version 1.1.2 - -App versioning is an optional field with a default value of **1.0.0** . The users can create multiple versions of an app profile under a single profile name and each of these versions can have its own pack configurations. - -Cluster profile versions are grouped under their unique names and their uniqueness is decided by the name and version within the scope and promotes backward compatibility to profile changes. - - **Example:** Profile-1 can have multiple versions like 1.0.0 and 2.0.1. These versions are grouped under the **App Profile Name** Profile-1. The menu next to the app profile name contains the different versions under that name. - - The version numbers can be edited from the **Settings > Edit Info** option from the App Profile page. While deleting the profile, select the version to be deleted. - -The new versions of the App Profile may: - -* Contain additional tiers - -* Drop existing tiers - -* Contain new versions of a tier - -* Update the configuration of the existing tiers - - - -The following attributes are non-editable during versioning: - -* App Profile name and version number. New version numbers are created and existing version number can be deleted. - -* App Profile tier name and type. - - - - -# Apply Version to a Profile - - -## Prerequisites - -- An App Profile - -## Create Version - -1. Log in to [Palette](/devx#quickstartwithpaletteappmode) - - -2. Select the **App Profiles** option from the left **Main Menu**. - - -3. Select the App Profile to be versioned. - - -4. From the drop-down menu next to the App Profile name, select the **Create New Version**. - - -5. Give the version number per the semantic format described above. - - -6. Click on **Confirm** to complete the wizard. The UI will return a versioning successful message. - -## Validate - -To validate the App Profile is versioned and available in the target project conduct the following steps: - -1. Log in to [Palette](/devx#quickstartwithpaletteappmode) - - -2. Select the **App Profiles** option from the left **Main Menu**. - - -3. This page will list all the App Profiles available to you. In addition, this should list all the versioned App Profiles as well. Use the versioned App Profile for App deployment under the target scope. - -# Delete an App Profile - -## Prerequisites - -- An App Profile - -## Delete Profile - -1. Log in to [Palette](/devx#quickstartwithpaletteappmode) - - -2. Select the **App Profiles** option from the left **Main Menu**. - - -3. This page will list all the App Profiles available to you. Select the App Profile to be deleted. - - -4. From the drop-down menu next to the App Profile Name, select the version to be deleted and click **Delete** to delete the profile. - - -5. The selected App Profile version will be deleted. - -## Validate - - -To validate the App Profile is removed and not available in the target project, conduct the following steps: - -1. 
Log in to [Palette](/devx#quickstartwithpaletteappmode) - - -2. Select the **App Profiles** option from the left **Main Menu**. - - -3. Verify the app profile is not in the list of available profiles. - - -# Update an App Profile - -You can make changes to the app profile, such as version updates, manifest updates, app tier additions and removals. - -App Profile changes will generate an update notification on all the Apps that are created from the app profile. Update notifications include information about all the changes applied to the profile since the initial creation or since the previous update. You can apply the update to the Apps individually at any time. - -# Apply Updates to the App - -To apply updates to an App follow the below steps: - -1. Log in to [Palette](/devx#quickstartwithpaletteappmode) - - -2. Select the **App Profiles** option from the left **Main Menu**. - - -3. This page will list all the App Profiles available to you. Select the App Profile you want to update. - - -4. Make the desired changes. You can add or delete layers, change pack versions, change pack values, etc. and save your changes. - -5. Navigate to the left **Main Menu** and click on **Apps** - - -5. On the App page, apps eligible for an update will have an **Updates Available** badge. - - -* Click on the App with the update notification to start the **Apply** updates wizard. Click on **Apply** button. - - -* An **Apply Updates** wizard will open up with the update notification. The notification contains details about the updates that will be applied. Click the **Confirm** button to apply the updates to the app. - -## Validate - -To validate that the App profile updates are implemented on the target app, conduct the following steps: - -1. Log in to [Palette](/devx#quickstartwithpaletteappmode) - - -2. Select the **Apps** option from the left **Main Menu**. - - -3. This page will list all the Apps. Click open the updated App. - - -4. Review the app profile details, which will include the applied updates. diff --git a/content/docs/04.5-devx/05-app-profile/08-services.md b/content/docs/04.5-devx/05-app-profile/08-services.md deleted file mode 100644 index d380761b93..0000000000 --- a/content/docs/04.5-devx/05-app-profile/08-services.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: "Services" -metaTitle: "Palette Dev Engine App Services" -metaDescription: "Palette Dev Engine App Services" -hideToC: false -fullWidth: false ---- - -# Services - -Palette offers you different types of services to help you model all the dependencies and resources required to deploy an application. You can choose from several different service types in Palette. - -
- -## Container Deployment - -[Containers](https://www.docker.com/resources/what-container/) are methods of building, packaging, and deploying an application. A container includes the code, run-time, libraries, and all the dependencies required by a containerized workload. Containers are deployed to their target environment. For steps on how to deploy a container in Palette, refer to [Container Deployment](/devx/app-profile/container-deployment). - - -## Helm - -Palette provides out-of-the-box Helm registries and allows you to add registries. For more information, visit [Palette Helm Registry](/registries-and-packs/helm-charts). - - -## Manifest - -You can construct App Profile layers using raw manifests to provision Kubernetes resources that are unavailable in Palette or Helm Charts. Pack Manifests provide a pass-through mechanism to orchestrate Kubernetes resources in a cluster. For example, specific integrations may require the creation of secrets or Custom Resource Definitions (CRDs). To achieve this, you can attach a Manifest file to the layer. - -# Out-of-the-box Services - -Palette also offers a set of common services or resources that application authors frequently use to expand or add capabilities to an application. These services are managed by Palette and help reduce the burden of maintaining and deploying resources required by your application. - -## Messaging System Services - -A messaging system service is a platform that enables the exchange of messages between users. It allows people to send and receive messages in real time using different devices and communication channels. - -
- -## Object Storage Services - -Object storage is a data storage solution for unlimited, unstructured data like images, videos, and backups. Uploaded data is managed as objects, not files or blocks, and is scalable and durable. - -
- - -## Database Services - -A database stores structured data electronically for fast search and retrieval. It's commonly used for applications and websites to store information such as user data, transactions, and analytics. - -
- -## Available Services - -Check out the available service offerings in Palette by visiting the [Service Listings](/devx/app-profile/services/service-listings/) resource. diff --git a/content/docs/04.5-devx/05-app-profile/08-services/15-db-services.md b/content/docs/04.5-devx/05-app-profile/08-services/15-db-services.md deleted file mode 100644 index 4f0eb26a8f..0000000000 --- a/content/docs/04.5-devx/05-app-profile/08-services/15-db-services.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: "Databases" -metaTitle: "Palette Dev Engine Database Services" -metaDescription: "Explore Palette Dev Engine Database Services" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -Palette Dev Engine facilitates database service setup, operation, and scaling without installing physical hardware, software, or performance configurations. Instead, Palette takes care of all the administrative and maintenance tasks so that you can use and access the database quickly. - -For a list of all the supported databases, refer to the [Available Services](/devx/app-profile/services/service-listings) resource and select the **Database** filter. - -## Database Deployment - -Palette leverages several Kubernetes built-in workload resources such as Deployment, ReplicaSet, DaemondSet, StatefulSet, etc. To take advantage of the persistence of the data storage, Palette deploys database services as [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/). - -StatefulSet lets you run one or more related pods that track the state. The database service workload records data persistently through a StatefulSet that matches each pod with a [PersistentVolume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). The database service running in the pods that belong to the StatefulSet can replicate data to other pods in the same StatefulSet to improve the overall resilience of the service. - -## Storage - -You must allocate storage to the database service based on the available storage within the Virtual Cluster. - -
- - - - -By default, cluster groups are configured not to back up the disk storage. This default behavior affects database services because a backup would not include the storage disk. To learn more, refer to [Enable Disk Backup on Virtual Clusters](/clusters/cluster-groups/cluster-group-backups). - - - -## Version Update - -You can make changes to the app profile services, such as version updates, manifest updates, app service additions, and removals. [App Profile Service update](/devx/app-profile/versioning-app-profile#updateanappprofile) -will generate an update notification on all the apps created from the app profile. Update notifications include all the changes applied to the profile since the initial creation or the previous update. You can apply the update to the apps individually at any time. - -## Output Variables - -Each database service has a set of exposed output variables. These output variables can be used to establish service connectivity with other service layers of the app profile by consuming the information. - -The following code snippet is an example of the output variables exposed by the MongoDB service. Check out the [service listings](/devx/app-profile/services/service-listings/) page to learn more about each service. - -
- - -``` -env: - - name: USER_NAME - value: "{{.spectro.app.$appDeploymentName.mongodb-1.USERNAME}}" - - name: PASSWORD - value: "{{.spectro.app.$appDeploymentName.mongodb-1.PASSWORD}}" - - name: MONGO_URI - value: "{{.spectro.app.$appDeploymentName.mongodb-1.MONGO_URI}}" - - name: MONGO_URI_SRV - value: "{{.spectro.app.$appDeploymentName.mongodb-1.MONGO_URI_SRV}}" -``` - -
- - -The service connectivity follows a fixed hierarchy in Palette. The connectivity is established for higher-level services using the output variable. Higher-level refers to the service added to the app profile after adding the database service. - - - -
- - -## Connect to a DB Service - -Applications and clients can connect to a Palette database service by using the information exposed in the output variables. Check out the [Service Connectivity](/devx/app-profile/services/connectivity) documentation to learn more about connecting to a database service. \ No newline at end of file diff --git a/content/docs/04.5-devx/05-app-profile/08-services/4-connectivity.md b/content/docs/04.5-devx/05-app-profile/08-services/4-connectivity.md deleted file mode 100644 index 01d05e5155..0000000000 --- a/content/docs/04.5-devx/05-app-profile/08-services/4-connectivity.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: "Service Connectivity" -metaTitle: "Palette Dev Engine Database Connectivity" -metaDescription: "Palette Dev Engine Database Connectivity" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Service Connectivity - - -Using the exposed output variables, you can connect different service layers. For example, assume you have one application and database defined in an app profile. You can connect the two using the exposed output variable containing the Kubernetes service hostname. - -It's important to consider the order of service layers. Using the previous example, you must add the application after the database service layer to use the output variables of the database service. In other words, the database service should be at the app profile's bottom-most layer. - -The order of the service layers is important because the output variables used in services follow a usage hierarchy. The output variables for a service are only available if the service comes after the service that exposes the output variable. Output variables from the first services you add, which become the first layer in the app profile stack, can be consumed by other services after it. However, output variables cannot be passed downwards from the top service layers. - -
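As a concrete sketch of this ordering, assume an app profile with a Postgres layer named `postgresql-3` at the bottom and a container layer added after it. The container layer can then consume the database hostname output variable as an environment variable, as in the hypothetical snippet below; the variable path matches the example described in the next section, while the `DB_HOST` key is a placeholder.

```yaml
env:
  - name: DB_HOST
    # Kubernetes service hostname exposed by the Postgres layer below this one.
    value: "{{.spectro.app.$appDeploymentName.postgresql-3.POSTGRESMSTR_SVC}}"
```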
- -## Connectivity Example - -The following diagram is an example of an app profile containing three different service layers. The bottom layer is a Postgres database, the second layer is a container application, and the top layer is a Helm application. - - -The API server communicates with the database, and the application sends requests to the API. For each service to establish network connectivity, each layer needs to reference the output variable of the lower layer. The API will use the output variable Postgres exposes that contains the Kubernetes hostname. - -![Output Variables example](/devx-services-connectivity-output-variables-example.png) - -The API server can consume the output variable `{{.spectro.app.$appDeploymentName.postgresql-3.POSTGRESMSTR_SVC}}` from the Postgres service layer to connect to the database. The output variable would be consumed as an environment variable. - -![The API layer consuming the DB output variable](/devx-services-connectivity-container-env-example.png) - -The application would use the output variable `{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC}}` from the API service layer to connect to the API. The output variable value can be referenced as a YAML value in the Helm manifest file. - -![The App layer consuming the API output variable](/devx-services-connectivity-helm-env-example.png) \ No newline at end of file diff --git a/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings.mdx b/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings.mdx deleted file mode 100644 index abdae99199..0000000000 --- a/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings.mdx +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "Available Services" -metaTitle: "Palette App Mode Services" -metaDescription: "Reference documentation for each service available in Palette App Mode." -hideToC: true -fullWidth: true ---- - -# Out-of-the-box Services - -import {Content} from "shared/layouts/Default"; -import AppTiers from "shared/components/common/Integrations/AppTiers" - -Palette Dev Engine contains a set of services commonly used by application authors to support the capabilities of an application. You can use the following service types out of the box with minimal configuration. - -
- -- **Messaging System Services**: A messaging system service is a platform that enables the exchange of messages between users. It allows people to send and receive messages in real time using different devices and communication channels. - - -- **Object Storage Services**: Object storage is a data storage solution for unlimited, unstructured data like images, videos, and backups. It's managed as objects, not files or blocks, and is scalable and durable. - - -- **Database Services**: A database stores structured data electronically for fast search and retrieval. It's commonly used for applications and websites to store information such as user data, transactions, and analytics. - - -- **Security Services**: Security services are used to protect your application from unauthorized access and to ensure that your application is compliant with security standards. - - -Select a category to narrow the list of available services and learn more about a specific offering. - - - - diff --git a/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/0-mongo-db.md b/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/0-mongo-db.md deleted file mode 100644 index 805a90d1cb..0000000000 --- a/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/0-mongo-db.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: "MongoDB" -metaTitle: "Palette Dev Engine MongoDB Service" -metaDescription: "Palette Dev Engine MongoDB Service" -hideToC: false -type: "appTier" -category: ['databases'] -fullWidth: false -logoUrl: "https://newrelic.com/sites/default/files/styles/800w/public/2021-10/mongo_logo.jpg?itok=Z1PabBZB" ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Mongo DB - -[MongoDB](https://www.mongodb.com/) is a developer data platform that quickly builds applications with optimal performance and scalability. It provides data distribution and mobility across multiple cloud environments. In addition, this multi-cloud database service provides you with resilience, data privacy, and security. - -# Add MongoDB to an App Profile - -Use the following steps to add MongoDB to an app profile. - -
- -## Prerequisite - -A Spectro Cloud [account](https://www.spectrocloud.com/get-started/). - -
- -## Enablement - -You can use the following steps to learn how to add MongoDB to your app profile. - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. On the right side of the window, click the **User Menu** to expand it and select **Switch to App Mode**. - - -3. From the **Main Menu** click **App Profiles** to create a new profile. Check out the [Create an App Profile](/devx/app-profile/create-app-profile/) guide to learn how. Provide the following basic information and click **Next**. - -| **Parameter** | **Description** | -|-------------------------|-----------------| -|Application Profile Name | A custom name for the App Profile.| -|Version (optional) | The default value is 1.0.0. You can create multiple versions of an App Profile using the format **`major.minor.patch`**. -|Description (optional) | Description of the App Profile. | -|Tag (optional) | Assign tags to the app profile.| - - -4. Select **MongoDB** from the database services and start the configuration. - - -5. Provide the following information to the wizard: - * **Name:** The DB name. You can use the default Palette-generated name or create a custom name. - * **Username:** The user name for database access control. - * **Password:** The password for the username. - * **Database Volume Size:** Select the volume size for the database. Ensure you stay within the storage amount available in the cluster group and virtual clusters. - - * **Version:** Select the version from the **Version** drop-down. - -6. Click **Save Changes**. -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. - - -2. Navigate to the left **Main Menu** and select **Apps**. - - - -3. Select the application that contains MongoDB. - - - -4. Validate your application is displaying the green status. The color code in the app profile box shows the status of the service deployment. - -|**Color Code**| **Description**| -|--------------|--------------| -|Green| Successfully Deployed| -|Blue | Under Deployment| -|Red | Error State| - - -# Output Variables - -The exposed output variables of this service layer may be used in other service layers. These output variables are typically used for connectivity purposes. - -| Parameter | Output Variable | Description | -|------------------------|-------------------------------------------------------------------------------------|-------------------------------------------------| -| Database Username | `{{.spectro.app.$appDeploymentName..USERNAME}}` | The database user name. | -| Database User Password | `{{.spectro.app.$appDeploymentName..PASSWORD}}` | The password of the database user name. | -| Connection String | `{{.spectro.app.$appDeploymentName..MONGO_URI}}` | The MongoDB connection string that contains the Kubernetes service hostname of the database. The connection string is prefixed with `mongodb://` -| DNS Seed | `{{.spectro.app.$appDeploymentName..MONGO_URI_SRV}}` | Represents the MongoDB DNS seed list connection format. The SRV indicates to the client that the host name that follows corresponds to a DNS SRV record. Contains the prefix `mongodb+srv` | - - -# Database Password - -You can get the database password by reading the content of the Kubernetes secret created for the database user. To retrieve the password for the MongoDB database user, use the following command format. - -```shell -kubectl get secret -- \ - --namespace --ns --output jsonpath='{.data.password}' | base64 --decode -``` - -Replace the values with the respective names. 
- - * app-name: represents the name of the app provided during the Palette app creation process. - * service-name: The name of the service layer in the app profile. - * user-name: The name of the database user. - - -#### Example: - -- App Name: `app-tarfful` - -- Service Name: `mongodb-1` - -- Database User: `myuser` - -```shell -kubectl get secret app-tarfful-mongodb-1-myuser \ - --namespace app-tarfful-mongodb-1-ns --output jsonpath='{.data.password}' | base64 --decode -``` -#### Output: -```shell -.Hr1}%DrA2MFf -``` - - -# Next Steps - -Palette Dev Engine removes the traditional challenges encountered when deploying a MongoDB instance. You can add MongoDB to your application profile and get started with MongoDB today. Check out the [MongoDB Tutorials](https://www.mongodb.com/docs/manual/tutorial/) to learn how to integrate MongoDB with your applications. - - -# Resources - - -- [MongoDB Documentation](https://www.mongodb.com/docs/) - - -- [MongoDB Tutorials](https://www.mongodb.com/docs/manual/tutorial/) - - -- [MongoDB Libraries](https://www.mongodb.com/docs/drivers/) - - - diff --git a/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/1-mysql.md b/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/1-mysql.md deleted file mode 100644 index ba1d940348..0000000000 --- a/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/1-mysql.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: "MySQL" -metaTitle: "Palette Dev Engine MySQL Service" -metaDescription: "Palette Dev Engine MySQL Service" -hideToC: false -type: "appTier" -category: ['databases'] -fullWidth: false -logoUrl: "https://registry.dev.spectrocloud.com/v1/mysql-operator/blobs/sha256:2d59bc428916752528280eac03330d712164163e2f3c476409f5c25d8a7c2778?type=image/png" ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# MySQL - -[MySQL](https://www.mysql.com/) is an open-source relational database management system commonly used in web applications and other software that requires a database. It is known for its reliability, ease of use, and flexibility. MySQL is covered under the GNU license and uses structured query language (SQL) to manage data with the following properties: - -
- -* Creates a database for storing and manipulating data and defining the relationship of each table. - - -* Clients can retrieve and manipulate data by creating SQL queries and submitting them to the MySQL instance. - - -# Add MySQL to App Profile - -Use the following steps to add MySQL to an app profile. - -
- -## Prerequisite - -A Spectro Cloud [account](https://www.spectrocloud.com/get-started/). - -
- -## Enablement - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. On the right side of the window, click on the **User Menu**, then select **Switch to App Mode**. - - -3. Navigate to the left **Main Menu** and click **App Profiles** to create a [new App Profile](/devx/app-profile/create-app-profile/). Provide the following basic information and click **Next**. - -| **Parameter** | **Description** | -|-----------------------------|-----------------| -|Application Profile Name | A custom name for the app profile.| -|Version (optional) | The default value is 1.0.0. You can create multiple versions of an App Profile using the format **`major.minor.patch`**.| -|Description (optional) | Description of the app profile.| -|Tag (optional) | Assign tags to the app profile.| - - -4. Select **MySQL** from the database services and start the configuration. - - -5. Provide the following information to the wizard: - * **Name:** The database name. You can use the auto-generated name or create a custom name. - * **Root Password:** The root password for the database service. - - * Database Volume Size (GiB): Select the volume size for the database. Ensure you stay within the storage amount available in the cluster group and virtual clusters. - - * Select the version from the **Version** drop-down menu. - -6. Click on **Save Changes**. - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. - - -2. Navigate to the left **Main Menu** and select **Apps**. - - - -3. Select the application that contains MySQL. - - - -4. Validate your application is displaying the green status. The color code in the app profile box shows the status of the service deployment. - -|**Color Code**| **Description**| -|--------------|--------------| -|Green| Successfully Deployed| -|Blue | Under Deployment| -|Red | Error State| - - -# Output Variables - -The exposed output variables of this service layer may be used in other service layers. These output variables are typically used for connectivity purposes: - -| Parameter | Output Variable | Description | -|------------------------|-------------------------------------------------------------------------------------|-------------------------------------------------| -| Database Root Password | `{{.spectro.app.$appDeploymentName..ROOT_PASSWORD}}` | The root password of the MySQL database. | -| Service Hostname | `{{.spectro.app.$appDeploymentName..MYSQLMSTR_SVC}}` | The Kubernetes service hostname for the database. | -| Service Port | `{{.spectro.app.$appDeploymentName..MYSQLMSTR_SVC_PORT}}` | The exposed ports for the database service. | -| Namespace | `{{.spectro.app.$appDeploymentName..MYSQLMSTR_SVC_NAMESPACE}}` | The Kubernetes namespace the MySQL database is deployed to. | - - - -# Database Password - -You can get the database password by reading the content of the Kubernetes secret created for the database. To retrieve the password for the MySQL database root user, use the following command format. - -```shell -kubectl get secret --user \ - --namespace --ns --output jsonpath='{.data.ROOT_PASSWORD}' | base64 --decode -``` - -Replace the values with the respective names. - - * app-name: represents the name of the app provided during the app creation process. - * service-name: The name of the service layer in the app profile. 
- -#### Example: - -- App Name: `app-tarfful` - -- Service Name: `mysql-2` - - -```shell -kubectl get secret app-tarfful-mysql-2-user \ - --namespace app-tarfful-mysql-2-ns --output jsonpath='{.data.ROOT_PASSWORD}' | base64 --decode -``` -#### Output: -```shell -,U31nQ@T2tN4uM -``` - -# Next Steps - -You can add MySQL to your application profile and start integrating MySQL with your applications. To learn more about integrating MySQL with your applications, check out the [MySQL](https://redis.io/docs/manual/) documentation from Oracle. - - - -# Resources - -- [MySQL Documentation](https://dev.mysql.com/doc/) - - -- [MySQL Tutorial](https://dev.mysql.com/doc/refman/8.0/en/tutorial.html) \ No newline at end of file diff --git a/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/10-cockroach-db.md b/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/10-cockroach-db.md deleted file mode 100644 index 5c712f27f2..0000000000 --- a/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/10-cockroach-db.md +++ /dev/null @@ -1,163 +0,0 @@ ---- -title: "CockroachDB" -metaTitle: "CockroachDB" -metaDescription: "Learn how to use CockroachDB with Palette Dev Engine." -hideToC: false -type: "appTier" -category: ['databases'] -hiddenFromNav: false -fullWidth: false -logoUrl: "https://upload.wikimedia.org/wikipedia/en/thumb/3/31/Cockroach_Labs_Logo.png/220px-Cockroach_Labs_Logo.png" ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# CockroachDB - -CockroachDB is a [distributed SQL database](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) designed for cloud-native environments. CockroachDB provides a reliable and scalable solution for managing data across multiple nodes and regions. Its architecture automates data replication, sharding, and rebalancing, By simplifying operational tasks, Cockroach enables developers to concentrate on building their applications. - -With a focus on strong consistency and horizontal scalability, CockroachDB supports fast transactions and real-time data insights. Its fault-tolerant and self-healing capabilities help reduce downtime and ensure data accuracy. As a result, CockroachDB offers a stable and efficient database solution for developers looking to build robust applications in today's demanding digital landscape. - - - -# Deploy CockroachDB - -Palette users can deploy CockroachDB to a virtual cluster by using the following steps. - -## Prerequisite - -- A Spectro Cloud [account](https://www.spectrocloud.com/get-started/). - - -- A Virtual Cluster with the following minimum resources. - - 8 CPU - - 8 GB of Memory - - 8 GB of Storage. - - -## Enablement - -1. Log in to [Palette](https://console.spectrocloud.com). - - - -2. On the right side of the window, click on the **User Menu** and select **Switch to App Mode**. - - - -3. Navigate to the left **Main Menu** and click on **App Profiles** to create a new app profile. Review [Create an App Profile](/devx/app-profile/create-app-profile/) for more information. Provide the following basic information and click **Next**. - -| Parameter | Description | -|-----------------------------|-----------------| -|Application Profile Name | A custom name for the app profile.| -|Version (optional) | The default value is 1.0.0. You can create multiple versions of an app profile using the format **`major.minor.patch`**. 
-|Description (optional) | Description of the app profile. | -|Tag (optional) | Assign tags to the app profile.| - - -4. Select the **CockroachDB** service and start the configuration. - - - -5. Provide the following information to the wizard: - * **Name**: The application name. - - - * **Username**: The user name for database access control. - - - * **dbPassword**: Security password for the DB service. - - - * **Database Name**: The name of the database to target. - - - * **PersistentVolumeClaim Size (GiB)**: Select the volume according to the storage volume available in the cluster group and virtual clusters. Ensure you do not exceed the maximum storage size for your virtual cluster. - - -6. Save your changes. - - -7. Deploy the app profile to a Palette Virtual Cluster. Use the [Deploy a Virtual Cluster](/clusters/palette-virtual-clusters/deploy-virtual-cluster#deployavirtualcluster) guide for additional guidance or check out the [Deploy an Application using Palette Dev Engine](/devx/apps/deploy-app) tutorial. - - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. - - -2. Navigate to the left **Main Menu** and select **Apps**. - - - -3. Select the application that contains CockroachDB. - - - -4. Validate your application is displaying the green status. The color code in the app profile box shows the status of the service deployment. - -|**Color Code**| **Description**| -|--------------|--------------| -|Green| Successfully Deployed| -|Blue | Under Deployment| -|Red | Error State| - - -# Output Variables - -The exposed output variables of this service layer may be used in other service layers. These output variables are typically used for connectivity purposes: - -| Parameter | Output Variable | Description | -|------------------------|-------------------------------------------------------------------------------------|-------------------------------------------------| -| Database Username | `{{.spectro.app.$appDeploymentName.database-.COCKROACHDBMSTR_USERNAME}}` | The database user name. | -| Database User Password | `{{.spectro.app.$appDeploymentName.database-.COCKROACHDBMSTR_PASSWORD}}` | The password of the database user name. | -| Database Name | `{{.spectro.app.$appDeploymentName..COCKROACHDBMSTR_DB_NAME}}` | The name of the database. -| Service Hostname | `{{.spectro.app.$appDeploymentName.database-.COCKROACHDBMSTR_SVC}}` | The Kubernetes service hostname for the database. | -| Service Port | `{{.spectro.app.$appDeploymentName.database-.COCKROACHDBMSTR_SVC_PORT}}` | The exposed ports for the database service. | -| Service Namespace | `{{.spectro.app.$appDeploymentName.database-.COCKROACHDBMSTR_SVC_NAMESPACE}}` | The namespace of the service. | - - -# Database Password - -You can get the database secret by reading the content of the Kubernetes secret created for the database user. To retrieve the password for the Redis database, use the following command format. - -
- -```shell -kubectl get secret <app-name>-<service-name>-user \ - --namespace <app-name>-<service-name>-ns --output jsonpath='{.data.password}' | base64 --decode -``` - -Replace the values with the respective names. - - * app-name: The name of the app provided during the app creation process. - * service-name: The name of the service layer in the app profile. - -#### Example: - -- App Name: `app-tion-medon` - -- Service Name: `cockroachdb-1` - - -```shell -kubectl get secret app-tion-medon-cockroachdb-1-user \ - --namespace app-tion-medon-cockroachdb-1-ns --output jsonpath='{.data.password}' | base64 --decode -``` -#### Output: -```shell -.Hr1}%DrA2MFf -``` -# Next Steps - -To learn more about developing with CockroachDB, check out the [CockroachDB Developer Guide](https://www.cockroachlabs.com/docs/stable/developer-guide-overview.html). The developer guide is a great resource for understanding how to get started with CockroachDB and build applications that are scalable, resilient, and secure. - - -# Resources - -- [CockroachDB Official Documentation](https://www.cockroachlabs.com/docs/) - - -- [Developer Guide](https://www.cockroachlabs.com/docs/stable/developer-guide-overview.html) \ No newline at end of file diff --git a/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/2-postgresql-db.md b/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/2-postgresql-db.md deleted file mode 100644 index a27d92a41a..0000000000 --- a/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/2-postgresql-db.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -title: "Postgres" -metaTitle: "Palette Dev Engine Postgres Service" -metaDescription: "Palette Dev Engine Postgres Service" -hideToC: false -type: "appTier" -category: ['databases'] -fullWidth: false -logoUrl: 'https://upload.wikimedia.org/wikipedia/commons/2/29/Postgresql_elephant.svg' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Deploy Postgres - -Palette supports [Postgres](https://www.postgresql.org/) as a database service. Postgres is a powerful open-source object-relational database system with over 35 years of active development and a strong reputation for reliability, feature robustness, and performance. Postgres uses and extends the SQL language combined with many features that safely store and scale the most complicated data workloads.
- -## Prerequisites - -The following are the requirements for using Postgres in Palette: - -* Do not use the Postgres user names `postgres` and `admin`. These user names are reserved for internal system operations and will cause internal conflicts. - - -* The user name format does not support the special character hyphen (-). For example, `name-1` is not supported. - - -* Clients must set `sslMode=require` or a stricter setting, as the server instance requires encryption for all connections. Review the [Postgres SSL documentation](https://www.postgresql.org/docs/current/libpq-ssl.html) to learn more about the SSL modes. - - -# Add Postgres to an App Profile - - -## Prerequisite - -A Spectro Cloud [account](https://www.spectrocloud.com/get-started/). - -## Enablement - -You can use the following steps to learn how to add Postgres to your app profile. - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. On the right side of the window, click the **User Menu** to expand it and select **Switch to App Mode**. - - -3. Navigate to the left **Main Menu** and click on **App Profiles** to create a [new App Profile](/devx/app-profile/create-app-profile/). Provide the following basic information and click **Next**. - -| **Parameter** | **Description** | -|-------------------------|-----------------| -|Application Profile Name | A custom name for the App Profile.| -|Version (optional) | The default value is 1.0.0. You can create multiple versions of an App Profile using the format **`major.minor.patch`**.| -|Description (optional) | Description of the app profile. | -|Tag (optional) | Assign tags to the app profile.| - - -4. Select **Postgres** from the database services and start the configuration. - - -5. Provide the following information to the wizard: - - * **Name:** The database service name. You can use the auto-generated name or create a custom name. - - - * **Username:** The user name for database access control. - - - * **Password:** Security password for the DB service. - - - - -You can use the default system-generated password. If the default password is used, it can be retrieved from the Postgres secret that is created for the password. Review the [Database Password](/devx/app-profile/services/service-listings/postgresql-db/#database-password) section for guidance. - - - - - * **Database Volume Size (GiB):** Select the volume size for the database. Ensure you stay within the storage amount available in the cluster group and virtual clusters. - - * **Version:** Select the version from the **Version** drop-down menu. - -6. Save your changes. - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. - - -2. Navigate to the left **Main Menu** and select **Apps**. - - - -3. Select the application that contains Postgres. - - - -4. Validate your application is displaying the green status. The color code in the app profile box shows the status of the service deployment. - -|**Color Code**| **Description**| -|--------------|--------------| -|Green| Successfully Deployed| -|Blue | Under Deployment| -|Red | Error State| - - -# Output Variables - -The exposed output variables of this service layer may be used in other service layers.
These output variables are typically used for connectivity purposes: - -| Parameter | Output Variable | Description | -|------------------------|-------------------------------------------------------------------------------------|-------------------------------------------------| -| Database Username | `{{.spectro.app.$appDeploymentName.database-.USERNAME}}` | The database user name. | -| Database User Password | `{{.spectro.app.$appDeploymentName.database-.PASSWORD}}` | The password of the database user name. | -| Service Hostname | `{{.spectro.app.$appDeploymentName.database-.POSTGRESMSTR_SVC}}` | The Kubernetes service hostname for the database. | -| Service Port | `{{.spectro.app.$appDeploymentName.database-.POSTGRESMSTR_SVC_PORT}}` | The exposed ports for the database service. | - - -# Database Password - -You can get the database password by reading the content of the Kubernetes secret created for the database user. To retrieve the password for the Postgres database user, use the following command format. - -```shell -kubectl get secret <app-name>-<service-name>-postgres-<user-name>-credentials \ - --namespace <app-name>-<service-name>-ns --output jsonpath='{.data.password}' | base64 --decode -``` - -Replace the values with the respective names. - - * app-name: The name of the app provided during the app creation process. - * service-name: The name of the service layer in the app profile. - * user-name: The name of the database user. - - -#### Example: - -- App Name: `app-tarfful` - -- Service Name: `postgresql-3` - -- Database User: `pguser` - -```shell -kubectl get secret app-tarfful-postgresql-3-postgres-pguser-credentials \ - --namespace app-tarfful-postgresql-3-ns --output jsonpath='{.data.password}' | base64 --decode -``` -#### Output: -```shell -zFniawyxEVdFtSF9uPfDsjFlOnAeDcrpndi3ReaUbqSGTMSnZ1gawSWkJCLabZR9 -``` - -# Next Steps - -Add Postgres to your application profile and explore all the capabilities Postgres has to offer. The official Postgres documentation has several [tutorials](https://www.postgresql.org/docs/online-resources/) to help you learn more about Postgres and how to leverage Postgres with your applications. - - -# Resources - -- [Postgres Documentation](https://www.postgresql.org/docs/) - - -- [Community Postgres Tutorials](https://www.postgresqltutorial.com/) - - -- [Postgres Tutorials](https://www.postgresql.org/docs/online-resources/) - - -- [Postgres SSL documentation](https://www.postgresql.org/docs/current/libpq-ssl.html) diff --git a/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/3-redis-db.md b/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/3-redis-db.md deleted file mode 100644 index fbf76c79ff..0000000000 --- a/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/3-redis-db.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: "Redis" -metaTitle: "Palette Dev Engine Redis Database Service" -metaDescription: "Palette Dev Engine Redis Database Service" -hideToC: false -type: "appTier" -category: ['databases'] -hiddenFromNav: false -fullWidth: false -logoUrl: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSjxG5Qb38rX39m1M2p1W4t8H70OKpRY2breg&usqp=CAU" ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Redis - -[Redis](https://redis.io/docs/about/) is an open-source (BSD licensed), in-memory data structure store used as a data cache store or database service.
Redis has built-in replication, Lua scripting, least recently used eviction, transactions, and different levels of on-disk persistence capabilities. In addition, Redis provides high availability via Redis Sentinel and automatic partitioning with Redis Cluster. - -# Add Redis to an App Profile - -Use the following steps to add Redis to an app profile. - -
- -## Prerequisite - -A Spectro Cloud [account](https://www.spectrocloud.com/get-started/). - -
- -## Enablement - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. On the right side of the window, click on the **User Menu** and select **Switch to App Mode**. - - -3. Navigate to the left **Main Menu** and click on **App Profiles** to create a [new App Profile](/devx/app-profile/create-app-profile/). Provide the following basic information and click **Next**. - -| Parameter | Description | -|-----------------------------|-----------------| -|Application Profile Name | A custom name for the app profile.| -|Version (optional) | The default value is 1.0.0. You can create multiple versions of an app profile using the format **`major.minor.patch`**.| -|Description (optional) | Description of the app profile. | -|Tag (optional) | Assign tags to the app profile.| - - -4. Select the **Redis DB** service and start the configuration. - - -5. Provide the following information to the wizard: - * **Name:** The database name. - * **Password:** The password for the database service. - * **Database Volume Size (GiB):** Select the volume size based on the storage available in the cluster group and virtual clusters. - -6. Save your changes. - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. - - -2. Navigate to the left **Main Menu** and select **Apps**. - - - -3. Select the application that contains Redis. - - - -4. Validate your application is displaying the green status. The color code in the app profile box shows the status of the service deployment. - -|**Color Code**| **Description**| -|--------------|--------------| -|Green| Successfully Deployed| -|Blue | Under Deployment| -|Red | Error State| - - -# Output Variables - -This service layer exposes the following output variables. Use these variables when connecting higher-level services with the database: - - - -| Parameter | Output Variable | Description | -|------------------------|-------------------------------------------------------------------------------------|-------------------------------------------------| -| Database Username | `{{.spectro.app.$appDeploymentName..USERNAME}}` | The database user name. | -| Database User Password | `{{.spectro.app.$appDeploymentName..PASSWORD}}` | The password of the database user name. | -| Service Hostname | `{{.spectro.app.$appDeploymentName..REDISMSTR_SVC}}` | The Kubernetes service hostname for the database. | -| Service Port | `{{.spectro.app.$appDeploymentName..REDISMSTR_SVC_PORT}}` | The exposed port for the database service. | -| Namespace | `{{.spectro.app.$appDeploymentName..REDISMSTR_NS}}` | The Kubernetes namespace the Redis database is deployed to. | - - -# Database Password - -You can get the database password by reading the content of the Kubernetes secret created for the database user. To retrieve the password for the Redis database, use the following command format. - -```shell -kubectl get secret <app-name>-<service-name>-redis-auth \ - --namespace <app-name>-<service-name>-ns --output jsonpath='{.data.password}' | base64 --decode -``` - -Replace the values with the respective names. - - * app-name: The name of the app provided during the app creation process. - * service-name: The name of the service layer in the app profile.
- -#### Example: - -- App Name: `app-tarfful` - -- Service Name: `redis-4` - - -```shell -kubectl get secret app-tarfful-redis-4-redis-auth \ - --namespace app-tarfful-redis-4-ns --output jsonpath='{.data.password}' | base64 --decode -``` -#### Output: -```shell - .Hr1}%DrA2MFf -``` - -# Next Steps - -You can add Redis to your application profile and start integrating Redis with your applications. To learn more about integrating Redis with your applications, check out the [Using Redis](https://redis.io/docs/manual/) documentation from Redis. - - -# Resources - -- [Using Redis](https://redis.io/docs/manual/) diff --git a/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/4-vault.md b/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/4-vault.md deleted file mode 100644 index 565121f73e..0000000000 --- a/content/docs/04.5-devx/05-app-profile/08-services/50-service-listings/4-vault.md +++ /dev/null @@ -1,228 +0,0 @@ ---- -title: "Vault" -metaTitle: "Vault" -metaDescription: "Learn how to use Vault with Palette Dev Engine." -hideToC: false -type: "appTier" -category: ['security'] -hiddenFromNav: false -fullWidth: false -logoUrl: "https://icon-library.com/images/padlock-icon-png/padlock-icon-png-29.jpg" ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Vault - -Palette Dev Engine (PDE) users can deploy Vault onto their virtual cluster using the out-of-the-box Vault offering. Vault deployed through PDE is using Banzai Cloud Bank-Vaults. Bank-Vaults is a wrapper for the official [Vault](https://www.vaultproject.io/) client. Vault is a tool that helps you securely manage and protect sensitive information, like passwords, API keys, and encryption keys. The Bank-Vaults client enhances the official Vault client by adding automatic token renewal, built-in Kubernetes support, and a dynamic database credential provider. - -Vault keeps these secrets safe by locking them in a virtual "vault" and only allows authorized users to access them. Vault also tracks who has accessed which secrets and when, making it easier to maintain security. You can use Vault to govern access to secrets, automate application delivery, and consume secrets programmatically. - -Vault is deployed behind the scenes through the use of the [Bank-Vaults Vault Operator Helm Chart](https://github.com/banzaicloud/bank-vaults/tree/main/charts/vault-operator). - -
- - - -Vault is deployed as a single container in the virtual cluster, and the container is not tied to any particular node. - - - - -# Deploy Vault - -Use the following steps to learn how to deploy Vault to your virtual cluster. - -## Prerequisites - -- A Spectro Cloud [account](https://www.spectrocloud.com/get-started/). - - -- A Virtual Cluster with at least the following minimum resources. - - 4 CPU - - 6 GB Memory - - 6 GB Storage - - -- Kubernetes 1.6.x or greater. - -
- -## Enablement - -1. Log in to [Palette](https://console.spectrocloud.com). - - - -2. Click on the **User Menu** at top right, and select **Switch to App Mode**. - - - -3. Navigate to the left **Main Menu** and click on **App Profiles** to create a [new App Profile](/devx/app-profile/create-app-profile/). Provide the following basic information and click **Next**. - -| Parameter | Description | -|-----------------------------|-----------------| -|Application Profile Name | A custom name for the app profile.| -|Version (optional) | The default value is 1.0.0. You can create multiple versions of an app profile using the format `major.minor.patch`. -|Description (optional) | Description of the app profile. | -|Tag (optional) | Assign tags to the app profile.| - - -4. Select the **Vault** service and start the configuration. - - - -5. Provide the following information to the wizard: - * **Name:** The application name. - * **PersistentVolumeClaim Size (GiB):** Select the volume as per the storage volume available in the cluster group and virtual clusters. Ensure you do not exceed the maximum storage size for your virtual cluster. - - -6. Save your changes. - - -7. Deploy the app profile to a Palette Virtual Cluster. Use the [Deploy a Virtual Cluster](/clusters/palette-virtual-clusters/deploy-virtual-cluster#deployavirtualcluster) guide for additional guidance or check out the [Deploy an Application using Palette Dev Engine](/devx/apps/deploy-app) tutorial. - - -## Validate - -You can validate the Vault instance deployed successfully by using the following steps. - -1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. - - -2. Navigate to the left **Main Menu** and select **Apps**. - - -3. Select your application that contains Vault to view its details page. - - -4. Ensure the **Status** is **Deployed** and that the **Vault** service has a green dot next to it. - - -5. Next, click on the **Virtual Cluster** link in the App details page. - - -6. Click the URL to download the **kubeconfig**. - - -7. Set up your local kubectl environment to use the **kubeconfig** file you downloaded. Review the [Access Cluster with CLI](/clusters/cluster-management/palette-webctl) guide for additional guidance. - - -8. Export the following environment variables to prepare your environment to interact with Vault. - -
- - ```shell - export VAULT_ADDR=https://127.0.0.1:8200 - ``` - -
- - ```shell - export VAULT_SKIP_VERIFY=true - ``` - -9. Configure port forwarding between your local workstation and the pod hosting Vault. Use the following commands to configure the port forward. - -
- - ```shell - VAULT_NAMESPACE=$(kubectl get pods --selector app.kubernetes.io/name=vault --all-namespaces --output jsonpath='{.items[0].metadata.namespace}') && \ - kubectl port-forward $(kubectl get pods --selector app.kubernetes.io/name=vault --all-namespaces --output jsonpath='{.items[0].metadata.name}') 8200:8200 --namespace $VAULT_NAMESPACE - ``` - -
- - ```shell - kubectl port-forward $(kubectl get pods --selector app.kubernetes.io/name=vault --all-namespaces --output jsonpath='{.items[0].metadata.name}') 8200:8200 --namespace $VAULT_NAMESPACE - ``` - -10. Open your browser and visit [https://localhost:8200/ui](https://localhost:8200/ui) to access the Vault UI. You will receive a warning due to the usage of a self-signed certificate but you can ignore this warning. - -To acquire the Vault root token, review the [Vault Credentials](/devx/app-profile/services/service-listings/vault#vaultcredentials) section. - - -# Output Variables - -The exposed output variables. Use these variables when connecting higher-level services with Vault: - - - -| Parameter | Output Variable | Description | -|------------------------|-------------------------------------------------------------------------------------|-------------------------------------------------| -| Vault Root Token | `{{.spectro.app.$appDeploymentName..VAULT_ROOT_TOKEN}}` | The root token of the Vault instance. | -| Service Hostname | `{{.spectro.app.$appDeploymentName..VAULTMSTR_SVC}}` | The Kubernetes service hostname for the Vault service. | -| Service Port | `{{.spectro.app.$appDeploymentName..VAULTMSTR_SVC_PORT}}` | The exposed port for the Vault service. | -| Namespace | `{{.spectro.app.$appDeploymentName..VAULTMSTR_SVC_NAMESPACE}}` | The Kubernetes namespace the Vault instance is deployed to. | - - -# Vault Credentials - -The Vault root token and the unseal keys are stored as a Kubernetes secret inside the virtual cluster. You can retrieve the Vault root token by following these steps.

- - -1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. - - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - - -3. Select the cluster that has Vault installed to view its details page. - - - -4. Download the cluster **kubeconfig** file. - - - -5. Set up your local kubectl environment to use the **kubeconfig** file you downloaded. Review the [Access Cluster with CLI](/clusters/cluster-management/palette-webctl) guide for additional guidance. - - -6. You need to get the Vault namespace and application name. Issue the following command to get the unique values. - -
- - ```shell - VAULT_NAMESPACE=$(kubectl get pods --selector app.kubernetes.io/name=vault --all-namespaces --output jsonpath='{.items[0].metadata.namespace}') && \ - APP_NAME=$(echo "$VAULT_NAMESPACE" | sed 's/-ns$//') - ``` - -2. Next, issue the command below to retrieve the Vault root token. - -
- - ```shell - kubectl get secret $APP_NAME-unseal-keys --output jsonpath='{.data.vault-root}' --namespace $VAULT_NAMESPACE | base64 --decode - ``` - -3. To acquire all five unseal keys, use the following command. - -
- - ```shell - kubectl get secret $APP_NAME-unseal-keys --namespace $VAULT_NAMESPACE --output json \ - | jq -r '.data | to_entries | .[] | select(.key | startswith("vault-unseal-")) | .value | @base64d + "\n"' - ``` - - - - -# Next Steps - -You can add Vault to your application profile and start integrating Vault with your applications. To learn more about integrating Vault with your applications, check out the [Vault App Integrations](https://developer.hashicorp.com/vault/tutorials/app-integration) tutorials from HashiCorp. - - -# Resources - - -- [Vault Documentation](https://developer.hashicorp.com/vault/docs) - - -- [HashiCorp Vault Tutorial](https://developer.hashicorp.com/vault/tutorials) - - -- [Bank-Vaults Vault Operator Helm Chart](https://github.com/banzaicloud/bank-vaults/tree/main/charts/vault-operator) \ No newline at end of file diff --git a/content/docs/04.5-devx/06-apps.md b/content/docs/04.5-devx/06-apps.md deleted file mode 100644 index 6ff987428b..0000000000 --- a/content/docs/04.5-devx/06-apps.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "Apps" -metaTitle: "Palette Dev Engine for Enterprise Developerss" -metaDescription: "Explore Palette Dev Engine as Free Developers" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Overview - -Applications are the combination of an [App Profile](/devx/app-profile) and a [Palette Virtual Cluster](/devx/palette-virtual-clusters). When you specify an application profile and deploy it to a virtual cluster, you create an application. - -Check out the resource links below to learn more about Apps. - -
- - -# Resources - -- [Create and Manage Apps](/devx/apps/create-app) - - -- [App Logs](/devx/apps/logs) - - -- [Deploy an Application using Palette Dev Engine](/devx/apps/deploy-app) \ No newline at end of file diff --git a/content/docs/04.5-devx/06-apps/02-create-app.md b/content/docs/04.5-devx/06-apps/02-create-app.md deleted file mode 100644 index 1d28fd1c7f..0000000000 --- a/content/docs/04.5-devx/06-apps/02-create-app.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: "Create and Manage Apps" -metaTitle: "Create and Manage Apps" -metaDescription: "Learn how to create and manage an app in Palette Dev Engine." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Overview - -Use the following steps to create and deploy an app to a virtual cluster. - - -# Prerequisite - -- An application profile. Use the guide [Create an App Profile](/devx/app-profile/create-app-profile) to learn how to create an app profile. - -
- - - - - A tutorial is available to help you learn how to use Palette Dev Engine by deploying an application. Check out [Deploy an Application using Palette Dev Engine](/devx/apps/deploy-app) to get started with Palette Dev Engine. - - - -## Create a New App - - -1. Login to [Palette](https://console.spectrocloud.com). - - - -2. Navigate to the top right **User Menu** and select **Switch to App Mode**. - - - -3. Select the **Apps** from the left **Main Menu** and click on **New App**. - - - -4. Next, provide the following information to the app creation wizard. - - * **Application name:** A custom name for the application. - - * **App Profile**: Select an app profile from the existing list by clicking **Select App Profile**. - - - -5. Choose a Virtual Cluster deployment option. You have two options available. - - - **Deploy In A Palette Virtual Cluster** - - - **Deploy In An Existing Palette Virtual Cluster** - - Create a new virtual cluster or select an existing one from the available list, depending on your choice - - -6. Click on **Create an application** to complete the application wizard. - - - -The application will begin the deployment process. This may take a few minutes, depending on the number of layers and types of applications specified in the app profile. - - - -# Validate - -1. Log in to [Palette](https://console.spectrocloud.com). - - - -2. Navigate to the top right **User Menu** and select **Switch to App Mode**. - - - -3. Select the **Apps** from the left **Main Menu** and click on **New App**. - - - -4. Review the list and select your application to view the details page. - - -5. Ensure the **Status** is marked as **Deployed**. diff --git a/content/docs/04.5-devx/06-apps/05-deploy-app.md b/content/docs/04.5-devx/06-apps/05-deploy-app.md deleted file mode 100644 index 803fefd302..0000000000 --- a/content/docs/04.5-devx/06-apps/05-deploy-app.md +++ /dev/null @@ -1,1313 +0,0 @@ ---- -title: "Deploy an Application using Palette Dev Engine" -metaTitle: "Deploy an Application using Palette Dev Engine" -metaDescription: "Learn how to deploy applications to a Kubernetes cluster without the traditional overhead accompanied by Kubernetes. Palette’s App Mode reduces the deployment time and complexity when deploying applications to Kubernetes. Learn how to get started with Palette’s App Mode in this tutorial. Get started with the free tier of Palette App Mode" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Deploy an Application using Palette Dev Engine - -Palette’s mission is to reduce the challenges you, as a user, face when interacting with Kubernetes. Whether you are a system administrator or an application developer, Kubernetes can introduce overhead that slows down the development process. One of Palette’s core components, *Dev Engine*, focuses on reducing the application development time by enabling builders to deploy applications to Kubernetes with minimal friction. - -This tutorial will teach you how to deploy single and multiple applications to Kubernetes through Palette’s Dev Engine experience. You will learn about *App Mode*, *App Profiles*, and *Palette Virtual Clusters* and understand how they enable you to deploy applications to Kubernetes quickly with minimal overhead. 
- -# Prerequisites - -To complete this tutorial, you will need the following items. - -- A Spectro Cloud account -- Basic knowledge about containers. - -If you select the Terraform workflow, you will need the following software installed. -- [Docker Desktop](https://www.docker.com/products/docker-desktop/) or another container management tool. - -There are no expenses associated with this tutorial as everything falls under the Palette Free Tier. - - -# Architecture - -The tutorial includes two scenarios, and for each scenario, you will deploy a separate Kubernetes environment. The following diagram illustrates the different layers that will power the tutorial environment. - -![Architecture diagram depicting two virtual clusters](/tutorials/deploy-app/devx_apps_deploy-apps_architecture-diagram.png) - -The top layer is Palette, which is the product platform. Palette can be used in two modes: app mode or cluster mode. Each mode is intended for different use cases and personas, but for this tutorial, you will use app mode. For an in-depth explanation of each mode’s differences, check out the [App Mode and Cluster Mode](/introduction/palette-modes) documentation. - -# Deploy The Environment - -The following steps will guide you through deploying the two scenarios. You will start with the single application scenario to build up your knowledge before deploying the multiple applications scenario. - -From Palette, you will deploy two Palette Virtual Clusters. Palette Virtual Clusters will be referred to as virtual clusters for the rest of the tutorial. Each virtual cluster will be hosted on a host cluster group managed by us, Spectro Cloud, called *beehive*. You can deploy up to two virtual clusters in the beehive group for free. Each scenario’s virtual cluster will sit on the beehive host cluster group. - -
- - - - -Virtual clusters are standalone Kubernetes environments that sit on top of what you would consider a traditional Kubernetes cluster or host cluster. Palette Virtual Clusters are Kubernetes clusters that run as nested clusters within an existing host cluster and share the host cluster resources, such as CPU, memory, and storage. Palette Virtual Clusters use k3s, a highly available, certified Kubernetes distribution designed for production workloads. Palette Virtual Clusters are also powered by vCluster. - - - -You can complete this tutorial by using the Palette console, simulating a manual workflow. Or you may leverage infrastructure as code and complete the tutorial using Terraform. - -
- - - - - - - - -## UI Workflow. - - - -Start by logging in to Palette. From the landing page, click on the user **drop-down Menu** and click on **App Mode**. - - - -![Image with an arrow pointing to the user drop-down Menu](/tutorials/deploy-app/devx_apps_deploy-apps_toggle-app-mode.png) - - - - -From the app mode landing page, navigate to the left **Main Menu** and click on **Virtual Clusters**. Next, click on the button **New Virtual Cluster**. - - - -![View of the virtual cluster list](/tutorials/deploy-app/devx_apps_deploy-apps_virtual-cluster-list.png) - - - -In the following screen, you will be prompted for the cluster group, virtual cluster name, and the cluster size in terms of CPU, memory, and storage. Select beehive for the cluster group, name the cluster `cluster-1`, and allocate 4 CPU, 4 GiB memory, and 2 GiB of storage. Click on **Deploy Virtual Cluster** after you have filled out all the required information. - - - -Palette Dev Engine allows you to deploy up to two virtual clusters into the beehive cluster group. Each virtual cluster requires a minimum of 4 CPU, 4 GiB memory, and 2 GiB storage. When using the beehive cluster, you can allocate a maximum of 12 CPU, 16 Gib memory, and 20 GiB of storage. Check out the [Palette Dev Engine and Quotas](/devx/manage-dev-engine/resource-quota) documentation to learn more about limits. - - - -It will take a few minutes for the virtual cluster to deploy. In the meantime, navigate to the left **Main Menu** and click on **App Profiles**. - - - - -![The App Profile page with arrows guiding](/tutorials/deploy-app/devx_apps_deploy-apps_app-profiles.png) - - - - -App Profiles are templates that contain all the configurations and settings required to deploy applications to virtual clusters. App Profiles provide a way to drive consistency across virtual clusters as you can re-use app profiles and deploy them to different virtual clusters. You can think of app profiles as declarative templates that inform the Kubernetes cluster of the desired application or set of applications. - - - -Click on the **New App Profile** button to start creating your first app profile. Give the app profile the name `hello-universe-ui` and add the tag `scenario-1`. Click on **Next**. The following screen is the service type selection page. You have the option to deploy applications through containers, Helm, or Manifests. You can also consume services such as databases and more. Click on **Container Deployment**. - - - -Name the container `ui`, select a public registry, and provide the image URL `ghcr.io/spectrocloud/hello-universe:1.0.12`. Change the network access to **Public** and add the port `8080`. - - - -![App Profile container creation page with details](/tutorials/deploy-app/devx_apps_deploy-apps_app-profile-creation.png) - - - - -Click on **Review** once you have filled out the provided information. On the next page, click on the **Deploy New App** button. - - - -It’s time to deploy your application to a virtual cluster. Name the application `single-scenario`. For the **App profile** input field, click on the button to the right of the input field to get a list of all your available app profiles. Select the **hello-universe-ui profile** and click on **Confirm**. - - - -Next, click the radio button **Deploy in An Existing Palette Virtual Cluster**. Select **cluster-1** and click on **Create App** to deploy the app profile onto the virtual cluster. - - - -
- - - - - - - -If no clusters are displayed, then **cluster-1** is not yet available. Wait a few more moments and return to the above steps. You can refresh the page, but you must fill out all the required input fields. - - - - - - - -The app profile deployment takes a few moments to finish. You can review the application's deployment progress by navigating to the left **Main Menu** and selecting **Virtual Clusters**. Click on **cluster-1** to view its details page. You can review cluster information, log events, access a remote shell session in the cluster, and more from the cluster details page. - - - -![Cluster details view displaying exposed services](/tutorials/deploy-app/devx_apps_deploy-apps_cluster-details-view.png) - - - -When the application is deployed and ready for use, the **Services** row on the details page will automatically be updated by Palette with the app's public-facing URL. Click on the **:8080** link to view the application. - - - -
- - - - - - - - - -It takes between one to three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request. - - - - - - -![Hello Universe landing page displaying global clicks](/tutorials/deploy-app/devx_apps_deploy-apps_hello-universe.png) - - - -Welcome to [Hello Universe](https://github.com/spectrocloud/hello-universe), a demo application to help you learn more about Palette and its features. Feel free to click on the logo to increase the global counter and for a fun image change. - - - -You have deployed your first application to Palette. Your first application is a single container application with no upstream dependencies. In a production environment, you often deploy applications that consume other services and require connectivity with other resources. The next scenario expands on the single application scenario by adding an API server and Postgres database to simulate a common application architecture encountered in a production environment. - - - -## Deploy Multiple Applications - - - -Create another virtual cluster for the multi-application scenario. From the app mode landing page, navigate to the left **Main Menu** and click on **Virtual Clusters**. Next, click on the **New Virtual Cluster** button. - - - -Add the following details. Select beehive for the cluster group, name the cluster **cluster-2**, add the tag **scenario-2**, and allocate 8 CPU, 12 GiB memory, and 12 GiB of storage. Click on **Deploy Virtual Cluster** after you have filled out all the required information. - - - -It will take a few minutes for the new virtual cluster to deploy. In the meantime, go ahead and navigate to the left **Main Menu** and click on **App Profiles**. - - - -### Postgres - - - -Click on the **New App Profile** button to create your second app profile. Give the app profile the name `hello-universe-complete` and add the tag `scenario-2`. Click on **Next**. This application profile will contain three different applications, and you will create a service configuration for each. The three layers or tiers will together make up the entire application deployment. The order in which you create each layer plays an important role, as it dictates the deployment order. For this scenario, you will deploy the database, the API, and the UI. To create the first layer, select the database service Postgres. - - - - -In the next screen, assign the following values to the Postgres database. - - - -- Name: `postgres-db` - -- Username: `pguser` - -- Database Name: `counter` - -- Database Volume Size: `2` - -- Version: `14` - - - -![Postgres service creation page](/tutorials/deploy-app/devx_apps_deploy-apps_postgres-service-create.png) - - - -Take note of the **Output Variables** section. The Postgres service exposes several output variables to help other applications connect with the database. In the next section, you will use these output variables and other output variables that Palette exposes for each service. You can learn more about output variables by reviewing the app profile [output variables](/devx/app-profile/app-profile-macros) documentation. - - - -Next, navigate to the top left side of the wizard screen and click on the **Actions** button **+**. Go ahead and select **Container Deployment**. - - - -### API - - - -The API is available as a container image. 
To deploy the API successfully, you need to provide the API server with information about the database such as hostname, database user, database name, and password. The required information can be retrieved using Palette's global output variables and the output variables the database service exposes. - - - -Provide the container service with the following information: - - - -- Container Name: `api` - -- Registry: Public - -- Image: `ghcr.io/spectrocloud/hello-universe-api:1.0.8` - -- Network Access: Public - -- Ports: `3000` - - - -Assign the following environment variables to the API service: - - - -| Parameter | Value | -|------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `DB_NAME` | `counter` | -| `DB_HOST` | `{{.spectro.app.$appDeploymentName.postgres-db.POSTGRESMSTR_SVC}}` | -| `DB_PASSWORD` | `{{.spectro.app.$appDeploymentName.postgres-db.PASSWORD}}` | -| `DB_INIT` | `true` | -| `DB_USER` | `{{.spectro.app.$appDeploymentName.postgres-db.USERNAME}}` | -| `DB_ENCRYPTION` | `require` | -| `AUTHORIZATION` | `true` | - - - - - -You can learn more about each environment variable's purpose by reviewing the API server's [documentation](https://github.com/spectrocloud/hello-universe-api#environment-variables). One variable that you should understand in greater detail is the `DB_HOST.` The value of this environment variable is constructed using the output variables the Postgres service exposed. The `{{.spectro.app.$appDeploymentName.postgres-db.POSTGRESMSTR_SVC}}` variable contains the Kubernetes DNS value of the Postgres service container. - -
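If you want to confirm what these values resolve to after the app is deployed, you can inspect the virtual cluster directly. The commands below are a minimal sketch: they assume you have downloaded the virtual cluster's **kubeconfig**, and the `api-ns` namespace and `api` deployment names are illustrative placeholders based on the tier names used in this tutorial.

```shell
# List the services in the virtual cluster. The Postgres service DNS name that
# DB_HOST resolves to follows the <service>.<namespace>.svc.cluster.local pattern.
kubectl get services --all-namespaces

# Print the environment variables rendered into the API container.
# Replace the namespace and deployment names with the tier names you chose.
kubectl exec --namespace api-ns deployment/api -- env | grep DB_
```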
- - - -To learn more about connecting different service layers, refer to the [Service Connectivity](/devx/app-profile/services/connectivity) resource. - - - - - -A virtual cluster is a Kubernetes environment, and because it’s a Kubernetes environment, you can use the [Kubernetes DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) record created for each service and pod. You will have another opportunity to practice this concept when you deploy the UI. - - - -When you have filled out all the required information, navigate to the top left side of the wizard screen and click on the **Actions** button **+**. Select the **Container Deployment** to add the final service layer, the UI. - - - -### UI - - - -This time the UI will point to the API server that you manage. The API server has authentication enabled, so to ensure all API requests are accepted you will provide the UI with the anonymous token. - - - -![A diagram of the reverse proxy architecture](/tutorials/deploy-app/devx_apps_deploys-apps_reverse-proxy-diagram.png) - - - -Provide the UI container with the following information. - -- Container Name: `ui` - -- Registry: Public - -- Image: `ghcr.io/spectrocloud/hello-universe:1.0.12` - -- Network Access: Public - -- Ports: `8080` - - - -Assign the following environment variables to the UI service: - - - -| Parameter | Value | -|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `API_URI` | `http://{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC_EXTERNALHOSTNAME}}:3000` | -| `TOKEN` | `931A3B02-8DCC-543F-A1B2-69423D1A0B94` | - - - -If you want to explore the UI service's environment variables in greater detail, you can review the UI [documentation](https://github.com/spectrocloud/hello-universe). The `API_URI` contains the address of the application load balancer that will be deployed for the API service. - -The output variable `{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC_EXTERNALHOSTNAME}}` is used to retrieve the load balancer URL value. - - -Click on the **Review** button at the bottom of the screen to finalize the app profile. Click on **Deploy New App** in the following screen to deploy the new app profile to cluster-2. - - - -Name the app `multiple-app-scenario`, select the app profile **hello-universe-complete**, pick version **1.0.0** and toggle the radio button **Deploy In An Existing Palette Virtual Cluster**. Select **cluster-2** and click on **Create App**. - - -
- - - - - - - -If cluster-2 is not displayed. Wait a few more moments and return to the above steps. You can refresh the page but you must fill out all the required input fields. - - - - - - - -![App deployment cluster-2](/tutorials/deploy-app/devx_app_deploy-apps_cluster-2-deploy-app.png) - - - - -The app profile deployment takes a few moments to finish. You can review the application's deployment progress by navigating to the left **Main Menu** and selecting **Virtual Clusters**. Click on **cluster-2** to view its details page. - -Once the app is successfully deployed, the cluster details page will expose the public-facing URLs of the services. - - - -![Cluster 2's details page](/tutorials/deploy-app/devx_apps_deploy-apps_cluster-2-details-page.png) - - - -Click on the UI’s service URL for port **8080** to access the Hello Universe application in a three-tier configuration. - - - - - -![View of the self-hosted version of Hello Universe](/tutorials/deploy-app/devx_apps_deploy-app_self-hosted-hello-universe.png) - - - - -The global counter is no longer available. Instead, you have a counter that starts at zero. Each time you click on the center image, the counter is incremented and stored in the Postgres database along with metadata. Also, remember that the reverse proxy injects the Bearer token value in each request sent to the API. - - - -## Cleanup - - - -To remove all resources created in this tutorial, begin by navigating to the left **Main Menu** and select **Apps**. For each application, click on the **three-dots Menu** to expand the options menu and click on the **Delete** button. Repeat this process for each application. - - - -![Apps view with an arrow pointing towards the delete button](/tutorials/deploy-app/devx_apps_deploy-apps_delete-apps-view.png) - - - -Next, in the left **Main Menu**, click on the **Cluster** link to access the clusters page. - -Click on **cluster-1** to access its details page. Click on **Settings** from the details page to expand the settings menu. Click on **Delete** to delete the cluster. You will be asked to enter the cluster name to confirm the delete action. Type the cluster name to proceed with the delete step. Repeat this process for cluster-2. - - - -![Delete a cluster view with arrow](/tutorials/deploy-app/devx_apps_deploy-apps_delete-cluster-view.png) - - - - - - -If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for **Force Delete**. To trigger a force delete, navigate to the respective cluster’s details page and click on **Settings**. Click on the **Force Delete Cluster** to delete the cluster. Palette will automatically remove clusters stuck in the cluster deletion phase for over 24 hours. - - - - -
- - - - -## Terraform Workflow - -The [Spectro Cloud Terraform](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) provider enables you to create and manage Palette resources in a codified manner by leveraging Infrastructure as Code (IaC). There are many reasons why you would want to utilize IaC. A few reasons worth highlighting are: the ability to automate infrastructure, improve collaboration related to infrastructure changes, self-document infrastructure through codification, and track all infrastructure in a single source of truth. If you need to become more familiar with Terraform, check out the [Why Terraform](https://developer.hashicorp.com/terraform/intro) explanation from HashiCorp. - -
- - - -As you go through the Terraform workflow, be aware that high-level concepts from Palette will not be discussed in-depth to optimize the reader experience and focus more on the Terraform concepts that apply to Palette. To better understand the mentioned Palette concepts, review the UI workflow where the concepts are explained in greater detail. - - - -
- - - - - - -Ensure Docker Desktop on your local machine is available. Use the following command and ensure you receive an output displaying the version number. - -
- -```bash -docker version -``` - -Download the tutorial image to your local machine. -
- -```bash -docker pull ghcr.io/spectrocloud/tutorials:1.0.4 -``` - -Next, start the container, and open a bash session into it. - -
- -```shell -docker run --name tutorialContainer --interactive --tty ghcr.io/spectrocloud/tutorials:1.0.4 bash -``` - -Navigate to the tutorial code. - -
- -```shell -cd terraform/hello-universe-tf/ -``` - -
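If you are following the Docker-based setup, you can optionally confirm that the Terraform CLI is available inside the container before proceeding. This is only a sanity check and assumes the tutorials image bundles the `terraform` binary.

```shell
# Verify the Terraform CLI is available inside the tutorial container.
terraform version
```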
- - - - -Open a terminal window to begin the tutorial and download the tutorial code from GitHub. - -
- -```shell -git clone git@github.com:spectrocloud/tutorials.git -``` - -Change directory to the tutorial folder. -
- -```shell -cd tutorials/ -``` - -Check out the following git tag. - -
- -```shell -git checkout v1.0.4 -``` - -Change directory to the tutorial code. - -
- -```shell -cd terraform/hello-universe-tf/ -``` - - -
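If you cloned the repository locally instead, you can optionally verify that you are on the expected tag and that the tutorial's Terraform files are present. This check assumes `git` and the Terraform CLI are installed on your workstation.

```shell
# Confirm the checked-out tag and list the Terraform files used in this tutorial.
git describe --tags
ls *.tf
```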
-
- -Before you can get started with the Terraform code, you need a Spectro Cloud API key. -### API Key - -To create an API key, log in to Palette, and click on the user **User Menu** and select **My API Keys**. - -![Image that points to the user drop-down Menu and points to the API key link](/tutorials/deploy-app/devx_apps_deploy-app_create-api-key.png) - -Next, click on **Add New API Key**. Fill out the required input field, **API Key Name**, and the **Expiration Date**. Click on **Confirm** to create the API key. Copy the key value to your clipboard, as you will use it shortly. - - -### Initialize Terraform - -The tutorial folder contains several Terraform files that you should review and explore. Each file is named after the respective type of Palette resource it supports. Use the following list to gain a high-level overview of the files. - -
- -- **provider.tf** - the provider configuration and version of the provider. -- **inputs.tf** - contains all the Terraform variables and the default values used in the tutorial. -- **outputs.tf** - contains the output variables that are used to expose information. -- **data.tf** - all the data resources that are used to dynamically retrieve data from Palette. -- **virtual-clusters.tf** - the code for the virtual clusters that will be deployed in Palette. -- **application-profiles.tf** - contains the configurations that make up all the app profiles. -- **application.tf** - the configuration that creates a Spectro Cloud app and deploys the app into a virtual cluster. - -The [Spectro Cloud Terraform](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) provider requires credentials to interact with the Palette API. Export the API key as an environment variable so that the Spectro Cloud provider can authenticate with the Palette API. - -```shell -export SPECTROCLOUD_APIKEY=YourAPIKeyHere -``` - -Next, initialize the Terraform provider by issuing the following command. - -```shell -terraform init -``` - -```shell -Initializing the backend... - -Initializing provider plugins... - -Terraform has been successfully initialized! - -You may now begin working with Terraform. Try running "terraform plan" to see -any changes that are required for your infrastructure. All Terraform commands -should now work. - -If you ever set or change modules or backend configuration for Terraform, -rerun this command to reinitialize your working directory. If you forget, other -commands will detect it and remind you to do so if necessary. -``` - -The `init` command downloads all the required plugins and providers specified in **provider.tf** file. In the provider configuration, the scope or context of Palette is set. The provider is configured for the `Default` project, but you can change this value to point to any other projects you may have in Palette. - -
- -```hcl -terraform { - required_providers { - spectrocloud = { - version = ">= 0.11.1" - source = "spectrocloud/spectrocloud" - } - } -} - -provider "spectrocloud" { - project_name = "Default" -} -``` - -To deploy the first scenario, a single application container, you must first create a configuration for the virtual cluster. Look at the virtual cluster resources in **virtual-clusters.tf**, and check out the "cluster-1" resource. The resource specifies the cluster name, the cluster group id, the resource limits, and the tags that will apply to the cluster. - -
- -```hcl -resource "spectrocloud_virtual_cluster" "cluster-1" { - name = var.scenario-one-cluster-name - cluster_group_uid = data.spectrocloud_cluster_group.beehive.id - - resources { - max_cpu = 4 - max_mem_in_mb = 4096 - min_cpu = 0 - min_mem_in_mb = 0 - max_storage_in_gb = "2" - min_storage_in_gb = "0" - } - - tags = concat(var.tags, ["scenario-1"]) - - timeouts { - create = "15m" - delete = "15m" - } -} -``` - -The cluster group id is retrieved from the data resource `spectrocloud_cluster_group.beehive`. The data resource will query the Palette API and retrieve information about the specified cluster group, which is the *beehive* cluster group made available for all Palette users. This resource will create a new virtual cluster that is hosted in the *beehive* cluster group. - -
- -```hcl -data "spectrocloud_cluster_group" "beehive" { - name = var.cluster-group-name - context = "system" -} -``` - -Next, take a look at the **application-profiles.tf** file. The resource `spectrocloud_application_profile.hello-universe-ui` is the resource responsible for creating the app profile for the first scenario. There are several points of interest in this resource that you should be familiar with. Focus on these five key points: - -
- -1. The pack object represents a single tier or layer in the app profile. Inside the pack object, you define all the attributes that make up the specific layer of the app profile. - - -2. The type of app layer. This application is hosted on a container image. Therefore a container pack is specified. Instead of hard coding the value, the data resource `data.spectrocloud_pack_simple.container_pack` is specified. - - -3. A pack requires a registry id. To create the app profile, Terraform needs to know what registry is hosting the pack. For containers, you can use the `Public Repo` hosting most of the Palette packs. This time the data resource `data.spectrocloud_registry.public_registry` is specified to avoid hardcoding values. - - -4. The attribute `source_app_tier` is used to specify the unique id of the pack. All packs are assigned a unique id, including different versions of a pack. To ensure the correct pack is selected, the data resource `data.spectrocloud_pack_simple.container_pack` is used. - - -5. The `values` attribute is used to specify the properties of the specific service. In this case, the properties of the container such as the image name, ports, and service type, are specified. These properties can be provided as an extended string using the [Terraform Heredoc strings](https://developer.hashicorp.com/terraform/language/expressions/strings#heredoc-strings), or you can specify these values as a stringified JSON object. - - - - -```hcl -resource "spectrocloud_application_profile" "hello-universe-ui" { - name = "hello-universe-ui" - description = "Hello Universe as a single UI instance" - version = "1.0.0" - pack { - name = "ui" - type = data.spectrocloud_pack_simple.container_pack.type - registry_uid = data.spectrocloud_registry.public_registry.id - source_app_tier = data.spectrocloud_pack_simple.container_pack.id - values = <<-EOT - pack: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - releaseNameOverride: "{{.spectro.system.appdeployment.tiername}}" - postReadinessHooks: - outputParameters: - - name: CONTAINER_NAMESPACE - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: metadata.namespace - - name: CONTAINER_SVC - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: metadata.annotations["spectrocloud.com/service-fqdn"] - - name: CONTAINER_SVC_EXTERNALHOSTNAME - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: status.load balancer.ingress[0].hostname - conditional: true - - name: CONTAINER_SVC_EXTERNALIP - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: status.load balancer.ingress[0].ip - conditional: true - - name: CONTAINER_SVC_PORT - type: 
lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: spec.ports[0].port - containerService: - serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" - registryUrl: "" - image: ${var.single-container-image} - access: public - ports: - - "8080" - serviceType: load balancer - EOT - } - tags = concat(var.tags, ["scenario-1"]) -} -``` - - - - -A tip for gathering the required values to provide the `values` attribute is to visit the Palette console and create the app profile through the UI. During the app profile creation process, click on the API button to display the API payload. Review the payload's `values` attribute to find all of the properties of the service. You can copy the entire string and pass it to the resource `spectrocloud_application_profile` as an input for the `values` attribute. - - -![UI's ability to display the API object](/tutorials/deploy-app/devx_apps_deploy-apps_ui-api-display.png) - - -The last Terraform resource to review before deploying the application is located in the **application.tf** file. The resource `spectrocloud_application.hello-universe-ui` is what creates the *app*. In Palette, an app combines a virtual cluster and an app profile. When you deploy an app profile into a virtual cluster, you create an app. This resource points to the app profile `spectrocloud_application_profile.hello-universe-ui` and the cluster resource `spectrocloud_virtual_cluster.cluster-1`. The two resources are required to create an app. - -
- - - - - -```hcl -resource "spectrocloud_application" "scenario-1" { - name = "single-scenario" - application_profile_uid = spectrocloud_application_profile.hello-universe-ui.id - - config { - cluster_name = spectrocloud_virtual_cluster.cluster-1.name - cluster_uid = spectrocloud_virtual_cluster.cluster-1.id - } - tags = concat(var.tags, ["scenario-1"]) -} -``` - - - -You can preview the resources Terraform will create by issuing the following command. - -```shell -terraform plan -``` - -``` -// Output condensed for readability -Plan: 3 to add, 0 to change, 0 to destroy. -``` - -The output displays the resources Terraform will create in an actual implementation. If you review the output, you will find the three resources previously discussed in great detail. - -Go ahead and deploy the application by using the `terraform apply` command. - -```shell -terraform apply -auto-approve -``` - -``` -// Output condensed for readability -Apply complete! Resources: 3 added, 0 changed, 0 destroyed. -``` - -Log in to [Palette](https://console.spectrocloud.com), navigate to the left **Main Menu**, and select **Apps**. Click on the **scenario-1** row, which takes you to the application’s overview page. Once you are on the scenario-1 overview page, click on the exposed URL for the service. A hyperlink for port 8080 is available. - - -![scenario-1 overview page with an arrow pointing to the URL](/tutorials/deploy-app/devx_app_deploy-apps_scenario-1-overview.png) - -
-
-
-It takes between one and three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request.
-
-
-Welcome to Hello Universe, a demo application to help you learn more about Palette and its features. Feel free to click on the logo to increase the global counter and for a fun image change.
-
-
-![Hello Universe landing page displaying global clicks](/tutorials/deploy-app/devx_apps_deploy-apps_hello-universe.png)
-
-
-You have deployed your first app profile to Palette. Your first application is a single container application with no upstream dependencies. In a production environment, you often deploy applications that consume other services and require connectivity with other resources. The following scenario expands on the single application scenario by adding an API server and Postgres database to simulate a common application architecture encountered in a production environment.
-
-
-## Deploy Multiple Applications
-
-
-The second scenario contains two additional microservices: an API and a Postgres database. This time, instead of using the global API to store clicks, you will deploy your own API server and Postgres database. The following diagram illustrates the network connectivity path and behavior discussed.
-
-
-![A diagram of the three-tier architecture where the load balancer forwards all requests to the UI container OR the API container](/tutorials/deploy-app/devx_apps_deploys-apps_reverse-proxy-diagram.png)
-
-To deploy the second scenario, you will deploy another instance of each of the three resource types previously discussed.
-
-- `spectrocloud_virtual_cluster` - `cluster-2` - this resource will create the second virtual cluster.
-
-
-- `spectrocloud_application_profile` - `hello-universe-complete` - the application profile that will contain the three services: database, API, and UI.
-
-
-- `spectrocloud_application` - `scenario-2` - the application that will be deployed into cluster-2 and that uses the `spectrocloud_application_profile.hello-universe-complete` app profile.
-
-
-You can review all the resources for the second scenario in the respective Terraform files. You can find the second scenario code after the following comment block in each of the files that contain resources specific to the second scenario.
-
-```hcl
-##########################################
-# Scenario 2: Multiple Applications
-##########################################
-```
-
-
-From a Terraform perspective, there are no significant differences in the authoring experience. The main difference in the second scenario lies in the application profile resource `spectrocloud_application_profile.hello-universe-complete`. The other difference is that the virtual cluster you will deploy in the second scenario, cluster-2, is much larger than cluster-1.
-
-You can add multiple services to an app profile, but you must add a `pack {}` block for each service in the `spectrocloud_application_profile` resource. Take a close look at the `spectrocloud_application_profile.hello-universe-complete` resource below.
-
- -```hcl -resource "spectrocloud_application_profile" "hello-universe-complete" { - count = var.enable-second-scenario == true ? 1 : 0 - name = "hello-universe-complete" - description = "Hello Universe as a three-tier application" - version = "1.0.0" - pack { - name = "postgres-db" - type = data.spectrocloud_pack_simple.postgres_service.type - source_app_tier = data.spectrocloud_pack_simple.postgres_service.id - properties = { - "dbUserName" = var.database-user - "databaseName" = var.database-name - "databaseVolumeSize" = "8" - "version" = var.database-version - } - } - pack { - name = "api" - type = data.spectrocloud_pack_simple.container_pack.type - registry_uid = data.spectrocloud_registry.public_registry.id - source_app_tier = data.spectrocloud_pack_simple.container_pack.id - values = <<-EOT -pack: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - releaseNameOverride: "{{.spectro.system.appdeployment.tiername}}" -postReadinessHooks: - outputParameters: - - name: CONTAINER_NAMESPACE - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: metadata.namespace - - name: CONTAINER_SVC - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: metadata.annotations["spectrocloud.com/service-fqdn"] - - name: CONTAINER_SVC_EXTERNALHOSTNAME - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: status.load balancer.ingress[0].hostname - conditional: true - - name: CONTAINER_SVC_EXTERNALIP - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: status.load balancer.ingress[0].ip - conditional: true - - name: CONTAINER_SVC_PORT - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: spec.ports[0].port -containerService: - serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" - registryUrl: "" - image: ${var.multiple_container_images["api"]} - access: public - ports: - - "3000" - serviceType: load balancer - env: - - name: DB_HOST - value: "{{.spectro.app.$appDeploymentName.postgres-db.POSTGRESMSTR_SVC}}" - - name: DB_USER - value: "{{.spectro.app.$appDeploymentName.postgres-db.USERNAME}}" - - name: DB_PASSWORD - value: "{{.spectro.app.$appDeploymentName.postgres-db.PASSWORD}}" - - name: DB_NAME - value: counter - - name: DB_INIT - value: "true" - - name: DB_ENCRYPTION - value: "${var.database-ssl-mode}" - - name: AUTHORIZATION - value: "true" - EOT - } - pack { - name = "ui" - type = data.spectrocloud_pack_simple.container_pack.type - registry_uid = 
data.spectrocloud_registry.public_registry.id - source_app_tier = data.spectrocloud_pack_simple.container_pack.id - values = <<-EOT - pack: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - releaseNameOverride: "{{.spectro.system.appdeployment.tiername}}" - postReadinessHooks: - outputParameters: - - name: CONTAINER_NAMESPACE - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: metadata.namespace - - name: CONTAINER_SVC - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: metadata.annotations["spectrocloud.com/service-fqdn"] - - name: CONTAINER_SVC_EXTERNALHOSTNAME - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: status.load balancer.ingress[0].hostname - conditional: true - - name: CONTAINER_SVC_EXTERNALIP - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: status.load balancer.ingress[0].ip - conditional: true - - name: CONTAINER_SVC_PORT - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: spec.ports[0].port - containerService: - serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" - registryUrl: "" - image: ${var.multiple_container_images["ui"]} - access: public - ports: - - "8080" - env: - - name: "API_URI" - value: "http://{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC_EXTERNALHOSTNAME}}:3000" - - name: "TOKEN" - value: "${var.token}" - serviceType: load balancer - EOT - } - tags = concat(var.tags, ["scenario-2"]) -} -``` - -Each service has its own `pack {}` and a set of unique properties and values. - -The database service block uses a different data resource, `data.spectrocloud_pack_simple.postgres_service`, to find the Postgres service. If you review the data resource, you will find a different type, `operator-instance`. The Postgres service uses a Postgres [operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) to manage the database inside the virtual cluster. - -
-
-```hcl
-data "spectrocloud_pack_simple" "postgres_service" {
-  name         = "postgresql-operator"
-  type         = "operator-instance"
-  version      = "1.8.2"
-  registry_uid = data.spectrocloud_registry.public_registry.id
-}
-```
-
-Inside the `pack {}` block, the database service uses the `properties` attribute instead of the `values` attribute. The `properties` values provided are the same properties you must fill out when creating the database service through the UI workflow.
-
- -```hcl - pack { - name = "postgres-db" - type = data.spectrocloud_pack_simple.postgres_service.type - source_app_tier = data.spectrocloud_pack_simple.postgres_service.id - properties = { - "dbUserName" = var.database-user - "databaseName" = var.database-name - "databaseVolumeSize" = "8" - "version" = var.database-version - } - } -``` - -If you go further down the app profile stack, you will find the `pack {}` object for the API. A good part of the content provided to the `values` attribute will be removed in the following code snippet to improve readability. Take a closer look at the `env` block inside the `containerService` section. The API server requires a set of environment variables to start properly, such as the database hostname, user, password, and more. The Postgres service lower in the app profile stack exposes output variables you can use to provide information to other services higher up in the app profile stack. - -The `env` section uses the output variables exposed by the Postgres service. Other environment variables specified will be populated during Terraform runtime because they reference Terraform variables. Palette will populate the environment variables referencing a Palette output variable at runtime inside the virtual cluster. - -
- -```hcl -pack { - name = "api" - type = data.spectrocloud_pack_simple.container_pack.type - registry_uid = data.spectrocloud_registry.public_registry.id - source_app_tier = data.spectrocloud_pack_simple.container_pack.id - values = <<-EOT -pack: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - releaseNameOverride: "{{.spectro.system.appdeployment.tiername}}" -postReadinessHooks: - outputParameters: - #.... - #... -containerService: - serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" - registryUrl: "" - image: ${var.multiple_container_images["api"]} - access: public - ports: - - "3000" - serviceType: load balancer - env: - - name: DB_HOST - value: "{{.spectro.app.$appDeploymentName.postgres-db.POSTGRESMSTR_SVC}}" - - name: DB_USER - value: "{{.spectro.app.$appDeploymentName.postgres-db.USERNAME}}" - - name: DB_PASSWORD - value: "{{.spectro.app.$appDeploymentName.postgres-db.PASSWORD}}" - - name: DB_NAME - value: counter - - name: DB_INIT - value: "true" - - name: DB_ENCRYPTION - value: "${var.database-ssl-mode}" - - name: AUTHORIZATION - value: "true" - EOT - } -``` - -The last `pack {}` block in the app profile resource `spectrocloud_application_profile.hello-universe-complete` is for the UI. Like the API service, environment variables are used to initialize the UI and the reverse proxy. The UI service requires the URL of the API service and the URL of the public-facing load balancer. Palette output variables are used to populate these two environment variables. A Terraform variable will populate the authentication token required for all API requests. - -
- -```hcl -pack { - name = "ui" - type = data.spectrocloud_pack_simple.container_pack.type - registry_uid = data.spectrocloud_registry.public_registry.id - source_app_tier = data.spectrocloud_pack_simple.container_pack.id - values = <<-EOT - # .... - # .... - containerService: - serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" - registryUrl: "" - image: ${var.multiple_container_images["ui"]} - access: public - ports: - - "8080" - env: - - name: "API_URI" - value: "http://{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC_EXTERNALHOSTNAME}}:3000" - - name: "TOKEN" - value: "${var.token}" - serviceType: load balancer - EOT - } -``` - - - - - -All container services expose their service address, Kubernetes hostname, and the exposed service ports as output variables. -You will use output variables frequently when creating app profiles in the future. You can learn more about connecting services by referring to the [Service Connectivity](/devx/app-profile/services/connectivity) documentation. - - - - -Open the **inputs.tf** file and set the `variable enable-second-scenario"` default value to `true`. - -
- -```terraform -variable "enable-second-scenario" { - type = bool - description = "Whether to enable the second scenario" - default = true -} -``` - -Next, issue the command `terraform apply` to deploy the second scenario. Notice how the `-var` flag is included with the token value in the command. - -
-
-```shell
-terraform apply -var="token=931A3B02-8DCC-543F-A1B2-69423D1A0B94" -auto-approve
-```
-
-```
-// Output condensed for readability
-Apply complete! Resources: 3 added, 0 changed, 0 destroyed.
-```
-
-Log in to [Palette](https://console.spectrocloud.com), navigate to the left **Main Menu**, and click on **Apps**. Select the **scenario-2** row. When you are on the scenario-2 overview page, click on the exposed URL for the service. Hyperlinks for port 8080 and port 3000 are available.
-
-![A view of the scenario-2 overview page](/tutorials/deploy-app/devx_apps_deploy_scenario-2-overview.png)
-
-Click on the UI’s service URL for port **8080** to access the Hello Universe application in a three-tier configuration.
-
-
-
-
-It takes between one and three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request.
-
-
-
-![View of the self-hosted hello universe app](/tutorials/deploy-app/devx_apps_deploy-app_self-hosted-hello-universe.png)
-
-The global counter is no longer available. Instead, you have a counter that starts at zero. Each time you click on the center image, the counter is incremented and stored in the Postgres database along with metadata.
-
-
-## Cleanup
-
-To remove all resources created in this tutorial, issue the `terraform destroy` command.
-
- -```shell -terraform destroy -var="token=931A3B02-8DCC-543F-A1B2-69423D1A0B94" -auto-approve -``` - -```shell -Destroy complete! Resources: 6 destroyed. -``` - -
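-
-If you want to double-check that nothing was left behind, you can list the resources Terraform is still tracking. This is an optional sanity check and assumes you are still in the same working directory that holds the Terraform state.
-
-```shell
-# An empty result means no resources remain in the Terraform state.
-terraform state list
-```
-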
- -If you are using the tutorial container and want to exit the container, type `exit` in your terminal session and press the **Enter** key. Next, issue the following command to stop the container. - -
- -```shell -docker stop tutorialContainer && \ -docker rmi --force ghcr.io/spectrocloud/tutorials:1.0.4 -``` - -
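-
-Optionally, you can confirm the tutorial image was removed by listing any local images that still match the repository. An empty list is the expected result.
-
-```shell
-# No rows other than the header should be returned after the image is removed.
-docker images ghcr.io/spectrocloud/tutorials
-```
-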
- - - - -If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for **Force Delete**. To trigger a force delete, navigate to the respective cluster’s details page and click on **Settings**. Click on the **Force Delete Cluster** to delete the cluster. Palette will automatically remove clusters stuck in the cluster deletion phase for over 24 hours. - - - -
- -
- -
-
-
-# Wrap-Up
-
-In this tutorial, you learned about Palette’s Dev Engine and App Mode. You deployed two virtual clusters, each containing a different architecture and configuration of the Hello Universe application. Palette’s Dev Engine enables developers to quickly deploy applications into a Kubernetes environment without requiring Kubernetes knowledge. In a matter of minutes, you deployed a new Kubernetes cluster and all its applications without having to write Kubernetes configuration files.
-
-To learn more about Palette Dev Engine and its capabilities, check out the reference resources below.
- -- [Palette Modes](/introduction/palette-modes) -- [Spectro Cloud Terraform Provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) -- [App Profiles](/devx/app-profile) -- [App Services](/devx/app-profile/services) -- [Palette Virtual Clusters](/devx/palette-virtual-clusters) -- [Hello Universe GitHub respository](https://github.com/spectrocloud/hello-universe) diff --git a/content/docs/04.5-devx/06-apps/05-logs.md b/content/docs/04.5-devx/06-apps/05-logs.md deleted file mode 100644 index cc2b8493ca..0000000000 --- a/content/docs/04.5-devx/06-apps/05-logs.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: "App Logs" -metaTitle: "Palette Dev Engine App Logs" -metaDescription: "Download Palette application logs." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Application Logs - -Palette Dev Engine (PDE) provides access to application configuration and status logs for each application. The following files are available for download. - -| File | Description | -|---------------------------|-----------------------------------------------------------------------------------------------------------------------------| -| **cloudconfig.yaml** | The cluster configuration file, which contains Kubernetes specifications for the cluster. | -| **manifest.yaml** | The generated manifest file, which contains pack details. | -| **spec_description.yaml** | A file that contains metadata about clusters and applications. This file captures status probes and their results. | - - - - -To download cluster logs, navigate to the cluster's **Overview** page and select **Settings** > **Download Logs**. Select the log files you want to review. - - - - -# Download Application Logs - -Use the steps below to download application logs. The download bundle is a zip file containing all the log files. - - -## Prerequisites - -* A deployed application in app mode. - -* Access to view the application and its logs. - - -## Download Logs - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. If you are not already in App Mode, navigate to the **User Menu** and select **Switch to App Mode**. - - -3. On the left **Main Menu**, select **Apps**. - - -4. Select you application. - - -5. Click the **Actions** drop-down menu. - - -6. Select **Download Logs**. - - -7. A message displays with the download link. - - -8. Click the download link and review the files. - - -## Validate - -To review the logs, locate a zip file with file name format `[clusterNameHere]-logs-[currentTimeStamp]` in the downloads folder on your device. 
\ No newline at end of file diff --git a/content/docs/04.5-devx/07-palette-virtual-clusters.md b/content/docs/04.5-devx/07-palette-virtual-clusters.md deleted file mode 100644 index c7f6fb493b..0000000000 --- a/content/docs/04.5-devx/07-palette-virtual-clusters.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: "Palette Virtual Clusters" -metaTitle: "Palette Dev Engine for Enterprise Developers" -metaDescription: "Explore Palette Dev Engine as Free Developers" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - - -## Palette Virtual Clusters - -Palette Virtual Clusters are Kubernetes clusters that run as nested clusters within an existing cluster (also known as a Host Cluster) or Host Cluster groups and share the host cluster resources, such as CPU, memory, and storage. By default, Palette Virtual Clusters will use k3s as virtual Kubernetes cluster, which is a highly available, certified Kubernetes distribution designed for production workloads. Palette Virtual Clusters are powered by [vCluster](https://www.vcluster.com/) - -The Palette platform provisions and orchestrates all Palette Virtual CLusters, making it simple to use the lightweight, Kubernetes technology stack and tools ecosystem. Deploy virtual clusters on Host Cluster Group by following the wizard and attaching Add-on profiles. - -### Create your Palette Virtual Cluster: - -To create your new Palette Virtual Cluster complete the following actions. - - -
-
-1. Log in to the Palette Dev Engine console.
-
-2. Select `Palette Virtual Clusters` from the left ribbon menu, click `+ Palette Virtual Clusters`, and provide the following information to the cluster creation wizard.
-    * Select the Cluster Group: From the available host cluster groups, select the cluster group to host the new virtual cluster.
-    * Palette virtual cluster name: Provide a custom virtual cluster name or go with the default name.
-    * Provide the resource limits in terms of CPU, memory, and storage.
-
-
-    | **Palette Virtual Cluster Resource** | **Default** | **Minimum Limit** |
-    |--------------------------------------|-------------|-------------------|
-    | CPU (per request)                    | 4           | 3                 |
-    | Memory (per request)                 | 4 GiB       | 3 GiB             |
-    | Storage (per request)                | 2 GiB       | 0 GiB             |
-
- - -3. Review the information and deploy the Palette virtual cluster. The Palette virtual cluster will be provisioned within the next few minutes. - -
- -### Resource Tracking for Palette Virtual Clusters - -Palette users can track the available resources within a Cluster Group while launching a virtual cluster. The UI color codes give a rough estimation of available CPU, memory, and storage within the selected Cluster Group. The interpretations are as follows: - -
-
-* **Grey**: Resources already in use.
-
-
-* **Green**: The resources allocated for the Virtual Cluster under deployment.
-
-
-* **White**: Resources available within the Cluster Group that can be utilized after deploying the new Virtual Cluster.
-
-
-### Example Scenario
-
-The example screenshot below illustrates the following scenario. The Cluster Group selected in the example has a virtual cluster already running on it. The info box displays the recommended minimum CPU and memory allocated to the new virtual cluster. The color-coded bar summarizes the used, allocated, and available CPU, storage, and memory within the Cluster Group. Users can use this information to plan resource utilization based on the available resources.
-
-![color-tracking.png](/color-tracking.png)
-
-
-## Palette Virtual Cluster Pause and Resume
-
-Palette allows you to pause and resume Palette Virtual Clusters that are not in use. This helps optimize resource utilization and adds significant flexibility in managing operating costs and resources for Palette Virtual Clusters.
-
-
-### System and Resource Impact
-
-* The quota allocation is independent of a virtual cluster's pause or resume status.
-
-
-* The CPU and memory are freed and returned to the cluster group when you pause a virtual cluster.
-
-
-* Resources such as storage and load balancers remain allocated to a virtual cluster regardless of its state.
-
-
-* Apps deployed on a virtual cluster enter a paused state when the cluster is paused.
-
-
-* New Apps cannot be deployed on a virtual cluster in the paused state.
-
-
-* Virtual clusters in a paused state will continue to appear as an entry in the Palette Dev Engine Console.
-
-
-[Learn how to pause and resume your Palette Virtual Cluster](/devx/palette-virtual-clusters/pause-restore-virtual-clusters).
-
-
diff --git a/content/docs/04.5-devx/07-palette-virtual-clusters/00-pause-restore-virtual-clusters.md b/content/docs/04.5-devx/07-palette-virtual-clusters/00-pause-restore-virtual-clusters.md deleted file mode 100644 index c5cd53a704..0000000000 --- a/content/docs/04.5-devx/07-palette-virtual-clusters/00-pause-restore-virtual-clusters.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Pause and Resume Virtual Clusters" -metaTitle: "Pause and Resume Virtual Clusters" -metaDescription: "Learn how to pause and resume Palette Virtual Clusters." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Overview - -To optimize resource utilization, Palette allows you to pause and resume virtual clusters that are not in use. This adds significant flexibility in managing operating costs and resource management for virtual clusters. - -# Prerequisite - -* A Running [Palette Virtual Cluster](/devx/palette-virtual-clusters/pause-restore-virtual-clusters). - -# Pause and Resume a Palette Virtual Cluster - -Invoke the pause and resume operations from the Palette Console. - -1. Log in to the **Palette Dev Engine** console. - - -2. Navigate to the **Main Menu** and select **Palette Virtual Cluster** to be paused. - - -3. Go to the cluster details page by clicking the name of the virtual cluster to be paused. - - -4. Click **Settings** and select the **Pause** option. To resume a paused virtual cluster, select the **Resume** option. - -# Validate Pause/Resume - -You can verify the state of a cluster by reviewing the cluster details. To review the state of a cluster and its details, do the following steps. - -1. First, navigate to the left "Main Menu" on the Palette Dev Engine console and click on Virtual Clusters. - - -2. Click on the specific cluster you want to check the status. This will take you to the cluster detail page. On this page, look for a section titled Status. The status section displays the current state of the cluster. - - -3. The Palette Virtual Cluster shows the following cluster **Status**: - -* **Paused**: For a paused virtual cluster -* **Running**: For a resumed or running virtual cluster - -**Note:** The status of a Palette Virtual cluster can also be viewed against the cluster name, in the existing cluster listing page of Palette Dev Engine console. 
- diff --git a/content/docs/04.5-devx/07-palette-virtual-clusters/05-resize-virtual-clusters.md b/content/docs/04.5-devx/07-palette-virtual-clusters/05-resize-virtual-clusters.md deleted file mode 100644 index 38de0ecc82..0000000000 --- a/content/docs/04.5-devx/07-palette-virtual-clusters/05-resize-virtual-clusters.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: "Resize Virtual Clusters" -metaTitle: "Resize Virtual Clusters" -metaDescription: "Learn how to resize Palette Virtual Clusters" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Overview - -You can resize virtual clusters from the default size of 4 CPU, 4 GiB Memory, 2 GiB Storage to a size that does not exceed the system-level quota for a cluster group like Beehive or the user quota for tenant-level cluster groups. - -# Prerequisite - -* A running [Palette Virtual Cluster](/devx/palette-virtual-clusters/pause-restore-virtual-clusters). - -# Resize Virtual Clusters - - -Use the following steps to resize a virtual cluster. -
- -1. Log in to [Palette](https://console.spectrocloud.com). -
- -2. In App Mode, click **Virtual Clusters** in the **Main Menu**. -
- -3. Select the virtual cluster you want to resize, and click **Settings > Cluster Settings**. -
- -4. Click **Cluster Size** and specify new resource allocations for your virtual cluster. The size you specify cannot be greater than the system-level quota for a cluster group like Beehive or the user quota for tenant-level cluster groups. To learn more about resource quotas, refer to the [resource quota](/devx/manage-dev-engine/resource-quota) documentation. -
- -5. Save your changes. - - -# Validate - -To verify your changes, click **Virtual Clusters** in the left **Main Menu** and select the resized cluster. The virtual cluster Overview page displays the new **Allocated Quota** for the cluster. - - - - diff --git a/content/docs/04.5-devx/09-manage-dev-engine.md b/content/docs/04.5-devx/09-manage-dev-engine.md deleted file mode 100644 index 96bf86c8cc..0000000000 --- a/content/docs/04.5-devx/09-manage-dev-engine.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: "Manage Dev Engine" -metaTitle: "Palette Dev Engine Management" -metaDescription: "Palette Dev Engine Management" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -You manage Palette Dev Engine (PDE) primarily through the [Cluster Group's Virtual Cluster](/clusters/cluster-groups) settings. You also can manage PDE through the resources shared below to help you customize the PDE environment to better fit your needs. - - - - -# Resources - -- [Resource Quotas](/devx/manage-dev-engine/resource-quota) - - -- [Dev Engine Registries](/devx/manage-dev-engine/registries) - - -- [Manage Single Sign-On (SSO)](/devx/manage-dev-engine/sso) - - - - - diff --git a/content/docs/04.5-devx/09-manage-dev-engine/04-resource-quota.md b/content/docs/04.5-devx/09-manage-dev-engine/04-resource-quota.md deleted file mode 100644 index 866e40a216..0000000000 --- a/content/docs/04.5-devx/09-manage-dev-engine/04-resource-quota.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: "Resource Quotas" -metaTitle: "Palette Dev Engine for Enterprise developers" -metaDescription: "Explore Palette Dev Engine as a Free developer" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Overview - -This section covers the available deployment environments for Palette Virtual Clusters and the resource quotas that apply to users and virtual clusters. - -# Available Environments - -Palette Dev Engine users have access to a Palette-managed cluster group named *beehive*. The beehive cluster group is also a *system-level cluster group*, meaning that Spectro Cloud manages it. The beehive cluster group falls under the free tier of Palette and comes with its own set of resource limits. All users are subject to the following resource quotas when using the beehive cluster group. - -| Type | Max Limit | Description | -|-----------------|-----------|-------------------------------------------------------------------------------------------------------| -| Virtual Cluster | 2 | Each user is allowed to deploy a total of two virtual clusters. | -| CPU | 12 | Each user is allowed to consume a total of 12 CPU. This limit spans both virtual clusters. | -| Memory | 12 Gib | Each user is allowed to consume a total of 12 GiB of Memory. This limit spans both virtual clusters. | -| Storage | 20 GiB | Each user is allowed to consume a total of 20 GiB of storage. This limit spans both virtual clusters. 
| - - -Palette administrators can remove the beehive cluster, and other system-level cluster groups for all downstream users by setting the tenant developer setting **Hide system-level cluster groups from tenant users** to **true**. When this setting value is **true**, the beehive cluster is not displayed in the cluster group drop-down menu when deploying Palette virtual clusters. - -![The deployment path for a user](/045-devx_resource-quota_is-beehive-enabled.png) - -You can change tenant developer settings by switching to the tenant scope and navigating from the left **Main Menu** to **Tenant Settings > Developers Settings**. Toggle the **Hide system-level cluster groups from tenant users** button. - - -# Virtual Cluster Resource Quota - -Virtual clusters inherit resource quotas from the parent cluster group. The cluster group's virtual cluster settings determine the maximum resources per virtual cluster, and can be used to limit the number of resources a virtual cluster can claim from the group. By default, each virtual cluster requires at least 4 CPU, 4 GiB of memory, and 2 GiB of storage. Keep the required minimum values in mind when deploying virtual clusters or when defining the cluster group's virtual cluster settings. - -|**Virtual Cluster** | **Minimum Limit**| -|------------------------------|-----------------| -|CPU (per request) | 4 | -|Memory (per request) | 4 GiB | -|Storage (per request) | 2 GiB | - - - - -A virtual cluster requires a minimum of 4 CPU, 4 GiB of memory, and 2 Gib of storage to launch successfully. The default settings in the cluster group virtual cluster configuration YAML file has the following values: - -```yaml -vcluster - resources: - limits: - cpu: 1000m - memory: 1Gi - ephemeral-storage: 1Gi - requests: - cpu: 200m - memory: 256Mi - ephemeral-storage: 128Mi -``` - -Increasing the limit and request values could result in a virtual cluster requiring more resources than the default values of CPU, 4 GiB Memory, and 2 Gib of storage. - - - -If a user attempts to create a virtual cluster that needs more resources than the cluster group allows, the request will be denied because it exceeds the cluster group's defined limits. - - -Refer to the [Create and Manage Cluster Groups](/clusters/cluster-groups/create-cluster-group) to learn more about adjusting cluster group's virtual cluster settings. - - -# User Resource Quotas - -All Palette users are subject to resource quotas. The two entities that impact a user's resource quotas when interacting with virtual clusters are the tenant developer user quotas and the cluster group virtual cluster settings. - -## Tenant Developer User Quotas - -The global user quotas that a Palette administrator has defined in the tenant developer settings are always evaluated first. The tenant user quotas define the maximum set of resources a user can claim. - -* Virtual clusters - -* CPU - -* Memory - -* Storage - -For example, assume the following tenant developer user quotas for four virtual clusters are defined as 20 CPU, 32 GiB of memory, and 60 GiB of storage. With these settings, all users could deploy four virtual clusters, each virtual cluster with a maximum size allowed by the cluster group limits. - -Users can also deploy a single virtual cluster that consumes 20 CPU, 32 GiB of memory, and 60 GiB of storage. In the latter example, the user cannot deploy additional clusters because CPU, memory, and storage resources are exhausted. - -
- - - -To change tenant user quotas, switch the scope to **Tenant Admin** and navigate from the left **Main Menu** to **Tenant Settings** > **Developer Settings**. In the **User Quota** section, you can adjust the maximum number of resources for users. - - - - -# Quota Evaluation - -Palette evaluates each virtual cluster creation request to verify the requesting user has enough resource quotas remaining based on the defined tenant user quota and if the virtual cluster request falls within the allowed limits of the parent cluster group. - -The following diagram displays the evaluation process Palette uses to determine the status of a virtual cluster creation request. - -![Order of flow when it comes to evaluating cluster requests](/045-devx_resource-quota_evaluation-process.png) - -To better understand this concept, use the following examples. - -* Tenant Developer User Quotas: - * Number of virtual clusters: 4 - * CPU: 20 - * Memory: 32 GiB - * Storage: 60 GiB -* Hide system-level cluster groups from tenant users: false - - -* Cluster Group *dev-special* Virtual Cluster Settings - * CPU (per requests): 8 - * Memory (per requests): 12 GiB - * Storage (per requests): 12 GiB - - -* User A's Current Resource Utilization - * 1 virtual Cluster in dev-special - * 8 CPU - * 12 GiB Memory - * 20 GiB of Storage - - -* User B's Current Resource Utilization - * 4 Virtual Cluster in dev-special - * 16 CPU - * 32 GiB Memory - * 60 GiB of Storage - - -#### Scenario 1 - -User A is creating a request to deploy a virtual cluster to the dev-special cluster group. The virtual cluster is requesting the following resources: -* 8 CPU -* 12 GiB Memory -* 20 GiB Memory - -**Request**: ✅ - -**Explanation**: Based on tenant user quota, user A has these remaining resources for two virtual clusters: 12 CPU, 20 GiB Memory, and 40 GiB of storage. Based on cluster group quota, user A is within the resource limits of the dev-special cluster group. - -
-
-#### Scenario 2
-
-User B is creating a request to deploy a virtual cluster to the dev-special cluster group. The virtual cluster is requesting the following resources:
-* 4 CPU
-* 8 GiB Memory
-* 4 GiB Storage
-
-**Request**: ❌
-
-**Explanation**: User B has already reached the tenant user quota of four virtual clusters, so an additional cluster would exceed it. Based on cluster group quota, the virtual cluster request falls within the approved limits.
-
-
-#### Scenario 3
-
-User B is creating a request to deploy a virtual cluster to the beehive cluster group. The virtual cluster is requesting the following resources:
-* 4 CPU
-* 8 GiB Memory
-* 4 GiB Storage
-
-**Request**: ✅
-
-**Explanation**: The request is accepted because it targets the beehive cluster group, a system-level cluster group, and not a cluster group managed by the tenant. Based on the cluster group quota, the number of requested resources falls within the approved limits of the system-level quota.
-
- diff --git a/content/docs/04.5-devx/09-manage-dev-engine/08-registries.md b/content/docs/04.5-devx/09-manage-dev-engine/08-registries.md deleted file mode 100644 index 96a78dd70d..0000000000 --- a/content/docs/04.5-devx/09-manage-dev-engine/08-registries.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: "Dev Engine Registries" -metaTitle: "Palette Dev Engine for Enterprise Developers" -metaDescription: "Palette Dev Engine Registries" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Palette Dev Engine Registries - -The Pack registry is a server-side application that stores and serves packs to its clients. Packs from a pack registry are retrieved and presented as options during the creation of a cluster profile. Palette supports the configuration of multiple registries. - -## Default Registry -The default pack registry is Spectro Cloud's public pack registry. It consists of several packs that make it simple for a user to quickly create a cluster profile and launch a Kubernetes cluster with their choice of integrations. Spectro Cloud maintains all packs in the default pack registry, this includes taking care of upgrades in the pack registry whenever required. - -## Custom Pack Registry -Users can set up a custom pack registry using a Docker image provided by Spectro Cloud to upload and maintain custom packs. Spectro Cloud provides a [CLI tool](/registries-and-packs/spectro-cli-reference) to interact with and manage pack content in the pack registry. Custom registries offer a mechanism of extending the capabilities of a platform by defining additional integrations. - -Palette Dev Engine supports the following types of custom registries: - -* [Helm Registry](/registries-and-packs/helm-charts): Visit here for more information on Palette Helm Registry -* [OCI Registry](/registries-and-packs/oci-registry): Visit here for more information on Palette OCI Registry - - - -
-
- diff --git a/content/docs/04.5-devx/09-manage-dev-engine/30-sso.md b/content/docs/04.5-devx/09-manage-dev-engine/30-sso.md deleted file mode 100644 index 7d35c5eb34..0000000000 --- a/content/docs/04.5-devx/09-manage-dev-engine/30-sso.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: "Manage Single Sign-On (SSO)" -metaTitle: "Manage Single Sign-On (SSO)" -metaDescription: "Learn how to configure SSO for Palette Dev Engine." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -Palette supports the ability to use Single Sign-On (SSO) and third-party Social Sign-In Providers, such as Google and GitHub. Use the following steps to either enable or disable the feature. - -# Enable SSO - -To enable SSO with third-party Social Sign-In Providers use the following steps. - -
- - - - -To learn more about the Sign-In Flow, refer to the [User Authentication](/user-management/user-authentication#signinflow) documentation. - - - - -## Prerequisites - -* Palette Tenant Administrator access. - - -## Enable SSO - -1. Log in to [Palette](https://console.spectrocloud.com) as a Tenant Admin. - - -2. Navigate to the left **Main Menu**, select **Tenant Settings**, and select **SSO**. - - -3. Next, click the **Auth Providers** tab and toggle the **Enable Provider Login** button on. - - - ![The Auth providers tenant settings page with an arrow toward the toggle button.](/devx_manage-dev-engine_sso_display-oidc-page.png) - - -4. Select one of the supported Social Sign-In providers, and confirm your change. - - - - -## Validate - -You can validate SSO is enabled by attempting to log into your Palette tenant through SSO. Select the third-party provider you enabled for SSO. - - -![Palette's login view with the SSO providers highlighted.](/devx_manage-dev-engine_sso_palette-login-view.png) - - -# Disable SSO - -Palette provides the flexibility to disable SSO to restrict this capability. Use the following steps to disable SSO for Palette. - - -## Prerequisites - -* Palette Tenant Administrator access. - - - -## Disable Steps - -1. Log in to [Palette](https://console.spectrocloud.com) as a Tenant Admin. - - -2. Navigate to the left **Main Menu**, select **Tenant Settings**, and select **SSO**. - - -3. Next, click the **Auth Providers** tab and toggle the **Enable Provider Login** button off. - - -4. Log out of Palette. - - -## Validate - -You can validate SSO is disabled by attempting to log into your Palette tenant through SSO. Any SSO attempts will fail due to SSO being disabled at the tenant level. - - - diff --git a/content/docs/04.6-vm-management.md b/content/docs/04.6-vm-management.md deleted file mode 100644 index 9466ee5283..0000000000 --- a/content/docs/04.6-vm-management.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: "Virtual Machine Orchestrator" -metaTitle: "Virtual Machine Orchestrator" -metaDescription: "Learn about the Palette Virtual Machine Orchestrator solution for managing containerized and virtualized applications." -icon: "server" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -Palette Virtual Machine Orchestrator (VMO) provides a unified platform for managing containerized and virtualized applications. This solution allows organizations to onboard, deploy, manage, and scale VMs within the same cluster as their containerized applications. Palette VM Orchestrator simplifies managing infrastructure, improves resource utilization, and removes the cost of having a hypervisor. - - -![A drawing of VMs deployed to Palette](/docs_vm-mangement_vmo-diagram.png) - - -# Use Cases - -Palette VM Orchestrator is particularly suitable in the following scenarios: - -
- -- Organizations that want to remove their virtualization infrastructure due to an aging environment or to reduce costs. By using Palette VM Orchestrator, legacy applications and modern, containerized applications can be deployed on VMs. - - -- Organizations that want to consolidate their infrastructure and deploy both containerized and virtualized applications on the same Kubernetes cluster. - - -- Edge locations with a few VMs deployed and where a hypervisor is no longer desired. - - -# Prerequisites - -Palette Virtual Machine Orchestrator requires the following: - -
-
-- Palette version 3.3.0 or higher.
-
-- For data centers, production VMs are supported on bare metal Kubernetes clusters deployed on Canonical MAAS. To learn how to configure MAAS and create MAAS clusters in Palette, refer to the [Install and Manage MAAS Gateway](/clusters/data-center/maas/install-manage-maas-pcg) guide.
-
-- To use VMO on Edge, contact our support team by sending an email to [support@spectrocloud.com](mailto:support@spectrocloud.com).
-
-- VMs with a Persistent Volume Claim (PVC) must use a StorageClass that supports the ``ReadWriteMany`` (``RWX``) access mode for seamless live migration to a different node, whether the migration is triggered manually or by a Kubernetes upgrade. A minimal example PVC is shown after this list.
-
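-
-The following is a minimal example of a PVC that requests the ``ReadWriteMany`` access mode. The PVC name, StorageClass name, and requested size are placeholders; use a StorageClass from your storage provider that supports ``RWX``.
-
-```yaml
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: vm-disk-example                    # placeholder name
-spec:
-  accessModes:
-    - ReadWriteMany                        # required for seamless live migration
-  storageClassName: rwx-capable-storage    # placeholder - must support RWX
-  resources:
-    requests:
-      storage: 20Gi                        # placeholder size
-```
-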
-
-
-
- In environments that use nested virtualization, where VMs operate inside of VMs due to a lack of hardware to host VMs, it is technically possible to operate VMs in Kubernetes by setting the KubeVirt resource ``useEmulation`` to true. However, we do not recommend this approach.
-
-
-
-# Get Started With VM Orchestrator
-
-To get started, review [Virtual Machine Orchestrator Pack](/vm-management/vm-packs-profiles) to learn about its components.
-
-Review [Create a VMO Profile](/vm-management/vm-packs-profiles/create-vmo-profile) and [Add Roles and Role Bindings](/vm-management/vm-packs-profiles/add-roles-and-role-bindings) to learn how to create the cluster profile and add roles and permissions that allow users to create and manage Virtual Machines (VMs).
-
-Palette VM Orchestrator provides various methods to quickly deploy VMs from out-of-the-box templates or from your organization's templates. To learn more about using and creating templates, review [Deploy VM From a Template](/vm-management/create-manage-vm/standard-vm-operations/deploy-vm-from-template) and [Create a VM Template](/vm-management/create-manage-vm/create-vm-template).
-
-
-# Feature Gates
-
-Palette VM Orchestrator utilizes open-source KubeVirt as a component of the **Virtual Machine Orchestrator** pack to manage VMs and enables the following KubeVirt feature gates by default:
-
- -- LiveMigration -- Snapshot -- HotplugVolumes -- VMExport -- ExpandDisks -- HotplugNICs -- VMLiveUpdateFeatures - -KubeVirt offers other feature gates you may find useful and which you can enable using [Kubernetes feature gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/). To enable more KubeVirt feature gates, you can modify the ``kubevirt.kubevirtResource.additonalFeatureGates`` parameter in the **Virtual Machine Orchestrator** manifest. - -For more information on KubeVirt feature gates, refer to the [KubeVirt user guide](https://kubevirt.io/user-guide/operations/activating_feature_gates/). - -# Resources - -- [Virtual Machine Orchestrator Pack](/vm-management/vm-packs-profiles) - - -- [Create a VMO Profile](/vm-management/vm-packs-profiles/create-vmo-profile) - - -- [Add Roles and Role Bindings](/vm-management/vm-packs-profiles/add-roles-and-role-bindings) - - -- [Create and Manage VMs](/vm-management/create-manage-vm) - - -- [Standard VM Operations](/vm-management/create-manage-vm/standard-vm-operations) - - -- [Deploy VM from a Template](/vm-management/create-manage-vm/standard-vm-operations/deploy-vm-from-template) - - -- [Create a VM Template](/vm-management/create-manage-vm/create-vm-template) - - -- [VM Roles and Permissions](/vm-management/vm-roles-permissions) - - -- [KubeVirt user guide](https://kubevirt.io/user-guide/operations/activating_feature_gates/) - -
- -
diff --git a/content/docs/04.6-vm-management/05-vm-packs-profiles.md b/content/docs/04.6-vm-management/05-vm-packs-profiles.md deleted file mode 100644 index 6fdd30b963..0000000000 --- a/content/docs/04.6-vm-management/05-vm-packs-profiles.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: "Virtual Machine Orchestrator Pack" -metaTitle: "Virtual Machine Orchestrator Pack" -metaDescription: "Learn about components of the Virtual Machine Orchestrator pack." -icon: " " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - - -# Overview - -The **Virtual Machine Orchestrator** pack provides a single-pack experience that consolidates all the dependencies needed to deploy and manage VMs in your Kubernetes host cluster. You use **Virtual Machine Orchestrator** pack to create a VMO cluster profile. The pack's components are described below. All the components are enabled by default in the `charts:` section of the pack YAML configuration file. - -
- -- **Spectro VM Dashboard**: Enables access to a web console so you can manage and monitor your VMs. The console is accessible from the **Virtual Machines** tab that appears on the cluster overview page when using Palette Virtual Machine Orchestrator (VMO). The dashboard provides a web interface to create and manage VMs in your Kubernetes cluster. - - -- **KubeVirt**: Allows you to create VMs within a Kubernetes cluster using open-source [KubeVirt](https://kubevirt.io). KubeVirt provides feature gates you can enable in the Virtual Machine Orchestrator pack YAML file. To learn which feature gates Palette enables by default and how you can enable additional feature gates, check out the [Feature Gates](/vm-management#featuregates) section. - - KubeVirt extends Kubernetes with additional virtualization resource types using Kubernetes Custom Resource Definitions (CRD) API. KubeVirt also includes controllers and agents that provide VM management capabilities on the cluster. Through KubeVirt you can use the Kubernetes API to manage VM resources similar to the way you manage Kubernetes resources. - - -- **KubeVirt CDI**: Provides persistent storage for Kubernetes clusters. It enables Persistent Volume Claims (PVCs) to be used as disks for KubeVirt VMs. - - -- **Volume Snapshot Controller**: A Kubernetes plugin that watches VolumeSnapshot CRD objects and manages the creation and deletion of volume snapshots. A snapshot represents a point-in-time copy of a volume. - - -- **Multus CNI**: A Controller Network Interface (CNI) plugin that enables multiple network interfaces to attach to Kubernetes pods. In this context, it is used to attach VM networks to the launched VM. - - - - -The **Spectro Proxy** pack enables the use of a reverse proxy with a Kubernetes cluster and is automatically installed when you create the cluster with the default **Proxied** setting for **Access** during cluster profile creation. Check out the [Spectro Proxy](/integrations/frp) pack documentation to learn more. - - - - -Administrators can configure the out-of-the-box add-on packs, cluster profiles, and VM templates that include commonly used operating systems, or they can define their own VM templates to share with users. - - -# Resources - - -- [Spectro Proxy](/integrations/frp) - - -- [Feature Gates](/vm-management#featuregates) - -
- -
\ No newline at end of file diff --git a/content/docs/04.6-vm-management/05-vm-packs-profiles/15-create-vmo-profile.md b/content/docs/04.6-vm-management/05-vm-packs-profiles/15-create-vmo-profile.md deleted file mode 100644 index fd15e9cc7d..0000000000 --- a/content/docs/04.6-vm-management/05-vm-packs-profiles/15-create-vmo-profile.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: "Create a VMO Profile" -metaTitle: "Create a VMO Profile" -metaDescription: "Learn how to create a cluster profile to utilize Palette Virtual Machine Orchestrator capabilities." -icon: " " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -The **Virtual Machine Orchestrator** pack that you use to create a cluster profile conveniently includes several components and automatically installs the [Spectro Proxy](/integrations/frp) pack when you use the default profile configuration. To learn about pack components, refer to [Virtual Machine Orchestrator Pack](/vm-management/vm-packs-profiles). - - -# Prerequisites - -- A Palette permission key `create` for the resource `clusterProfile`. - - -# Create the Profile - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Select **Profiles** in the left **Main Menu** and click the **Add Cluster Profile** button. - - -3. Enter basic information for the profile: name, version if desired, and optional description. - - -4. Select type **Add-on**, and click **Next**. - - -5. In the following screen, click **Add New Pack**. - - -6. Use the information below to find the **Virtual Machine Orchestrator** pack: - -
- - - **Pack Type**: System App - - **Registry**: Public Repo - - **Pack Name**: Virtual Machine Orchestrator - - **Pack Version**: 1.0 or higher - - -7. Review the **Access** configuration panel at right. The default setting is **Proxied**, which automatically adds the **Spectro Proxy** pack when you create the cluster. Check out the [Spectro Proxy](/integrations/frp) guide to learn more. Changing the default may require some additional configuration. - - The **Direct** option is intended for a private configuration where a proxy is not implemented or not desired. - -
- - - - We recommend using the pack defaults. Default settings provide best practices for your clusters. Changing the default settings can introduce misconfigurations. Carefully review the changes you make to a pack. - - - -8. Click **Confirm & Create**. - - -9. In the following screen, click **Next**. - - -10. Review the profile and click **Finish Configuration**. - - -11. Apply the profile to your cluster. - - - - -# Validate - -You can validate the profile is created. - -
- -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to **Profiles** from the left **Main Menu**. - - -3. Locate the newly created profile in the list. - - -4. From the left **Main Menu**, click **Clusters** and select your cluster. - - -5. Based on your Single Sign-On (SSO) settings, the **Virtual Machines** tab may display on the **Cluster Overview** page, or the **Connect** button may display next to **Virtual Machines Dashboard** in cluster details. - - -# Next Steps - -You will need to configure roles and role bindings to give users access virtual clusters. You can use VM user roles and permissions or standard Kubernetes roles. For configuration guidance, refer to [Add Roles and Role Bindings](/vm-management/vm-packs-profiles/add-roles-and-role-bindings). The [VM User Roles and Permissions](/vm-management/vm-roles-permissions) reference lists Cluster Roles and equivalent Palette Roles. - - -# Resources - -- [Add Roles and Role Bindings](/vm-management/vm-packs-profiles/add-roles-and-role-bindings) diff --git a/content/docs/04.6-vm-management/05-vm-packs-profiles/20-add-roles-and-role-bindings.md b/content/docs/04.6-vm-management/05-vm-packs-profiles/20-add-roles-and-role-bindings.md deleted file mode 100644 index 09b6c4972c..0000000000 --- a/content/docs/04.6-vm-management/05-vm-packs-profiles/20-add-roles-and-role-bindings.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: "Add Roles and Role Bindings" -metaTitle: "Add Roles and Role Bindings" -metaDescription: "Learn how to configure user roles and cluster role bindings for Virtual Machines managed by Palette Virtual Machine Orchestrator." -icon: " " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -You must configure permissions for actions that users can perform on Virtual Machines (VMs) deployed using Palette Virtual Machine Orchestrator (VMO), such as cloning, updating, and migrating VMs. You can do this by creating roles and cluster role bindings to determine access permissions. Refer to [VM User Roles and Permissions](/vm-management/vm-roles-permissions) for a list of Cluster Roles and equivalent Palette Roles. To learn more about Cluster RBAC in Palette, review the [RBAC and NS Support](/clusters/cluster-management/cluster-rbac) guide. - - -# Prerequisites - -- A cluster profile with the **Virtual Machine Orchestrator** add-on pack configured. Check out the [Create a VMO Profile](/vm-management/vm-packs-profiles/create-vmo-profile) guide to learn more. - - -- Two defined cluster role bindings for every user: ``spectro-list-namespaces``and ``spectro-list-vmtemplates``. - - -- Additional cluster roles, based on the user's persona, must be associated with the user by specifying a cluster role binding or a namespace-restricted role binding: - -
- - - ``spectro-vm-admin`` - - - ``spectro-vm-power-user`` - - - ``spectro-vm-user`` - - - ``spectro-vm-viewer`` - - Alternatively, you can use standard Kubernetes roles ``cluster-admin``, ``admin``, ``edit``, and ``view`` instead of defining bindings based on ``spectro-vm-*`` roles. - - -- Assigned permissions to access Palette clusters. - - -# Add Roles and Role Bindings - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. From the left **Main Menu**, click **Clusters** and select your cluster. - - -3. Click on **Settings** and choose **RBAC** to add role bindings. Refer to [Create a Role Binding](/clusters/cluster-management/cluster-rbac#createrolebindings) for guidance. Refer to [VM User Roles and Permissions](/vm-management/vm-roles-permissions) for a list of Cluster Roles and equivalent Palette Roles. - - -4. Click **Confirm** to update the cluster. - -The cluster status displays as **Upgrading** on the **Cluster Overview** page. Upgrading can take several minutes depending on your environment. You can track events from the **Events** tab. - - -# Validate - -You can verify role creation and role binding is successful by following the steps below. - -
- - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the cluster you created the role binding in to view its details page. - - -4. Download the **kubeconfig** file for the cluster or use the web shell to access the host cluster. - - -5. Use the following commands to review details about the role and to ensure the role binding was successful. - - -#### Cluster Role: - -```shell -kubectl get clusterrole --output yaml -``` - - -#### Role - -```shell -kubectl get role --namespace --show-kind --export -``` - - -# Next Steps - -Now you are ready to deploy a VM. Review the [Deploy VM From a Template](/vm-management/create-manage-vm/standard-vm-operations/deploy-vm-from-template) guide to get started with the deployment process. - - -# Resources - -- [VM User Roles and Permissions](/vm-management/vm-roles-permissions) diff --git a/content/docs/04.6-vm-management/10-create-manage-vm.md b/content/docs/04.6-vm-management/10-create-manage-vm.md deleted file mode 100644 index 91777c0b8a..0000000000 --- a/content/docs/04.6-vm-management/10-create-manage-vm.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: "Create and Manage VMs" -metaTitle: "Create and Manage VMs" -metaDescription: "Learn methods to create VMs using Palette Virtual Machine Orchestrator." -icon: " " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -Palette Virtual Machine Orchestrator (VMO) allows you to deploy and manage Virtual Machines (VMs) alongside containerized applications. - -# Prerequisites - -- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the Spectro Cloud reverse proxy. - - -- Users or groups must be mapped to a Virtual Machine RBAC role. You can create a custom role through a manifest and use Palette's RoleBinding feature to associate users and groups with the role. Refer to the [Create Role Bindings](/clusters/cluster-management/cluster-rbac#createrolebindings) guide to learn more. - - -- A namespace for VMs. Although you can deploy VMs from the default namespace, we recommend creating at least one namespace dedicated to VMs as a way to organize and manage them. To learn how to create a namespace, check out [Create a Namespace](/clusters/cluster-management/namespace-management#createanamespace). - - -# VM Creation - -You can create a VM three ways: - -
- -- Deploy a VM from a template. Palette provides out-of-the-box templates, or your organization may provide templates. For the latter, refer to the [Create a VM Template](/vm-management/create-manage-vm/create-vm-template) guide. - - -- Create an empty VM and install the Operating System (OS) using a standard method, such as a Preboot Execution Environment (PXE) or optical disk image (ISO). - - -- Clone an existing VM. - -Administrators can also import VMs from their existing VMware vSphere environment into Palette. - -Although no additional components are required in VMs, the **QEMU Guest Agent** is an optional component that runs inside a VM and provides runtime information. - -Additionally, Virtio is a virtualization standard for network and disk device drivers where only the guest's device driver knows it is deployed in a virtual environment, and cooperates with the hypervisor. This enables guests to receive high performance network and disk operations and provides most of the performance benefits of paravirtualization. - -
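To make the QEMU Guest Agent and Virtio points above concrete, here is a hedged fragment of a KubeVirt VirtualMachine template that attaches the root disk with the virtio bus and installs the guest agent through cloud-init. The disk names and container image are placeholders, not values Palette generates.

```yaml
# Hypothetical fragment of a VirtualMachine template, not a complete manifest.
# The root disk uses the paravirtualized virtio bus, and cloud-init installs and
# starts the optional QEMU guest agent inside the guest.
spec:
  template:
    spec:
      domain:
        devices:
          disks:
            - name: rootdisk
              disk:
                bus: virtio
            - name: cloudinitdisk
              disk:
                bus: virtio
      volumes:
        - name: rootdisk
          containerDisk:
            image: example.com/os-images/linux-guest:latest # placeholder image
        - name: cloudinitdisk
          cloudInitNoCloud:
            userData: |
              #cloud-config
              packages:
                - qemu-guest-agent
              runcmd:
                - ["systemctl", "enable", "--now", "qemu-guest-agent"]
```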
- - - -We recommend installing the QEMU guest agent to display additional details in Palette Virtual Machine Orchestrator. We also recommend installing VirtIO drivers to ensure you can use the paravirtualized hardware properly. - - - -# Resources - -- [Standard VM Operations](/vm-management/create-manage-vm/standard-vm-operations) - - -- [Deploy VM from a Template](/vm-management/create-manage-vm/standard-vm-operations/deploy-vm-from-template) - - -- [Create a VM Template](/vm-management/create-manage-vm/create-vm-template) - - -
- -
- diff --git a/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/04-deploy-vm-from-template.md b/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/04-deploy-vm-from-template.md deleted file mode 100644 index 9b520a1e47..0000000000 --- a/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/04-deploy-vm-from-template.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: "Deploy VM From a Template" -metaTitle: "Deploy VM From a Template" -metaDescription: "Learn how to deploy a VM from a template using Palette Virtual Machine Orchestrator." -icon: " " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -You can deploy a Virtual Machine (VM) using Palette's out-of-the-box templates or from templates that your organization's administrator provides. - -# Prerequisites - -- Configured Virtual Machine Orchestrator profile applied to your cluster. Review [Create a VMO Profile](/vm-management/vm-packs-profiles/create-vmo-profile) to configure the dashboard. - -# Deploy VM from a Template - -These steps will help guide you to deploy a VM from an out-of-the-box VM template. - -
- -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. From the left **Main Menu**, click **Clusters** and select the cluster in which you deploy VMs. - - -3. Click the **Virtual Machines** tab. - - -4. Select the appropriate namespace from the **drop-down Menu**. - - -5. From the **Virtual Machines** tab that appears, click **New Virtual Machine**. - - -6. Click the **New Virtual Machine** button. Available templates are displayed based on supported Operating Systems (OS). - - -7. You can deploy from a template or create an empty VM as follows: - -
- - - To deploy from a template, select one of the VM templates. These can be Palette's out-of-the-box templates or templates that you or your administrator created. - -
- - - To create an empty VM, close the templates choice page and install the OS using a different method. - -
- -8. Give the VM a name and specify memory and CPUs. - - -9. Optionally, you can enable the checkbox to start the VM automatically after creation. - - -10. Click the **Next** button, which displays the YAML file. Tooltip help is available when you hover over lines in the file. - - -11. Review the YAML file and click the **Next** button when you are done. - - -12. Click **Create Virtual Machine**. - - -VM status will display as **Starting** for several minutes while the required resources are built and the image is pulled from the registry. If you did not enable the checkbox to start the VM automatically, VM status displays as **Stopped** until the VM is fully deployed. - -
- - - -VMs do not self-heal. If a VM is running on a node that fails, the VM is re-scheduled to a different node. Similar to live migration, to provide high availability, the disks should be ``ReadWriteMany`` so that they can be mounted on other nodes when the VM is restarting. - - - - -# Validate - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Select the host cluster that contains your VMs to view its details page. - - -4. Click on the **Virtual Machines** tab. - - -5. Review the list of VMs and ensure the new VM is displayed and has the status **Running**. - - -# Next Steps - -Try installing your applications. If you did not install the QEMU guest agent as part of the VM deployment, you can install it now. The guest agent displays additional details in the **Virtual Machines** > **Details** tab. - -You can update the VM configuration from the VM console or from tabs when you click on the VM. Learn about updates you can make in the [Update VM Configuration](/vm-management/create-manage-vm/standard-vm-operations/update-vm-configuration) guide. -
- -
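Expanding on the ``ReadWriteMany`` note above, the hedged example below shows a CDI DataVolume that requests shared access so the disk can be mounted on another node if the VM is rescheduled. The names and storage class are placeholders, and the storage class must actually support RWX volumes.

```yaml
# Hypothetical DataVolume requesting shared (RWX) access so the backing disk can be
# attached from a different node when the VM is rescheduled or live migrated.
apiVersion: cdi.kubevirt.io/v1beta1
kind: DataVolume
metadata:
  name: example-vm-rootdisk        # placeholder name
spec:
  source:
    blank: {}                      # empty disk; an http or registry source also works
  pvc:
    storageClassName: shared-rwx   # placeholder; must support ReadWriteMany
    accessModes:
      - ReadWriteMany
    resources:
      requests:
        storage: 20Gi
```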
diff --git a/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/05-update-vm-configuration.md b/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/05-update-vm-configuration.md deleted file mode 100644 index f230e13411..0000000000 --- a/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/05-update-vm-configuration.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: "Update VM Configuration" -metaTitle: "Update VM Configuration" -metaDescription: "Learn how to add disk storage and network interfaces to a VM using Palette Virtual Machine Orchestrator." -icon: " " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - - -You can add storage and additional network interfaces to your virtual machines (VMs). - - -# Add Disk Storage - -KubeVirt allows hot plugging additional storage into a running VM. Both block and file system volume types are supported. - -## Prerequisites - -- A deployed VM. - -## Add a Disk - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. From the left **Main Menu**, click **Clusters** and click on your cluster. - - -3. Navigate to **Virtual Machines** > **Disks** tabs and click the **Add disk** button. - - -4. Review the parameters and update as needed. You can specify the disk size, disk type (Disk, CD-ROM, or LUN), and network interface. - - The interface type determines out-of-the-box operating system (OS) support and disk performance. Choose from the following: - -
- - - **virtio**: Optimized for best performance, but the operating system may require additional Virtio drivers. - -
- - **sata**: Most operating systems support Serial ATA (SATA). However, it offers lower performance. -
- - - **scsi**: A paravirtualized Internet Small Computer System Interface (iSCSI) HDD driver that offers similar functionality to the virtio-block device but with some additional enhancements. In particular, this driver supports adding hundreds of devices and names devices using the standard SCSI device naming scheme. - - -5. Click **Add** when you are done. - -## Validate - -The **Disks** tab lists the newly added disk as ``PersistingHotplug``. - -
- -# Add Network Interfaces - -You can add additional network interfaces to a VM. By default, VMs use the native networking already configured in the pod. Typically, this means using the Bridge option, and your VM has the same IP address as the pod. This approach makes interoperability possible. The VM can integrate with different cases like sidecar containers and pod masquerading. - -When using pod masquerading, you choose a CIDR for which VMs are not assigned a private IP, and instead use Network Address Translation (NAT) behind the pod IP. - -Multus is a secondary network that uses Multus-CNI. Multus allows you to attach multiple network interfaces to pods in Kubernetes. If you use Multus as your network, ensure that Multus is installed across your cluster and that you have created a default ``NetworkAttachmentDefinition`` CRD. For more information, refer to the [Multus CNI](/integrations/multus-cni) guide. - - -## Prerequisites - -- A deployed VM. - -## Add an Interface - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. From the left **Main Menu**, click **Clusters** and click on your cluster. - - -3. Navigate to **Virtual Machines > Network Interfaces** and click the **Add network interface** button. - - -4. Review the parameters and update as needed. Interface types are: **Masquerade**, **Bridge**, and **SR-IOV**. - - -5. Click **Add** when you are done. - - -
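The Multus path described above assumes a ``NetworkAttachmentDefinition`` already exists. Below is a hedged example of what such a definition can look like when it uses the bridge CNI plugin; the network name, bridge name, and subnet are placeholders for your environment.

```yaml
# Hypothetical NetworkAttachmentDefinition that Multus can attach to a VM as a
# secondary network. Adjust the CNI type, bridge, and IPAM settings to your network.
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
  name: vm-secondary-net           # placeholder name
  namespace: default
spec:
  config: |
    {
      "cniVersion": "0.3.1",
      "type": "bridge",
      "bridge": "br-vm",
      "ipam": {
        "type": "host-local",
        "subnet": "192.168.100.0/24"
      }
    }
```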
- - - -Multus allows hot plugging network interfaces only when interfaces use the **virtio** model connected through bridge binding. - - - -## Validate - -The **Network Interfaces** tab lists the newly added interface. - -# Resources - -- [Multus CNI](/integrations/multus-cni) diff --git a/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/10-migrate-vm-to-different-node.md b/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/10-migrate-vm-to-different-node.md deleted file mode 100644 index b4913f4ca2..0000000000 --- a/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/10-migrate-vm-to-different-node.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: "Migrate a VM" -metaTitle: "Migrate a VM to a Different Node" -metaDescription: "Learn how to migrate a VM to another physical host in the cluster using Palette Virtual Machine Orchestrator." -icon: " " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -Palette supports virtual machine (VM) migration to another physical host in the cluster. This is known as *live migration*. During live migration, the VM and its memory, storage, and CPU resources are moved from one cluster compute node to another without any noticeable downtime. - -Successful live migrations rely on appropriately configured storage and networking, and live migration must be enabled as a feature gate. Live migration is enabled by default in the ``feature-gates`` section of the KubeVirt configuration file that is part of the **Virtual Machine Orchestrator** pack. Refer to [Feature Gates](/vm-management#featuregates) for more information. - -Live migration is used with rolling Kubernetes upgrades and workload balancing. To avoid interrupting a VM when a node is placed into maintenance or upgraded, all VM instances require a ``LiveMigrate`` eviction strategy. - - -# Prerequisites - -- All VM instances must have an eviction strategy set as `evictionStrategy: LiveMigrate` to ensure that a VM is not interrupted if the node is placed into maintenance. This is configured automatically in the KubeVirt configuration file. If needed, you can override the default setting by configuring `spec.template.spec.evictionStrategy`. - - -- VMs that use Persistent Volumes must have shared ``ReadWriteMany`` (``RWX``) access. For more information, refer to the [Persistent Volume Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) Kubernetes resource. VMs that do not use persistent storage, such as containerDisks, do not require modifications for live migration. - - -- A VM’s pod network cannot use a Bridge interface. Disable the default Bridge interface on the pod network. However, other interfaces such as those that Multus grants, may use a bridge interface for live migration. - - -# Migrate VM to a Different Node - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. From the left **Main Menu**, choose **Clusters** and click on your cluster. - - -3. Click on the **Virtual Machines** tab. - - -4. Select the VM to migrate and click either the **three-dot Menu** or **Actions**. - - -5. Click **Migrate Node to Node**. - - - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. From the left **Main Menu**, click **Clusters** and select the cluster. - - -3. 
Navigate to the **Virtual Machines** tab, and click the VM you migrated. - - -4. Click the **Details** tab, and verify that the node name and IP address have changed. - - -# Evacuate a Host - -Compute nodes can be placed into maintenance mode using Palette or manually using the `cordon` and `drain` commands. The `cordon` command marks the node as un-schedulable, and the `drain` command evacuates all the VMs and pods from it. This process is useful when you need to perform hardware maintenance on the node, for example, to replace a disk or network interface card (NIC), perform memory maintenance, or resolve issues with a particular node. To learn more, check out the [Safely Drain a Node](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/#use-kubectl-drain-to-remove-a-node-from-service) Kubernetes resource. - - -## Prerequisites - -- Ensure `LiveMigrate` is set as the eviction strategy for all affected VMs. When the host is put in maintenance mode, this feature allows for a smooth and uninterrupted migration process. - - -## Evacuate VMs in Palette - - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. From the left **Main Menu**, choose **Clusters** and click on the **Nodes** tab. - - -3. Click the **three-dot Menu** in the row of the node you want to evacuate and select **Turn on maintenance mode**. This evacuates all workloads from the node to other nodes in the worker pool. - - -4. Turn off maintenance mode by clicking the **three-dot Menu** in the row of the evacuated node and selecting **Turn off maintenance mode**. -
- - - - Maintenance mode reduces cluster capacity. Be sure to turn off maintenance mode after maintenance completes. - - - - -## Validate - -You can validate evacuation completed by following the steps below. - -
- -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. From the left **Main Menu**, choose **Clusters**. - - -3. Verify the **Health** column displays the **Maintenance mode: Completed** icon. - - - -## Evacuate VMs Manually - -
- -1. Obtain the kubeconfig file from Palette, and set the KUBECONFIG environment variable to access it so you can issue kubectl commands to the cluster. To learn how, refer to [Set up Kubectl](https://docs.spectrocloud.com/clusters/cluster-management/palette-webctl/#setupkubectl). - - -2. Issue the following command to mark the node as *un-schedulable*. This alerts the Kubernetes scheduler not to schedule any new pods on that node but allows existing pods on the node to continue to operate. - -
- - - Example: - ```bash - kubectl cordon <node-name> - ``` - - **node-name**: The name of the node that should be marked as *un-schedulable*. - - -3. Issue the following command to gracefully remove all pods from the node that is undergoing maintenance. When you drain a node, all pods and VMs will be safely evicted from the node. - - Example: - ```bash - kubectl drain <node-name> - ``` - - **node-name**: The name of the node that you wish to drain. -
- - - - The kubectl `drain` command should only be issued to a single node at a time. - - - - -## Validate - - -1. Using kubectl, log in to a machine that has access to the Kubernetes cluster. - - -2. Issue the following command and verify that the pods have been rescheduled on a different node by confirming that the node name and IP address have changed. -
- - ```bash - kubectl get pods --output wide - ``` - - -# Resources - -- [Persistent Volume Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) - - -- [Safely Drain a Node](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/#use-kubectl-drain-to-remove-a-node-from-service) - - -
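To connect the prerequisites at the top of this section back to the VM definition, the hedged fragment below shows where ``evictionStrategy: LiveMigrate`` sits in a KubeVirt VirtualMachine. Everything other than that field is placeholder scaffolding.

```yaml
# Hypothetical fragment: evictionStrategy tells KubeVirt to live migrate this VM
# instead of shutting it down when its node is cordoned and drained.
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: example-vm                # placeholder
spec:
  running: true
  template:
    spec:
      evictionStrategy: LiveMigrate
      domain:
        devices: {}               # disks and interfaces omitted for brevity
        resources:
          requests:
            memory: 2Gi
```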
- -
- diff --git a/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/15-take-snapshot-of-vm.md b/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/15-take-snapshot-of-vm.md deleted file mode 100644 index 21ccd2961a..0000000000 --- a/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/15-take-snapshot-of-vm.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: "Take a VM Snapshot" -metaTitle: "Take a Snapshot of the VM" -metaDescription: "Learn how to snapshot a VM using Palette Virtual Machine Orchestrator." -icon: " " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -A snapshot is a copy of a virtual machine's (VM) disk file at a given point in time. Snapshots provide a change log for the virtual disk and are used to restore a VM to a particular point in time. - -You can take a snapshot of a VM that is online (**Running** state) or offline (**Stopped** state). When you take a snapshot of an active VM, the controller checks for the QEMU guest agent in the VM. If the guest agent is present, the controller freezes the VM file system before it takes the snapshot and unfreezes the file system afterwards. This provides for crash consistency. - -
- - - -For optimal snapshots, we recommend taking snapshots of online VMs that have the QEMU Guest Agent installed. If the guest agent is not installed, a best effort snapshot is taken. - -To check whether the VM has the ``qemu-guest-agent`` active, look for ``AgentConnected`` in the **Virtual Machines > Snapshots** tab. The ``vmSnapshot Status`` will display if the snapshot was taken online and with or without guest agent participation. - - - -
- -You can take a snapshot of an online VM that has hotplugged disks. Only persistent hotplugged disks will be included in the snapshot. Only disks with a snapshot-supported storage class defined are included in snapshots. If no eligible disk is found, the **Snapshot** action is not possible. - -# Prerequisites - -- A deployed VM. - - -# Take a Snapshot - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. From the left **Main Menu**, click **Clusters** and select your cluster. - - -3. Navigate to **Virtual Machines > Snapshots**, and click the **Take snapshot** button. - - - - -In some situations, such as with the Fedora operating system, SELinux on the guest prevents the QEMU guest agent from quiescing the target filesystem. As a workaround, you can do one of the following: - -- Generate an appropriate local security module that permits `qemu-ga` to operate correctly. This is the preferred workaround. - - -- Turn off SELinux **Enforcing** mode before the snapshot by issuing the `setenforce 0` command as the root user. Enforcing can be re-enabled after the snapshot using the `setenforce 1` command. - - - - -The **Snapshots** tab displays the ``vmSnapshot Status`` parameter with snapshot phases for the VM: **InProgress**, **Succeeded**, or **Failed**. - -The default time for a snapshot is five minutes. If the snapshot has not successfully completed within that time, it's status will display as **Failed**. The VM will be unfrozen and the snapshot content will be cleaned up if necessary. The snapshot will remain in **Failed** state until you delete it. You can change the default snapshot time to meet your workload requirements. - -
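As a hedged illustration of the objects behind this workflow, the manifest below requests a snapshot of a VM and raises the completion deadline beyond the five-minute default. The VM name and deadline are placeholders, and the snapshot API version can differ between KubeVirt releases.

```yaml
# Hypothetical VirtualMachineSnapshot request. failureDeadline extends the window
# after which an incomplete snapshot is marked as Failed.
apiVersion: snapshot.kubevirt.io/v1alpha1
kind: VirtualMachineSnapshot
metadata:
  name: example-vm-snapshot        # placeholder
spec:
  source:
    apiGroup: kubevirt.io
    kind: VirtualMachine
    name: example-vm               # placeholder VM name
  failureDeadline: 10m
```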
- - - -Snapshots should not be used as a backup method, as running a VM on a snapshot for extended periods of time can cause instability and data loss. - - - -# Validate - -1. From the **Snapshots** tab, verify the ``vmSnapshot Status`` parameter displays **Succeeded**. - - -2. If the snapshot status displays as **Failed**, delete the snapshot and take a new one. You may need to change the default snapshot time in the VM configuration. - - - - - - - diff --git a/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/20-clone-vm.md b/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/20-clone-vm.md deleted file mode 100644 index 05ab1007b9..0000000000 --- a/content/docs/04.6-vm-management/10-create-manage-vm/05-standard-vm-operations/20-clone-vm.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: "Clone a VM" -metaTitle: "Clone a VM" -metaDescription: "Learn how to clone a VM from a template using Palette Virtual Machine Orchestrator." -icon: " " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - - -A VM clone is a copy of an existing, or parent, virtual machine (VM). The cloned VM has the same configuration settings and identifiers as the parent VM. After you clone a VM, it as a separate virtual machine. - -Cloning is a quick way to create a new virtual machine that shares the same properties as the parent. You may want to clone a VM for the following reasons: - -
- -- Software testing - developers can clone an active VM to test new changes to their code. - - -- Forensics - security administrators can clone an infected machine and connect it to an air-gapped network to investigate the source of the infection while the parent VM can be destroyed or remediated. - - -# Prerequisites - -There are no requirements. - -# Clone a VM - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. From the left **Main Menu**, click **Clusters** and click on your cluster. - - -3. Select the VM to clone and click either the **three-dot Menu** or **Actions**. - - -4. Power off the parent VM and click **Clone**. If you forget to power it off, the parent VM will automatically be powered off while cloning is in progress. - - -5. Give the clone a name, optionally add a description, and select a namespace. - - -6. Optionally, you can enable the checkbox to start the cloned VM automatically when cloning is complete. - - -# Validate - -From the **Virtual Machines** tab, verify the cloned VM is listed and displays **Running** status. - - - - - - diff --git a/content/docs/04.6-vm-management/10-create-manage-vm/10-standard-vm-operations.md deleted file mode 100644 index 7abc9a6325..0000000000 --- a/content/docs/04.6-vm-management/10-create-manage-vm/10-standard-vm-operations.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: "Standard VM Operations" -metaTitle: "Standard VM Operations" -metaDescription: "Learn about standard VM operations that you can perform using Palette Virtual Machine Orchestrator." -icon: " " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - - -# Overview - -Palette Virtual Machine Orchestrator (VMO) supports standard VM power operations: -
- -- **Start** - - -- **Stop** - - -- **Pause** - - -- **Restart** - - -From the **Virtual Machines** tab, you can select a VM to view its details and perform standard VM operations, such as live migration (vMotion), snapshots, and cloning. VM operations are accessible from the **three-dot Menu** at the right of each listed VM. - -When you select a VM from the **Clusters** > **Virtual Machines** tab, the following tabs display. Tabs are specific to the selected VM. - -
- -- **Overview**: Provides general information about the VM, such as its IP address, operating system, creation date and time zone, status, active users, whether the guest agent is installed or not, the quantity of Network Interface Cards (NIC) and disks, and any recent events. - - -- **Details**: Provides additional VM details such as labels associated with the VM, pod information, scheduling and resource requirements, and CPU and memory. If the QEMU Guest Agent is not installed, **Not Available** displays in place of details that would otherwise be available to you. - - -- **YAML**: You can review and change the VM configuration from here. - - -- **Events**: Displays streaming events in the VM. Any standard operations you perform on the VM are captured here. - - -- **Console**: Allows you to access and interact with the VM through its console. If you are not using a template, you can configure the VM using the console. - - -- **Network Interfaces**: Allows you to add and manage network interfaces. By default, the Pod Networking interface is a masquerade type interface, or in simple terms, it's a one-to-many IP address translation. You can change this to be a Bridge or other interface type. - - -- **Disks**: Allows you to add and manage disks. You can update the disk size, specify type `Disk`, `CD-ROM`, or `LUN`, and specify the interface `virtuo`, `sata`, or `scsi`. By default, `spectro-storage-class` is applied to the disk. - - -- **Snapshots**: Allows you to take a new snapshot of a VM's disk file at a given point in time and manage existing snapshots. - - - - -# Resources - -- [Deploy VM From a Template](/vm-management/create-manage-vm/standard-vm-operations/deploy-vm-from-template) - - -- [Update VM Configuration](/vm-management/create-manage-vm/standard-vm-operations/update-vm-configuration) - - -- [Migrate VM to a Different Node](/vm-management/create-manage-vm/standard-vm-operations/migrate-vm-to-different-node) - - -- [Take a VM Snapshot](/vm-management/create-manage-vm/standard-vm-operations/take-snapshot-of-vm) - - -- [Clone a VM](/vm-management/create-manage-vm/standard-vm-operations/clone-vm) - - - - -
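For readers who work in the **YAML** tab, the hedged fragment below shows how KubeVirt records a VM's desired power state declaratively. The **Start** and **Stop** actions in the console roughly correspond to changes of this kind, although the exact fields Palette toggles are not shown here.

```yaml
# Hypothetical fragment: KubeVirt expresses the desired power state of a VirtualMachine
# declaratively. Always keeps the VM running, Halted stops it, and Manual defers to
# explicit start and stop requests.
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: example-vm      # placeholder
spec:
  runStrategy: Always   # template and other required fields omitted for brevity
```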
- -
\ No newline at end of file diff --git a/content/docs/04.6-vm-management/10-create-manage-vm/15-create-vm-template.md b/content/docs/04.6-vm-management/10-create-manage-vm/15-create-vm-template.md deleted file mode 100644 index 9e2f643ff0..0000000000 --- a/content/docs/04.6-vm-management/10-create-manage-vm/15-create-vm-template.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: "Create a VM Template" -metaTitle: "Create a VM Template" -metaDescription: "Learn how to create a VM template using Palette Virtual Machine Orchestrator." -icon: " " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -Although Palette provides out-of-the box templates, we recommend that you create and manage your own templates. - -# Prerequisites - -- Valid YAML that defines your VM template. - -# Create a VM Template - -Create a template by adding a YAML file as a manifest in an add-on profile. - -
- -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. From the left **Main Menu**, click **Profiles** and click the **Add Cluster Profile** button. - - -3. Give the profile a name, select type **Add-on**, and click **Next**. - - -4. On the Profile Layers page, click **Add Manifest**. - - -5. Give the layer a name, and click **Edit manifest** and enter a name for the first template. Click the checkmark icon. - - -6. In the blank manifest file at right, enter the VM definition as a YAML file. You can add multiple manifests for multiple templates in the same add-on profile. They will display as layers in the profile. - - -7. Click **Confirm and Create**, then click **Next**. - - -8. Click **Finish Configuration**. - -
- -#### Example YAML for a VM template - - -```yaml -apiVersion: spectrocloud.com/v1 -kind: VmTemplate -metadata: - labels: - app.kubernetes.io/name: fedora-36 - app.kubernetes.io/instance: fedora-36-instance - app.kubernetes.io/part-of: vmtemplate - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/created-by: vmtemplate - name: fedora-36 -spec: - description: This is Fedora 36 image - displayName: Fedora 36 - icon: "https://s3.amazonaws.com/manifests.spectrocloud.com/logos/fedora.png" - running: false - template: - spec: - domain: - cpu: - cores: 1 - devices: - disks: - - name: containerdisk - disk: - bus: virtio - - name: cloudinitdisk - disk: - bus: virtio - interfaces: - - name: default - masquerade: {} - resources: - requests: - memory: 1Gi - cpu: 1 - limits: - memory: 2Gi - cpu: 2 - networks: - - name: default - pod: {} - volumes: - - name: containerdisk - containerDisk: - image: gcr.io/spectro-images-public/release/vm-dashboard/os/fedora-container-disk:36 - - name: cloudinitdisk - cloudInitNoCloud: - # user name is fedora - userData: | - #cloud-config - ssh_pwauth: True - chpasswd: { expire: False } - password: spectro - disable_root: false - packages: - qemu-guest-agent - runcmd: - - ["sudo", "systemctl", "enable", "--now", "qemu-guest-agent"] -``` - - -# Validate - -1. Navigate to the left **Main Menu** and click **Profiles**. - - -2. Verify your newly added manifest is listed. - -# Next Steps - -Try applying the template to your cluster. Navigate to **Clusters** and click `+` next to Addon Layers, then select the VMO profile you created. \ No newline at end of file diff --git a/content/docs/04.6-vm-management/10-create-manage-vm/20-access-cluster-with-virtctl.md b/content/docs/04.6-vm-management/10-create-manage-vm/20-access-cluster-with-virtctl.md deleted file mode 100644 index d7c5bb1620..0000000000 --- a/content/docs/04.6-vm-management/10-create-manage-vm/20-access-cluster-with-virtctl.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: "Access VM Cluster with virtctl" -metaTitle: "Set up virtctl" -metaDescription: "Set up KubeVirt virtctl to facilitate VM operations in Palette Virtual Machine Orchestrator." -icon: " " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -The virtctl command-line interface (CLI) tool facilitates some of the virtual machine (VM) operations you will perform by providing convenient commands for copying and pasting into the virtual console, starting and stopping VMs, live migrating VMs, and uploading VM disk images. - -The virtctl CLI also provides a lightweight Secure Copy Protocol (SCP) client with the `virtctl scp` command, which you can use to transfer files to and from a VM. Its usage is similar to the ssh command. - - -# Prerequisites - -- An active virtual cluster with Palette Virtual Machine Orchestrator (VMO). - - -- Access to the virtual cluster. - - - -# Download and Connect virtctl - -1. Download the most recent virtctl artifact based on your machine type from the official [KubeVirt Assets](https://github.com/kubevirt/kubevirt/releases/tag/v0.60.0-alpha.0). Scroll down to the **Assets** section. - - -2. Assign the execute permission to the virtctl command. - -
- - ```shell - chmod +x virtctl - ``` -
- -3. Next, log in to [Palette](https://console.spectrocloud.com) to connect your host cluster with the virtctl CLI. - - -4. Navigate to the left **Main Menu** and select **Clusters**. - - -5. Select the cluster you want to connect to. - - -6. From the cluster overview page, navigate to the middle column containing cluster details and locate the **Kubernetes Config File** row. - - -7. Click on the kubeconfig link to download the file. - - -8. Open a terminal window and set the KUBECONFIG environment variable to the file path of the kubeconfig file. - - Example: - ```shell - export KUBECONFIG=~/Downloads/dev-cluster.kubeconfig - ``` - -
- -9. Issue the `virtctl ssh <vm-name>` or `virtctl vnc <vm-name>` command, replacing `<vm-name>` with the name of your VM, to display the login screen. - - Example: - ```shell - virtctl ssh ubuntu - ``` -
- -You can now issue virtctl commands against the VM in your Kubernetes cluster. - -# Validate - -Verify you have access to your virtual machine by issuing virtctl commands against it, as shown in the example below. - -
- -```bash -virtctl guestosinfo -``` \ No newline at end of file diff --git a/content/docs/04.6-vm-management/10-create-manage-vm/25-vm-oversubscription.md b/content/docs/04.6-vm-management/10-create-manage-vm/25-vm-oversubscription.md deleted file mode 100644 index 1764e2b4be..0000000000 --- a/content/docs/04.6-vm-management/10-create-manage-vm/25-vm-oversubscription.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: "VM Performance" -metaTitle: "VM Performance" -metaDescription: "Learn how to improve VM performance by maximizing virtual machine CPU and memory using Palette." -icon: " " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -Palette Virtual Machine Orchestrator (VMO) allows administrators to oversubscribe the physical resources on a host to maximize the number of active workloads. - -VM workloads typically have varying resource demands and peak utilization patterns. By oversubscribing resources, it is possible to allocate them flexibly and take advantage of the fact that not all VMs will require their maximum allocation simultaneously. - -The hypervisor automatically overcommits CPU and memory. This means that more virtualized CPU and memory can be allocated to VMs than there are physical resources on the system. - -
- -## CPU Overcommit - -Kubevirt offers the `cpuAllocationRatio` in its Custom Resource Definitions (CRD). This ratio is used to normalize the amount of CPU time the pod will request based on the number of virtual CPUs (vCPUs). - -Using the following algorithm, when `cpuAllocationRatio` is set to 1, the full amount of vCPUs are requested for the pod: `pod CPU request = number of vCPUs * 1/cpuAllocationRatio`. - -The `cpuAllocationRatio` is global, so setting it to greater than 1 has the effect of requesting less CPU from Kubernetes for each VM. - -Certain workloads that require a predictable latency and enhanced performance would benefit from obtaining dedicated CPU resources. KubeVirt relies on the Kubernetes CPU manager to pin vCPUs to the physical host’s CPUs. To learn more, refer to [Dedicated CPU Resources](https://kubevirt.io/user-guide/virtual_machines/dedicated_cpu_resources/) and [Resources Requests and Limits](https://kubevirt.io/user-guide/virtual_machines/virtual_hardware/#resources-requests-and-limits) Kubevirt documentation. - - -
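A hedged sketch of where `cpuAllocationRatio` lives in the KubeVirt custom resource is shown below. The value of 10 mirrors the vCPU-per-core guidance in the note that follows and is only an example; whether you set it through the Virtual Machine Orchestrator pack values or directly on the KubeVirt resource depends on your setup.

```yaml
# Illustrative only: with cpuAllocationRatio set to 10, a VM with 10 vCPUs results in a
# pod CPU request of roughly one physical CPU (pod CPU request = vCPUs * 1/ratio).
apiVersion: kubevirt.io/v1
kind: KubeVirt
metadata:
  name: kubevirt
  namespace: kubevirt
spec:
  configuration:
    developerConfiguration:
      cpuAllocationRatio: 10
```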
- - - -- We do not recommend overcommitting CPUs in a production environment without extensive testing. Applications that use 100 percent of processing resources may become unstable in overcommitted environments. - - -- Ensure you don't overcommit guest VMs on more than the physical number of processing cores. For example, a guest VM with four vCPUs should only be deployed on a host physical machine with a quad-core processor instead of a dual-core processor. - - We recommend no more than 10 total allocated vCPUs per physical processor core. - - - -
- -## Memory Overcommit - -KubeVirt allows you to assign more or less memory to a VM than a VM requests to Kubernetes. You may want to overcommit VM memory if you have a cluster or a few nodes that are dedicated to running VMs. In this case, overcommitting memory makes use of all the memory in the nodes regardless of reserved or requested memory from the system. - -To learn about options for memory overcommitment, refer to [Node Overcommit](https://kubevirt.io/user-guide/operations/node_overcommit/) KubeVirt resource. - -You can make several changes to reduce the memory footprint and overcommit the per-VMI memory overhead. - -
- -- Enable guest overhead overcommit by setting `spec.domain.resources.overcommitGuestOverhead` to true. - - -- Enable guest memory by setting `spec.domain.memory.guest` to a value higher than `spec.domain.resources.requests.memory`, as shown in the example. - -```yaml - apiVersion: kubevirt.io/v1alpha3 - kind: VirtualMachineInstance - metadata: - name: testvmi-nocloud - spec: - terminationGracePeriodSeconds: 30 - domain: - resources: - overcommitGuestOverhead: true - requests: - memory: 1024M - memory: - guest: 2048M -``` - -
- -- Enable implicit memory overcommit by setting `spec.configuration.developerConfiguration.memoryOvercommit` in the KubeVirt CRD to a percentage of the desired memory overcommit. - -# Resources - -- [Dedicated CPU Resources](https://kubevirt.io/user-guide/virtual_machines/dedicated_cpu_resources/) - - -- [Resources Requests and Limits](https://kubevirt.io/user-guide/virtual_machines/virtual_hardware/#resources-requests-and-limits) - - -- [Node Overcommit](https://kubevirt.io/user-guide/operations/node_overcommit/) \ No newline at end of file diff --git a/content/docs/04.6-vm-management/15-vm-roles-permissions.md b/content/docs/04.6-vm-management/15-vm-roles-permissions.md deleted file mode 100644 index 986f8d7080..0000000000 --- a/content/docs/04.6-vm-management/15-vm-roles-permissions.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "VM User Roles and Permissions" -metaTitle: "VM User Roles and Permissions" -metaDescription: "Learn about roles and permissions to apply to VMs when using Palette Virtual Machine Orchestrator." -icon: " " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -You must configure roles and role binding before any user, including you as administrator, can access Palette Virtual Machine Orchestrator (VMO). There are two sets of roles: Cluster Roles and Palette Roles, along with the required bindings configuration. - -Palette provides the following four out-of-the-box Cluster roles for Palette Virtual Machine Orchestrator. The table also lists the corresponding Palette roles. - -
-
-| Cluster Role | Description | Restrictions | Palette Role |
-|-----------|-------------|-----------|-----------|
-| ``spectro-vm-admin`` | Has admin privileges to manage the Kubernetes cluster, VMs, and templates. | None | Cluster Admin or Editor<br /><br />Cluster Profile Admin or Editor<br /><br />Virtual Machine Admin |
-| ``spectro-vm-power-user`` | Can perform most VM operations, but does not handle infrastructure aspects. | Cannot manage or administer the Kubernetes cluster.<br /><br />Cannot manage or update VM templates. | Cluster Viewer<br /><br />Virtual Machine Power User |
-| ``spectro-vm-user`` | Primarily uses VMs created by others. | Cannot launch new VMs or clone existing ones.<br /><br />Cannot delete VMs.<br /><br />Cannot migrate VMs from one node to another. | Cluster Viewer<br /><br />Virtual Machine User |
-| ``spectro-vm-viewer`` | A view-only role. | Cannot perform any of the operations offered to the above users. | Cluster Viewer<br /><br />Virtual Machine Viewer |
-
- - - -These roles are currently relevant only for accessing Palette Virtual Machine Orchestrator APIs. To access the Virtual Machines console, users must have permissions to access the host clusters. These permissions can be granted through the [default Kubernetes roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#default-roles-and-role-bindings) Admin/Editor/Viewer. - - -
- -You can create additional roles based on the permissions granularity that Palette offers. Palette provides the ability to specify bindings to configure granular Role-Based Access Control (RBAC) rules. - -
- - -You can configure namespaces and RBAC from within a cluster or from a Palette workspace that contains a cluster group. In a cluster group, all RoleBindings must occur at the namespace level. For details, review the [Cluster RBAC](/clusters/cluster-management/cluster-rbac/) and [workspace RBAC](/workspace/#rolebasedaccesscontrol(rbac)) guides. - -Palette leverages Regex Pattern matching so you can select multiple namespaces to apply role bindings. Check out [Regex for Namespaces](/workspace/workload-features) to learn more. - -
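As a hedged example of the kind of binding this granularity enables, the manifest below grants a group the ``spectro-vm-user`` cluster role across the cluster. The binding and group names are placeholders; use a namespaced RoleBinding instead if you want to restrict the role to specific namespaces.

```yaml
# Hypothetical ClusterRoleBinding that maps an identity-provider group to the
# spectro-vm-user cluster role.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: vm-users-binding           # placeholder
subjects:
  - kind: Group
    name: vm-users                 # placeholder group name
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: spectro-vm-user
  apiGroup: rbac.authorization.k8s.io
```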
- -
- - - - diff --git a/content/docs/05-workspace.md b/content/docs/05-workspace.md deleted file mode 100644 index cdbc0f606c..0000000000 --- a/content/docs/05-workspace.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: "Workspaces" -metaTitle: "Creating Workspaces for Spectro Cloud Clusters" -metaDescription: "The methods of creating Workspaces" -icon: "workspaces" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -Palette extends its multi-cluster management and governance capabilities by introducing **Workspaces**. Workspaces enable the logical grouping of clusters and namespaces to provide application or team-specific governance and visibility into workloads, cost, and usage metrics. For example, the application or team workload may be deployed into namespaces across clusters to achieve High Availability (HA), Disaster Recovery (DR), organization-specific placement policies, etc. Grouping such namespaces and clusters into a workspace provide central management and governance in a multi-cluster distributed environment. The following sections describe various aspects of multi-cluster management via workspaces. - -# Namespace Management - -Workspaces automate the creation and deletion of namespaces common to all clusters within the workspace. A workspace can hold a set of namespaces. Spectro Cloud Palette will periodically reconcile the workspace definition and add/remove namespaces if required from all clusters part of the workspace. - -# Quota Control - -Usage quota in terms of CPU and memory usage limits is specified within the namespaces. Spectro Cloud Palette sets the specified limits across all the clusters in the namespaces. - -# Role Based Access Control(RBAC) - -Role bindings and cluster role bindings are specified within workspaces. Furthermore, these role bindings and cluster role bindings are created in every cluster within the workspaces, thus enabling centralized RBAC. - -# Utilization - -Spectro Cloud Palette reports detailed resource utilization of workloads deployed in all the namespaces in the workspace across clusters. In addition, the CPU and memory usage trends within the workspace provide valuable insights into the consumption patterns of an application distributed across clusters. - -# Cost Attribution - -Spectro Cloud Palette computes utilization costs for workloads deployed in all the namespaces that are part of the workspace across all the clusters based on the detailed resource utilization data. This can be used for internal charge-back or show-back purposes to determine the cost incurred by an application or team. - -# Workload Visibility - -Workspaces provide a workload browser to view all the workloads such as pods, deployment, jobs, stateful sets, etc., deployed in all the namespaces that are part of the workspace across all the clusters. The workload browser aggregates resources across clusters from relevant namespaces and presents them with centralized visibility. - -# Backup and Restore - -A workspace-based backup is similar to a cluster backup, with the additional coverage of multiple clusters, should the workspace include more than one. The prerequisites and detailed instructions to backup and restore clusters are specified on the [Clusters page](/clusters/#manage_clusters). 
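Returning to the Quota Control section above, the hedged manifest below illustrates the kind of namespace-scoped object that such CPU and memory limits translate to in each cluster. The namespace and values are placeholders, and Palette manages the actual objects for you.

```yaml
# Illustrative ResourceQuota showing workspace-style CPU and memory limits applied to a
# namespace in every cluster that belongs to the workspace.
apiVersion: v1
kind: ResourceQuota
metadata:
  name: workspace-quota            # placeholder
  namespace: team-namespace        # placeholder namespace
spec:
  hard:
    requests.cpu: "4"
    requests.memory: 8Gi
    limits.cpu: "8"
    limits.memory: 16Gi
```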
- - -# Regex for Namespaces - -Palette leverages [Regex Pattern matching](/workspace/workload-features#regexfornamespaces) to select multiple namespaces for role binding at the same time. When many namespaces need to be configured for role binding, you can provide a Regex pattern that matches multiple namespaces instead of specifying a single namespace. All namespaces that match the pattern are then selected together for role binding. - -
-
diff --git a/content/docs/05-workspace/1-adding-a-new-workspace.md b/content/docs/05-workspace/1-adding-a-new-workspace.md deleted file mode 100644 index ecd13d0644..0000000000 --- a/content/docs/05-workspace/1-adding-a-new-workspace.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: "Adding a Workspace" -metaTitle: "Adding a workspace" -metaDescription: "How to create multi-cluster workspace in Palette" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - - -# Overview - -Palette enables multi-cluster management and governance capabilities by introducing Workspaces. This section explains how a workspace can be created in the Palette console. - -# Prerequisites - - * One or more running workload clusters within the project. - * Cluster must not be imported with read-only mode. - * RBAC should not be set at cluster level but to be included at workspace level. - * Palette Virtual Clusters cannot be part of the workspace. - -# Create Your Workspace - -#### 1. Add the Basic Information -Provide the basic information for the workspace such as: - -* Unique Name -* Optional Description -* Optional Tag - -#### 2. Associate Clusters - - * Select the cluster(s) to be added to the workspace. (See [New Clusters](/clusters) to learn how to add a new Cluster.) Palette clusters, as well as brownfield clusters, can be added to your workspace. - - - * Configure the Cluster Role Binding (optional). Role bindings can be created on all workspace clusters. - - As step 2 of the new Workspace creation, select **Add Cluster Role Binding**. - - Provide the name of the role for which the cluster role binding needs to be created. The role should be pre-existing or an in-built system role. Palette does not create cluster roles. - - Subjects for the cluster role binding can be groups, users, or service accounts. - - | **Subject Type** | **Subject Name** | **Subject Namespace** | - | ---------------- | ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | - | **User** | a valid path segment name | NA | - | **Group** | a valid path segment name | NA | - | **Service Account** | a valid path segment name | Granting super-user access to all service accounts
cluster-wide is strongly discouraged. Hence, grant a
role to all service accounts in a namespace. | - - -#### 3. Associate Namespaces - -* Enter one or more namespaces that need to be part of the workspace. The combination of workspace and cluster is unique across workspaces in a project. Palette ensures that all the namespaces are created for all the clusters in the workspaces, in case they are not pre-existing. - - -* Add the resource quota for the namespaces by specifying CPU and Memory limits (optional). - - -* Configure the Role Binding (optional). The following information is required for each role binding: - * Select a namespace name or the Regex for namespaces for selecting multiple namespaces. - * Specific name for the role which is pre-existing - * Make the selection of Subjects from the dropdown list (User, Group, or ServiceAccount). For the subject selected, provide a valid path segment name. For the subject, ServiceAccount select namespace name as granting super-user access to all service accounts cluster-wide is strongly discouraged due to security concerns. - * Confirm the information provided to complete the configuration of role binding. - -#### 4. Settings - - -* [Schedule Backups](/clusters/cluster-management/backup-restore#createaworkspacebackup) - set the backup and restore policies. - -* [Container Image](/workspace/workload-features#restrictedcontainerimages) - list out the container images to be restricted within a Workspace namespace. - -#### 5. Review and finish the configuration and complete the deployment. - - diff --git a/content/docs/05-workspace/2-workload-features.md b/content/docs/05-workspace/2-workload-features.md deleted file mode 100644 index 9230671726..0000000000 --- a/content/docs/05-workspace/2-workload-features.md +++ /dev/null @@ -1,587 +0,0 @@ ---- -title: "Workspace Management" -metaTitle: "The additional features to optimize workspace performance" -metaDescription: "How to get unified view of workloads in logically grouped namespaces and clusters" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - - -# Manage Palette Workspace - -Palette supports several day 2 operations to manage the end-to-end lifecycle of the Kubernetes clusters through Workspaces. It also provides several capabilities across new and imported clusters to keep your clusters secure, compliant, up to date, and perform ongoing management operations like Backup and Restore. Additionally, you can have visibility into the workloads running inside your cluster and cluster costs. - -The following sections describe these capabilities in detail: - -
- ------------------------- - - - - - -# Workload Visibility - -Workspace provides visibility into workloads deployed across clusters. - -|**Resource**|**Description availed from Workspace**| -|---|-----| -|**Namespaces**|Cluster Specific namespaces with CPU and Memory utilization.| -|**Pods**|Lists all the pods running on a particular namespace with cluster names with the detailed health status, age, and resource utilization of each of them.| -|**Deployments**|All the running deployments specific to clusters belonging to the Workspace with namespace to which these deployments belong, pods details, replicas, and age are enumerated| -|**DaemonSets**|DaemonSet resource utilization is described, with details on namespaces, pods, and age of individual Daemon sets| -|**StatefulSets**|All the active StatefulSets specific to clusters belonging to the Workspace with corresponding namespace, pods details, replicas, and age are enumerated| -|**Jobs**|A Job creates one or more Pods and will continue to retry execution of the Pods until a specified number of them successfully terminate.| -|**CronJobs**|Cron Jobs are regularly scheduled actions or jobs such as backups, report generation, etc. Each of these jobs will recur as scheduled.| -|**RoleBinding**|A role binding grants the permissions defined in a role to a user or set of users. | -|**ClusterRoleBinding**|A Cluster Role binding defines the permissions defined across a cluster.| - - - - - -# Workspace Backup and Restore - -Palette users can create cluster backups from within a workspace (usually consisting of multiple clusters) and restore them later time as desired. Palette allows granular controls within a workspace for users to perform specific tasks within the workspace, without having the ability to update workspace details. To provide granular access within a workspace for specific actions, Palette provides the following two Roles: - -## Workspace Operator - -Users assigned the **Workspace Operator** Role can only perform Backup and Restore actions within the Workspace. - -## Workspace Admin - -A Role that has all administrative permissions and privileges within the Workspace. - -## Create your Workspace Roles - -To create your **Workspace Role**, follow the steps below: - -1. Log in to the Palette Management Console as **Tenant Admin**. - - -2. Go to the **Users and Teams** option. - - -3. From the listed users, select the user to be assigned with Workspace Roles. See here for [User Creation](/projects/#projects). - - -4. Select the **Workspace Roles** tab and click **+ New Workspace Role** to create a new role. - - -5. Fill the following information into the **Add Roles to User-Name** wizard: - * Project - * Workspace - * Choose the role from the options: - * Workspace Admin - * Workspace Operator - - -6. Confirm the information provided to complete the wizard. - - -7. The user set with the Workspace Role can take Workspace-wide Backups and Restores in compliance with their permissions and privileges. - -Palette leverages the BackUps to the following locations: - -
- -#### Amazon Web Services (AWS) S3 Buckets: [Prerequisites](/workspace/workload-features#foranamazonwebservices(aws)bucketasbackuplocation), [Configure your Backup](/workspace/workload-features#configureyourbackupinawss3) - -#### Google Cloud Platform (GCP) Buckets: [Prerequisites](/workspace/workload-features#foragooglecloudplatform(gcp)backuplocation), [Configure your Backup](/workspace/workload-features#configureyourbackupingcpbucket) - -#### MinIO S3 Buckets: [Prerequisites](/workspace/workload-features#forminios3backup), [Configure your Backup](/workspace/workload-features#configureyourbackupinminio) - -#### Azure Blob: [Prerequisites](/workspace/workload-features#forazureblobbackup), [Configure your Backup](/workspace/workload-features#configureyourbackupinazure:azureblob) - -# Prerequisites - -## For an Amazon Web Services (AWS) Bucket as Backup Location - -* The AWS S3 permissions listed in the next section need to be configured in the AWS account to provision Backup through Palette. - -* Pre-create a bucket at the AWS or MinIO object-store. - -## For a Google Cloud Platform (GCP) Backup Location - -* GCP service account with a **Storage Admin** role. - -* Pre-create a bucket at the GCP object storage. - -## For MinIO S3 Backup - -* S3 bucket with Read/Write Access - -* A unique access key (username) and corresponding secret key (password) from MinIO Console. - -* Service provider certificate (Optional) - -## For Azure Blob Backup - -* An active Azure cloud account with the following pieces of information noted down: - * Tenant Id - * Client Id - * Subscription Id - * Client Secret created - - -* An [Azure storage account](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-create?tabs=azure-portal) created with the following information to be noted down for Palette use: - * Storage Name: Custom name given to the Azure storage created. - * Stock-keeping unit - - -* A container to be created in the Azure Storage account - -# Backup Locations - -AWS Simple Cloud Storage (S3) and other S3 compliant object stores such as MinIO and GCP Buckets are currently supported as backup locations. These locations can be configured and managed under the **Project** > **Settings** option and can be selected as a backup location, while backing up any cluster in the project. - -## Configure your Backup in AWS S3 - -The following details are required to configure a backup location in AWS: - -1. **Location Name** - Name of your choice. - - -2. **Location Provider** - AWS (This is currently the only choice on the UI. Choose this option when backing up to AWS S3 or any S3 compliance object store). - - -3. **Certificate** - Required for MinIO. - - -4. **S3 Bucket** - S3 bucket name must be pre-created on the object-store. - - -5. **Configuration** - region={region-name},s3ForcePathStyle={true/false},s3Url={S3 URL}. S3 URL need not be provided for AWS S3. - - -6. **Account Information** - Details of the account which hosts the S3 bucket to be specified as Credentials or STS. - * Credentials - Provide access key and secret key. - * STS - Provide the ARN and External ID of the IAM role that has permission to perform all S3 operations. The STS role provided in the backup location should have a trust set up with the account used to launch the cluster itself and should have the permission to assume the role. - - -7. Palette mandates the AWS S3 Permissions while users use the static role to provision worker nodes. 
- -### AWS S3 Permissions - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeVolumes", - "ec2:DescribeSnapshots", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:CreateSnapshot", - "ec2:DeleteSnapshot" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:DeleteObject", - "s3:PutObject", - "s3:AbortMultipartUpload", - "s3:ListMultipartUploadParts" - ], - "Resource": [ - "arn:aws:s3:::BUCKET-NAME/*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "s3:ListBucket" - ], - "Resource": [ - "arn:aws:s3:::BUCKET-NAME" - ] - } - ] - } - - ``` - -### Trust Setup Example - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::141912899XX99:root" - }, - "Action": "sts:AssumeRole", - "Condition": {} - } - ] - } - ``` - -## Configure your Backup in GCP Bucket - -These locations can be configured and managed from the **Settings** option under **Project** and can be selected as a backup location while backing up any cluster in the project. - -The following details are required to configure a backup location in GCP: - -1. **Location Name** - Name of your choice. - - -2. **Location Provider** - Google Cloud (Choose this option when backing up to the GCP bucket object store). - - -3. **Bucket** - The name of the bucket name pre-created on the object store. - - -4. **JSON Credentials** - For external authentication of the GCP storage. - - -## Configure your Backup in MinIO - -The following details are required to configure a backup location in AWS: - -1. **Location Name**: Name of your choice. - - -2. **Location Provider**: Minio - - -3. **Certificate**: Optionally required for MinIO. - - -4. **S3 Bucket**: S3 bucket name must be pre-created on the MinIO object-store. - - -5. **Region**: Region in which the S3 bucket is created. Example: us-east-1 - - -6. **S3 URL**: Url of the MinIO object storage console. Example: `http://12.123.234.567:0000' - - -7. **Force S3 path style** : To force S3 pathstyle addressing or else the url will be converted to virtual-hosted style addressing with bucket name appended to the url.This is an optional setting. - - -8. **Authenticate** using MinIo access key and secret access key. - - -9. Click **Create** to complete the location creation wizard. - -## Configure your Backup in Azure: Azure Blob - -The following details are required to configure a backup location in Azure: - -1. **Location Name**: A custom name for the storage location getting created. - - -2. **Location Provider:** Select **Azure** from the drop-down. - - -3. **Container Name:** The container created in Azure Storage. - - -4. **Storage Name**: Name of the Azure storage created. - - -5. **Stock-Keeping Unit**: Information from the Azure storage. - - -6. **Resource Group:** Azure Resource Group name - - -7. **Tenant ID:** Azure Account Credential. - - -8. **Client ID:** Azure Account Credential. - - -9. **Subscription ID**: Azure Account Credential. - - -10. **Client Secret:** Secret created in the Azure console needs to be validated. - - -11. Click **Create** to complete the location creation wizard. - - -## Add a Backup Location - -Go to **Project Settings** > **Backup locations** > **Add a New Backup location**. - -# Create a Workspace Backup - -Backups can be scheduled or initiated in an on demand basis, during the workspace creation. The following information is required for configuring a Workspace Backup, on demand- - -1. 
**Backup Prefix / Backup Name**: For scheduled backup, a name will be generated internally, add a prefix of our choice to append with the generated name. For an on demand backup, a name of user choice can be used. - - -2. Select the Backup location. - - -3. **Backup Schedule** - Create a backup schedule of your choice from the dropdown list, applicable only to scheduled backups. - - -4. **Expiry Date** - Select an expiry date for the backups. The backup will be automatically removed on the expiry date. - - -5. **Include all disks** - Optionally, backup persistent disks as part of the backup. - - -6. **Include Cluster Resources** - Select or deselect on your choice. - - -|On Demand Backup | -|-------------------| -|Select the **Workspace to Backup** > **Settings** > **Schedule Backups**| - - -|Scheduled Backup | -|-----------------| -|**Workspace Creation** > **Policies** > **Backup Policies**| - - -## Backup Scheduling Options - -Both the cluster and workspace backup support the following scheduling options: - -* Customize your backup for the exact month, day, hour, and minute of the user's choice. -* Every week on Sunday at midnight -* Every two weeks at midnight -* Every month on the 1st at midnight -* Every two months on the 1st at midnight - -# Restore a Backup - -Backups created manually or as part of the schedule are listed under the Backup/Restore page of the cluster. - -1. Restore operation can be initiated by selecting the restore option for a specific backup. - - -2. Next, you will be prompted to select a target cluster where you would like the backup to be restored. The progress of the restore operation can be tracked from the target cluster's Backup/Restore page. - - -3. Finally, restore operations can be done to the cluster running on the same project. - - - -## Restore Your Backup - -To initiate a restore operation: -
- -1. Log in to the Palette console as the **Project Admin** and go to **Workspaces** page. - - -2. Select the **Workspace Name** to be restored. - - -3. From the selected Workspace overview, select **Backups** from the top menu. - - -4. The Backup option lists all the backups scheduled for the selected Workspace. Towards the name of the backup, click the meatball (three horizontal dots) button to open the restore wizard. - - -5. Click on the **Restore Backup** option to complete the wizard: - * Choose of the namespaces to be restored - * Three options are available to filter the resources to be restored: - * **Include Cluster Resources** - To restore all the cluster scoped resources. - * **Preserve Node Ports** - To preserve ports for node port service running in the cluster. - * **Restore PVs** - To restore the persistent volumes. - - **Note**: Check **Include Cluster Resource** and **Restore PVs** options together. - - -6. Make the appropriate choice of resources as per user requirements to complete the wizard. - - -
- - - - -# Workspace Quota - -Palette enables the users to limit resource usage within the workspace optionally. The Quota is specified in terms of the maximum CPU and memory. Therefore, the resource utilization within the namespace should be below the Quota allocated across all the clusters. - -
- -## To set your Resource Quota: - -1. During [Step: 3 Associate Namespaces](/workspace/adding-a-new-workspace#3.associatenamespaces) of Namespace creation, **Workspace Quota** can be set by giving the **Maximum CPU** and **Maximum Memory**. Then, all the clusters launched within the Namespace can use the set Quota. - - -2. Namespace Quota can be set for an already deployed workspace as: - `Workspace Settings -> Namespaces -> Workspace Quota` - -### Workspace Quota Notes: - -* The quota allocated to the workspace scope is split across all the namespaces under that workspace per their resource requirements. - - -* The palette allows quotas to be allocated to individual namespaces under a specific workspace. In that case, individual clusters belonging to that namespace can utilize the quota per their resource requirements. When a namespace is allocated with a quota, all the clusters belonging to that namespace get allocated with that resource quota individually. - - **Example**: If Namespace palette-ns belongs to two (2) clusters, p1 and p2, and palette-ns is allocated a quota of 1 CPU and 1 Gb memory, each of p1 and p2 gets allocated 1 CPU and 1 GB memory individually. - - -* Palette allows quota to be allocated to individual clusters under a specific workspace. In that case, the allocated quota should not exceed the namespace quota. - - -* To set an unlimited quota, set the quota value as -1. - * If -1 is set as the quota for a cluster, then we cannot set a quota for the workspace to which the cluster belongs. - * If -1 is set as the quota for a Workspace, then we cannot set a quota for the clusters belonging that Workspace. - - -
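The workspace quota is conceptually similar to a standard Kubernetes `ResourceQuota`. The following sketch is for illustration only: it expresses the example above (1 CPU and 1 GB of memory for the `palette-ns` namespace) as a plain `ResourceQuota`. It is not necessarily the exact object Palette creates, and the object name is hypothetical.

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: palette-ns-quota      # hypothetical name, for illustration only
  namespace: palette-ns
spec:
  hard:
    limits.cpu: "1"           # corresponds to the Maximum CPU setting
    limits.memory: 1Gi        # corresponds to the Maximum Memory setting
```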
- - -# Regex for Namespaces - -Palette leverages Regex Pattern matching to select multiple namespaces to apply Role binding concurrently. When we have many namespaces to be configured for role binding, the user can provide a Regex pattern matching multiple namespaces instead of giving a single namespace. This will help select all the namespaces matching the given Regex pattern to be selected together for role binding. - -## Use Cases - -1. A Regex pattern that start and end with " / ", will select all the workspace names matching the given Regex pattern. - - **Example:** `/^palette-ns/` -
- -2. A Regex pattern that starts with `negation symbol(~)`, will select all the namespaces that *does not match* with the regex expression given. - - **Example:** `~/^(kube|cluster|capi|jet|cert)[-].+/` - -**Note**: No spaces to be added between the `~` operator and the `expression`. - -
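For illustration only, the following shows how the two example patterns above evaluate against a set of hypothetical namespace names:

```yaml
"/^palette-ns/":                          # selects namespaces whose names start with palette-ns
  selects: [palette-ns, palette-ns-dev]
  skips: [kube-system, team-frontend]
"~/^(kube|cluster|capi|jet|cert)[-].+/":  # negation: selects namespaces that do NOT match
  selects: [team-frontend, palette-ns-dev]
  skips: [kube-system, cert-manager]      # these match the inner pattern, so they are excluded
```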
- - - -# Workspace Role Binding - -Workspace Role Binding is a Project scope operation. There are two available options for setting up Roll Binding for a Workspace: - -* **Cluster** to create a RoleBinding with cluster-wide scope (ClusterRoleBinding). - - -* **Namespaces** to create a RoleBinding within namespaces scope (RoleBinding). - -Palette users can choose role creation based on their resource requirements. - -## Configure cluster role bindings - -* Login to Palette as Project admin and select the Workspace to which the Role Binding need to configured. - - -* Select Settings -> Cluster - - -* Select the clusters from the workspace to Role Bind. - - -* Click on “Add new binding” to open the “Add Cluster Role Binding” wizard. Fill in the following details: - * Role Name: Define a custom role name to identify the cluster role - * Subjects: Subjects are a group of users, services, or teams using the Kubernetes API. It defines the operations a user, service, or a team can perform. There are three types of subjects: - * Subject Type: - * Users: These are global and meant for humans or processes living outside the cluster. - * Groups: Set of users. - * Service Accounts: Kubernetes uses service accounts to authenticate and authorize requests by pods to the Kubernetes API server. These are namespaced and meant for intra-cluster processes running inside pods. - * Subject Name: Custom name to identify a subject. -A single RoleBinding can have multiple subjects. - - -* “Confirm” the information to complete the creation of the ClusterRoleBinding. - -## Configure role bindings: Namespace Scope - -Users can now allocate CPU and Memory [quotas](/workspace/workload-features#workspacequota) for each **namespace** at the cluster level. - -* Login to Palette as Project admin and select the Workspace to which the Role Binding need to be configured. - - -* Select Cluster Settings -> Namespace. - - -* Create a namespace with a custom name and add it to the list of the namespace by clicking on “add to the list”. - - -* [Allocate resources](/workspace/workload-features#workspacequota) to the created namespace (CPU and Memory). - - -* Click on “Add new binding” to open the “Add ClusterRoleBinding” wizard. Fill in the following details: - * Namespace: Select the namespace from the drop-down (the list will display the namespaces created during the previous step. - * Role Type: Select the role type from the drop-down. Either Role or Cluster Role. - - -A RoleBinding may reference any Role in the same namespace. Alternatively, a RoleBinding can reference a ClusterRole and bind that ClusterRole to the namespace of the RoleBinding. For example, if you want to bind a ClusterRole to all the namespaces in your cluster, you use a ClusterRoleBinding. - - -* Role Name: Define a custom role name to identify the cluster role - - -* Subjects: Subjects are a group of users, services, or teams using the Kubernetes API. It defines the operations a user, service, or group can perform. There are three types of subjects: - * Subject Type: - * Users: These are global, and meant for humans or processes living outside the cluster. - * Groups: Set of users. - * Service Accounts: Kubernetes uses service accounts to authenticate and authorize requests by pods to the Kubernetes API server. These are name spaced and meant for intra-cluster processes running inside pods. - * Subject Name: Custom name to identify a subject. -A single RoleBinding can have multiple subjects. 
- - -* “Confirm” the information to complete the creation of the RoleBinding. - - - - - - -# Restricted Container Images - -Palette users can restrict a few container images from getting deployed into a specific Namespace. This helps the tenants from accidentally installing a delisted or unwanted container to that specific namespace. - -
- -## Restrict container images to a workspace - - To restrict a container image for a particular namespace within the workspace: - -1. During [Step: 4 Settings](/workspace/adding-a-new-workspace#4.settings) of workspace creation, select the **Container Images** tab from the left ribbon. - - -2. Click on **+ Add New Container Image** and provide the **Namespace** and **Restricted Images**. Multiple images can be restricted within a namespace by separating them with commas. -
- -## Restrict container images to a deployed workspace - -The user can add a list of restricted images to an already deployed workspace as: - -1. **Workspace Settings** > **Container Images** - - -2. Click on **Add New Container Image** and provide the **Namespace** and **Restricted Images**. Multiple images can be restricted within a Namespace by separating them with commas. - - - - - -
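For illustration only, an entry might look like the following; the namespace and image names are hypothetical, and the restricted images are provided as a comma-separated list:

```yaml
namespace: dev-team-a
restrictedImages: "nginx:latest, redis:6.2, busybox:1.35"
```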
-
- -
-
-
- - - - - diff --git a/content/docs/06-integrations.mdx b/content/docs/06-integrations.mdx deleted file mode 100644 index 00ec051f60..0000000000 --- a/content/docs/06-integrations.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "Packs List" -metaTitle: "Packs List" -metaDescription: "Learn about packs that Palette offers and choose from Pallette packs." -icon: "teams" -hideToC: true -fullWidth: true -hideToCSidebar: true ---- - -import {Content} from "shared/layouts/Default"; -import Tabs from "shared/components/ui/Tabs"; -import Packs from "shared/components/common/Integrations/Packs" -import AppTiers from "shared/components/common/Integrations/AppTiers" - - - -# Overview -Palette provides packs that are tailored for specific uses to support the core infrastructure a cluster needs and add-on packs to extend Kubernetes functionality. Each pack you add to a cluster profile is considered a layer in the profile. - -When you create a cluster profile, you choose the type of pack you want to add: **Full**, **Infrastructure**, or **Add-on**. **Full** refers to the combination of **Infrastructure** and **Add-on** packs. - -When you choose **Infrastructure** or **Add-on**, Palette presents only packs that provide functionality for the selected pack type. When you choose **Full**, Palette presents all the packs so you can build your cluster profile from the base layer up. To learn more about cluster profiles, check out the [Cluster Profiles](/cluster-profiles) guide. - -To learn more about individual packs, use the search bar below to find a specific option. Alternatively, you can use the filter buttons to display available options. To learn about pack update and deprecation schedules, review [Maintenance Policy](/integrations/maintenance-policy). - - diff --git a/content/docs/06-integrations/00-antrea-cni.md b/content/docs/06-integrations/00-antrea-cni.md deleted file mode 100644 index a6198ccdf8..0000000000 --- a/content/docs/06-integrations/00-antrea-cni.md +++ /dev/null @@ -1,142 +0,0 @@ ---- -title: 'Antrea CNI' -metaTitle: 'Antrea CNI' -metaDescription: 'Antrea CNI network pack for Palette Kubernetes Clusters' -hiddenFromNav: true -type: "integration" -category: ['network', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/antrea/blobs/sha256:3c5704caf6652c63374282cbf413f8e73a77c4efbc49f375c19c73f8e2ec4148?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Antrea CNI - -Palette supports Antrea controller network interface (CNI) for VMware Kubernetes clusters. Antrea CNI enables each pod to have exclusive IP addresses from the subnet with direct accessibility. - -Antrea leverages [Open vSwitch](https://www.openvswitch.org/) to implement pod networking and security features. Open vSwitch enables Antrea to implement Kubernetes network policies efficiently. - -
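For example, Antrea enforces standard Kubernetes `NetworkPolicy` objects. The minimal policy below is illustrative only (the namespace and labels are hypothetical); it allows ingress to pods labeled `app: web` only from pods labeled `app: frontend` on TCP port 80 and denies all other ingress to those pods:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend-to-web     # hypothetical name
  namespace: demo                 # hypothetical namespace
spec:
  podSelector:
    matchLabels:
      app: web                    # the policy applies to pods labeled app=web
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector:
            matchLabels:
              app: frontend       # only pods labeled app=frontend may connect
      ports:
        - protocol: TCP
          port: 80
```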
- -# Supported Versions - -**1.9.x** - -
- -# Prerequisites - -- Enable the integrated NodeIPAM controller in the Antrea manifest: ``NodeIPAM:enable``. -
- -- When deploying a cluster using Palette, use the ``podCIDR`` parameter in the Pack section of the Kubernetes manifest. The classless inter-domain routing (CIDR) IP specified in the Kubernetes manifest always takes precedence. - -
- -- When deploying a cluster using ``kubeadm init``, to use Antrea CIDRs, specify the ``--pod-network-cidr`` option and provide the IP address with the CIDR. For example: -
- - ``--pod-network-cidr=10.244.0.0/16`` - -
- - - -The CIDR IP specified in Palette using the ``podCIDR`` parameter in the Kubernetes manifest always takes precedence. - -If you wish to use Antrea CIDRs, the ``podCIDR`` and ``serviceCIDR`` parameters must be blank in the Kubernetes manifest. - -To avoid overlapping your pod network with any of your host networks, choose a suitable CIDR block to specify when you deploy a cluster using ``kubeadm init`` or as a replacement in your network plugin's YAML. - - -
- -- The Open vSwitch kernel module must be present on every Kubernetes node. - - -# Parameters - -The Antrea CNI pack supports the following parameters. - -| Parameter | Description | Required (Y/N) | -|-----------|-------------|---------| -| nodeIPAM:enable | Enables the integrated NodeIPAM controller in the Antrea manifest. The default is `false`. | Y | -| clusterCIDRs | CIDR ranges for pods in the cluster. The CIDRs can be either IPv4 or IPv6. You can specify up to one CIDR for each IP family. | N | -| serviceCIDR | IPv4 CIDR ranges reserved for Services. | N | -| serviceCIDRv6 | IPv6 CIDR ranges reserved for Services. | N | -| nodeCIDRMaskSizeIPv4 | Mask size for IPv4 Node CIDR in IPv4 or dual-stack cluster. | N | -| nodeCIDRMaskSizeIPv6 | Mask size for IPv6 Node CIDR in IPv6 or dual-stack cluster. | N | -| NodeIPAM | The feature toggle for ``antrea-controller``. The default is `false`. If you use CIDR ranges, set this to ``true``. | N | -| ServiceExternalIP | The feature toggle for ``antrea-agent`` and ``antrea-controller``. If you use the LoadBalancer service, set this to ``true``. | N | - - -# Usage - -Kubernetes network policies are supported by default. - -Antrea supports LoadBalancer services. Typically, implementing LoadBalancer services requires an external load balancer that is implemented by the Kubernetes cloud provider. - -Antrea provides two options for supporting LoadBalancer services without using an external load balancer: - -
- -- Using Antrea’s built-in external IP management for Services of type LoadBalancer. - -- Leveraging MetalLB. - -For detailed information, refer to Antrea’s [Service of type LoadBalancer](https://antrea.io/docs/v1.9.0/docs/service-loadbalancer) documentation. - -To learn more about using MetalLB, review [Using MetalLB with Antrea](https://antrea.io/docs/v1.9.0/docs/service-loadbalancer/#using-metallb-with-antrea). - -
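If you want to use the built-in NodeIPAM and external IP management features described above, the relevant values look roughly like the following. This sketch uses the upstream Antrea Helm chart's value names; the exact nesting inside the Palette pack's YAML may differ, and the CIDR ranges are placeholders that must not overlap your host networks:

```yaml
featureGates:
  NodeIPAM: true                      # feature toggle for antrea-controller (see Parameters)
  ServiceExternalIP: true             # required for Antrea's built-in LoadBalancer external IP management

nodeIPAM:
  enable: true                        # enables the integrated NodeIPAM controller
  clusterCIDRs: ["10.244.0.0/16"]     # placeholder pod CIDR
  serviceCIDR: "10.96.0.0/12"         # placeholder Service CIDR
```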
- -# Troubleshooting - -If routing problems occur or some hosts cannot communicate outside their subnet, this indicates overlapping IP addresses or conflicting CIDR IPs. - -Ensure you have provided a non-overlapping IP address for your pod network in Palette's Kubernetes manifest using the ``podCIDR`` parameter. The CIDR IP specified with the ``podCIDR`` parameter in the Kubernetes manifest always takes precedence. - -If you wish to use Antrea CIDRs and have deployed a cluster using Palette, ensure that you have done the following: - -- Removed any value for ``podCIDR`` and ``serviceCIDR`` in the Kubernetes manifest. -- Provided a non-overlapping IP address for your pod network. - -
- -# Terraform - -You can reference the Antrea CNI pack in Terraform with a data resource. - -```tf -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "antrea" { - name = "antrea" - version = "1.9.0" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - -# References - -- [Antrea Service of type LoadBalancer](https://antrea.io/docs/v1.9.0/docs/service-loadbalancer) -- [MetalLB](https://metallb.universe.tf) -- [Antrea](https://antrea.io/) -- [Antrea IPAM Capabilities](https://antrea.io/docs/v1.6.1/docs/antrea-ipam/) -- [Using MetalLB with Antrea](https://antrea.io/docs/v1.9.0/docs/service-loadbalancer/#using-metallb-with-antrea) - -
- -
- diff --git a/content/docs/06-integrations/00-argo-cd.md b/content/docs/06-integrations/00-argo-cd.md deleted file mode 100644 index 023834c0dc..0000000000 --- a/content/docs/06-integrations/00-argo-cd.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: 'Argo CD' -metaTitle: 'Argo CD' -metaDescription: 'Argo CD for Spectro Cloud Palette' -hiddenFromNav: true -type: "integration" -category: ['system app', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/argo-cd/blobs/sha256:647cd3df6fec421e6580589ea7229762d8e828c77036f835f14f4c15c2a44c4c?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -[Argo CD](https://argo-cd.readthedocs.io/en/stable/) is a declarative, GitOps continuous delivery tool for Kubernetes. Argo CD follows the GitOps pattern of using Git repositories as the source of truth for defining the desired application state. Argo CD automates the deployment of the desired application states in the specified target environments. Application deployments can track updates to branches, tags, or pinned to a specific version of manifests at a Git commit. Start using Argo CD with Palette today by consuming this pack. - - -# Prerequisites - -- Kubernetes 1.7+ - -# Version Supported - - - - -* **3.3.5** - - - - - - - -* **3.2.6** - - - - - -# Notable Parameters - -| Parameter | Description | -|-----------------------|------------------------------------------------------------------------------------------------| -| global.image.repository | The repository that is the source of truth. | -| global.image.tag | The image tag to pull. | -| global.image.imagePullPolicy | If defined, a imagePullPolicy applied to all ArgoCD deployments. Defaults to ` IfNotPresent` | -| global.securityContext | A list of security contexts -|imagePullSecrets| If defined, uses a Secret to pull an image from a private Docker registry or repository. -|hostAliases| A list of mapping between IP and hostnames that will be injected as entries in the pod's hosts files - - - -# References - -[Argo CD](https://argo-cd.readthedocs.io/en/stable/) diff --git a/content/docs/06-integrations/00-aws-autoscaler.md b/content/docs/06-integrations/00-aws-autoscaler.md deleted file mode 100644 index 5e21222f01..0000000000 --- a/content/docs/06-integrations/00-aws-autoscaler.md +++ /dev/null @@ -1,365 +0,0 @@ ---- -title: 'AWS Cluster Autoscaler' -metaTitle: 'AWS Cluster Autoscaler' -metaDescription: 'AWS Cluster Autoscaler for Spectro Cloud Palette' -hiddenFromNav: true -type: "integration" -category: ['system app', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/aws-cluster-autoscaler/blobs/sha256:f86813591b3b63b3afcf0a604a7c8c715660448585e89174908f3c6a421ad8d8?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# AWS Cluster Autoscaler - -Palette supports autoscaling for AWS EKS clusters by using the AWS Cluster Autoscaler pack. -The Cluster Autoscaler dynamically scales cluster resources. 
It monitors the workload and provisions or shuts down cluster nodes to maximize the cluster's performance and make it more resilient to failures. It resizes the Kubernetes cluster under the following two conditions: -
- -* Scale-up: The Cluster Autoscaler triggers a scale-up operation if insufficient cluster resources lead to multiple pod failures. The pods become eligible for scheduling on the new nodes. The Cluster Autoscaler checks for pod failures every 30 seconds and schedules impacted pods on new nodes. Scaling up will not happen when the given pods have node affinity. - - -* Scale-down: The Cluster Autoscaler triggers a scale-down operation if nodes are underutilized for ten continuous minutes, and their pods are eligible for rescheduling on other available nodes. The node utilization threshold for scaling down a node defaults to 50% of the node's capacity. The Cluster Autoscaler calculates the node utilization threshold based on CPU and memory utilization. In scenarios where the node is underutilized, the Cluster Autoscaler migrates the pods from underutilized nodes to other available nodes and then shuts down the underutilized nodes. - - -Cluster Autoscaler pack is deployed as a [*Deployment*](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) in your cluster and utilizes [Amazon EC2 Auto Scaling Groups](https://docs.aws.amazon.com/autoscaling/ec2/userguide/what-is-amazon-ec2-auto-scaling.html) to manage node groups. - - - -# Versions Supported - - - - - -## Prerequisites - -* Kubernetes 1.24.x or higher. - - -* Permission to create an IAM policy in the AWS account you use with Palette. - - -* IAM policy - A [Full Cluster Autoscaler Features](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#full-cluster-autoscaler-features-policy-recommended) IAM policy must be attached to the EKS cluster's node group. The policy must allow the Cluster Autoscaler to scale the cluster's node groups. - - There are two ways to achieve this prerequisite. You can define the policy as a *customer-managed* policy in the AWS account and use its Amazon Resource Name (ARN) in the cluster profile. Alternatively, you can attach the IAM policy as an *inline* policy to the node group if you have already deployed your cluster. Refer to the [Usage](#usage) section below to learn more. - - -* Updated Kubernetes layer manifest - The Kubernetes pack's manifest should be updated with the newly created IAM policy ARN. The YAML code block below displays the `managedMachinePool.roleAdditionalPolicies` section to update in the Kubernetes pack's manifest. Refer to the [Usage](#usage) section below for more details with an example. -
- - ```yaml - managedMachinePool: - #roleName: {{ name of the self-managed role | format "${string}" }} - ## A list of additional policies to attach to the node group role - roleAdditionalPolicies: - - {{ arn for the policy1 | format "${string}" }} - ``` -
- -## Usage - -Cluster Autoscaler helps improve your cluster's performance and makes your cluster more resilient to failures. It automatically adjusts the number of nodes in your cluster based on the current workload. In other words, Cluster Autoscaler monitors the resource utilization, such as CPU and memory, and the number of pods active in your cluster and scales the cluster when either of these events occurs: -
- -- Multiple pods fail due to resource contention. In this case, the Cluster Autoscaler will provision more nodes. - - -- Nodes are underutilized for a specific period. In this case, the Cluster Autoscaler will reschedule the pods onto other nodes and shut down the underutilized node. -
- - -### Deploy Cluster Autoscaler -To deploy the Cluster Autoscaler pack, you must first define an IAM policy in the AWS account associated with Palette. - -Next, update the cluster profile to specify the IAM policy ARN in the Kubernetes pack's manifest. Palette will attach that IAM policy to your cluster's node group during deployment. Note that Palette automatically creates two IAM roles in the AWS account when you deploy an EKS cluster. One role is for the cluster, and another for the cluster's node group. The cluster's IAM role name will have the following naming convention, `[your-cluster-name]-iam-service-role`, and the node group's IAM role name will follow the `ng-role_worker-pool-[random-string]` naming convention. - -The following steps provide detailed instructions for deploying the Cluster Autoscaler pack. - -
- -1. Define the new IAM policy using the policy outlined below, and give it a name, for example, *PaletteEKSClusterAutoscaler*. -
- - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:DescribeInstanceTypes", - "ec2:DescribeLaunchTemplateVersions" - ], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:DescribeImages", - "ec2:GetInstanceTypesFromInstanceRequirements", - "eks:DescribeNodegroup" - ], - "Resource": ["*"] - } - ] - } - ``` - - -2. Copy the IAM policy ARN to the clipboard for the next step. For example, the policy ARN will be similar to `arn:aws:iam::650628870702:policy/PaletteEKSClusterAutoscaler`. - - -3. In your cluster profile, and update the `managedMachinePool.roleAdditionalPolicies` section in the Kubernetes pack's manifest with the newly created IAM policy ARN. The snapshot below displays the specific section to update with the policy ARN. - - ![A snapshot displaying the ARN added to the Kubernetes pack's manifest.](/integrations_aws-cluster-autoscaler_k8s-manifest.png) - - For example, the code block below displays the updated `managedMachinePool.roleAdditionalPolicies` section with a sample policy ARN, `"arn:aws:iam::650628870702:policy/PaletteEKSClusterAutoscaler"`. Before you use the following code block, replace the ARN below with yours. -
- - ```yaml - managedMachinePool: - # roleName: {{ name of the self-managed role | format "${string}" }} - # A list of additional policies to attach to the node group role - roleAdditionalPolicies: - - "arn:aws:iam::650628870702:policy/PaletteEKSClusterAutoscaler" - ``` -
- - - - If you do not want to update the Kubernetes pack's manifest, you can add an *inline* IAM policy to the cluster's node group post deployment. Refer to this [AWS guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html#add-policies-console) on how to embed an inline policy for a user or role. Refer to the [AWS IAM documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html) for the differences between managed and inline policies. - - - - -4. In the cluster deployment wizard, when you are in the **Nodes configuration** section, you must enter the minimum and maximum number of worker pool nodes, and the instance type (size) that suits your requirement. - - You must provide the node count limits because the Cluster Autoscaler uses an Auto Scaling Group to manage the cluster's node group. An Auto Scaling Group requires a minimum and maximum count and the selection of an instance type. You can choose an instance type that suits your requirement. - - For example, the snapshot below displays the cluster's minimum and maximum capacity. - - ![A snapshot displaying the minimum and maximum node count in Palette.](/integrations_aws-cluster-autoscaler_node-count.png) - -
- -### Resize the Cluster - -To better understand the scaling behavior of the Cluster Autoscaler and its impact on a cluster, do the following exercise to gain firsthand experience with the scaling behavior. - -In the following example scenario, you will first create a cluster with large-sized worker pool instances. Next, you will manually reduce the instance size, leading to insufficient resources for existing pods and multiple pod failures in the cluster. As a result, the Cluster Autoscaler will provision new smaller-sized nodes with enough capacity to accommodate the current workload and reschedule those contending pods on new nodes. Also, the new nodes' count will be within the minimum and maximum limit you specified for the worker pool. - - -Use the following steps to trigger the pod rescheduling event manually: -
- -1. In the cluster deployment wizard, while defining the **Nodes configuration**, choose a large-sized instance type. For example, you can choose your worker pool to have instance size **t3.2xlarge** (8 vCPUs, 32 GB RAM) or higher. - - -2. After your cluster is successfully deployed, navigate to the **Nodes** tab in the cluster details page in Palette, and note the count and size of nodes. The snapshots below display one node of the type **t3.2xlarge** in the worker pool of a successfully deployed cluster. - - ![A snapshot displaying one node of the type **t3.2xlarge** in the worker pool.](/integrations_aws-cluster-autoscaler_one-node.png) - - - -3. Manually reduce the instance size in the worker-pool configuration to a **t3.medium** (2 vCPUs, 8 GB RAM). The snapshot below displays how to edit the instance size in the node pool configuration. - - ![A snapshot displaying how to edit node pool configuration.](/integrations_aws-cluster-autoscaler_edit-node.png) - - -4. Wait for a few minutes for the new nodes to provision. Reducing the node size will make the Cluster Autoscaler shut down the large node and provision smaller-sized nodes with enough capacity to accommodate the current workload. Also, the new node count will be within the minimum and maximum limit you specified for the worker pool configuration wizard. - - The following snapshot displays two new nodes of the size **t3.medium** spin up automatically. These two smaller-sized nodes will be able to handle the same workload as a single larger-sized node. - - ![A snapshot displaying new nodes of the size **t3.medium** spin up automatically, *collectively* providing enough capacity to accommodate the current workload. ](/integrations_aws-cluster-autoscaler_two-nodes.png) -
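The scan interval and scale-down behavior described earlier (for example, the ten-minute underutilization window and the 50% utilization threshold) can typically be tuned through the pack's values. The sketch below assumes the pack exposes the upstream cluster-autoscaler Helm chart's `extraArgs` map; the exact nesting in the pack manifest may differ, so treat it as illustrative only:

```yaml
extraArgs:
  scan-interval: 30s                        # how often the autoscaler evaluates unschedulable pods
  scale-down-unneeded-time: 10m             # how long a node must stay underutilized before removal
  scale-down-utilization-threshold: "0.5"   # the 50% node utilization threshold mentioned above
```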
- -
- - - -## Prerequisites - -* Kubernetes 1.19.x or higher. - - -* Permission to create an IAM policy in the AWS account you use with Palette. - - -* IAM policy - A [Full Cluster Autoscaler Features](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#full-cluster-autoscaler-features-policy-recommended) IAM policy must be attached to the EKS cluster's node group. The policy must allow the Cluster Autoscaler to scale the cluster's node groups. - - There are two ways to achieve this prerequisite. You can define the policy as a *customer-managed* policy in the AWS account and use its Amazon Resource Name (ARN) in the cluster profile. Alternatively, you can attach the IAM policy as an *inline* policy to the node group if you have already deployed your cluster. Refer to the [Usage](#usage) section below to learn more. - - -* Updated Kubernetes layer manifest - The Kubernetes pack's manifest should be updated with the newly created IAM policy ARN. The YAML code block below displays the `managedMachinePool.roleAdditionalPolicies` section to update in the Kubernetes pack's manifest. Refer to the [Usage](#usage) section below for more details with an example. -
- - ```yaml - managedMachinePool: - #roleName: {{ name of the self-managed role | format "${string}" }} - ## A list of additional policies to attach to the node group role - roleAdditionalPolicies: - - {{ arn for the policy1 | format "${string}" }} - ``` -
- -## Usage - -Cluster Autoscaler helps improve your cluster's performance and makes your cluster more resilient to failures. It automatically adjusts the number of nodes in your cluster based on the current workload. In other words, Cluster Autoscaler monitors the resource utilization, such as CPU and memory, and the number of pods active in your cluster and scales the cluster when either of these events occurs: -
- -- Multiple pods fail due to resource contention. In this case, the Cluster Autoscaler will provision more nodes. - - -- Nodes are underutilized for a specific period. In this case, the Cluster Autoscaler will reschedule the pods onto other nodes and shut down the underutilized node. -
- - -### Deploy Cluster Autoscaler -To deploy the Cluster Autoscaler pack, you must first define an IAM policy in the AWS account associated with Palette. - -Next, update the cluster profile to specify the IAM policy ARN in the Kubernetes pack's manifest. Palette will attach that IAM policy to your cluster's node group during deployment. Note that Palette automatically creates two IAM roles in the AWS account when you deploy an EKS cluster. One role is for the cluster, and another for the cluster's node group. The cluster's IAM role name will have the following naming convention, `[your-cluster-name]-iam-service-role`, and the node group's IAM role name will follow the `ng-role_worker-pool-[random-string]` naming convention. - -The following steps provide detailed instructions for deploying the Cluster Autoscaler pack. - -
- -1. Define the new IAM policy using the policy outlined below, and give it a name, for example, *PaletteEKSClusterAutoscaler*. -
- - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeTags", - "ec2:DescribeInstanceTypes", - "ec2:DescribeLaunchTemplateVersions" - ], - "Resource": ["*"] - }, - { - "Effect": "Allow", - "Action": [ - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "ec2:DescribeImages", - "ec2:GetInstanceTypesFromInstanceRequirements", - "eks:DescribeNodegroup" - ], - "Resource": ["*"] - } - ] - } - ``` - - -2. Copy the IAM policy ARN to the clipboard for the next step. For example, the policy ARN will be similar to `arn:aws:iam::650628870702:policy/PaletteEKSClusterAutoscaler`. - - -3. In your cluster profile, and update the `managedMachinePool.roleAdditionalPolicies` section in the Kubernetes pack's manifest with the newly created IAM policy ARN. The snapshot below displays the specific section to update with the policy ARN. - - ![A snapshot displaying the ARN added to the Kubernetes pack's manifest.](/integrations_aws-cluster-autoscaler_k8s-manifest.png) - - For example, the code block below displays the updated `managedMachinePool.roleAdditionalPolicies` section with a sample policy ARN, `"arn:aws:iam::650628870702:policy/PaletteEKSClusterAutoscaler"`. Before you use the following code block, replace the ARN below with yours. -
- - ```yaml - managedMachinePool: - # roleName: {{ name of the self-managed role | format "${string}" }} - # A list of additional policies to attach to the node group role - roleAdditionalPolicies: - - "arn:aws:iam::650628870702:policy/PaletteEKSClusterAutoscaler" - ``` -
- - - - If you do not want to update the Kubernetes pack's manifest, you can add an *inline* IAM policy to the cluster's node group post deployment. Refer to this [AWS guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html#add-policies-console) on how to embed an inline policy for a user or role. Refer to the [AWS IAM documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html) for the differences between managed and inline policies. - - - - -4. In the cluster deployment wizard, when you are in the **Nodes configuration** section, you must enter the minimum and maximum number of worker pool nodes, and the instance type (size) that suits your requirement. - - You must provide the node count limits because the Cluster Autoscaler uses an Auto Scaling Group to manage the cluster's node group. An Auto Scaling Group requires a minimum and maximum count and the selection of an instance type. You can choose an instance type that suits your requirement. - - For example, the snapshot below displays the cluster's minimum and maximum capacity. - - ![A snapshot displaying the minimum and maximum node count in Palette.](/integrations_aws-cluster-autoscaler_node-count.png) - -
- -### Resize the Cluster - -To better understand the scaling behavior of the Cluster Autoscaler and its impact on a cluster, do the following exercise to gain firsthand experience with the scaling behavior. - -In the following example scenario, you will first create a cluster with large-sized worker pool instances. Next, you will manually reduce the instance size, leading to insufficient resources for existing pods and multiple pod failures in the cluster. As a result, the Cluster Autoscaler will provision new smaller-sized nodes with enough capacity to accommodate the current workload and reschedule those contending pods on new nodes. Also, the new nodes' count will be within the minimum and maximum limit you specified for the worker pool. - - -Use the following steps to trigger the pod rescheduling event manually: -
- -1. In the cluster deployment wizard, while defining the **Nodes configuration**, choose a large-sized instance type. For example, you can choose your worker pool to have instance size **t3.2xlarge** (8 vCPUs, 32 GB RAM) or higher. - - -2. After your cluster is successfully deployed, navigate to the **Nodes** tab in the cluster details page in Palette, and note the count and size of nodes. The snapshots below display one node of the type **t3.2xlarge** in the worker pool of a successfully deployed cluster. - - - ![A snapshot displaying one node of the type **t3.2xlarge** in the worker pool.](/integrations_aws-cluster-autoscaler_one-node.png) - - -3. Manually reduce the instance size in the worker-pool configuration to a **t3.medium** (2 vCPUs, 8 GB RAM). The snapshot below displays how to edit the instance size in the node pool configuration. - - ![A snapshot displaying how to edit node pool configuration.](/integrations_aws-cluster-autoscaler_edit-node.png) - - -4. Wait for a few minutes for the new nodes to provision. Reducing the node size will make the Cluster Autoscaler shut down the large node and provision smaller-sized nodes with enough capacity to accommodate the current workload. Also, the new node count will be within the minimum and maximum limit you specified for the worker pool configuration wizard. - - The following snapshot displays two new nodes of the size **t3.medium** spin up automatically. These two smaller-sized nodes will be able to handle the same workload as a single larger-sized node. - - - ![A snapshot displaying new nodes of the size **t3.medium** spin up automatically, *collectively* providing enough capacity to accommodate the current workload. ](/integrations_aws-cluster-autoscaler_two-nodes.png) -
- - -
- -
- -# Troubleshooting - -If you are facing the `LimitExceeded: Cannot exceed quota for PoliciesPerRole:10` error in the cluster deployment logs, it may be because the default IAM role Palette creates for the node group already has 10 policies attached to it, and you are trying to attach one more. By default, your AWS account will have a quota of 10 managed policies per IAM role. To fix the error, follow the instruction in this [AWS guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-quotas-entities) to request a quota increase. - - - -If you encounter an `executable aws-iam-authenticator not found` error in your terminal when attempting to access your EKS cluster from your local machine, it may be due to the [aws-iam-authenticator](https://github.com/kubernetes-sigs/aws-iam-authenticator) plugin missing from your local environment. You can find the installation steps for the -aws-iam-authenticator in the following [install guide](https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html). - - -# References - -- [Cluster Autoscaler on AWS](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) - - -- [Amazon EKS Autoscaling](https://docs.aws.amazon.com/eks/latest/userguide/autoscaling.html) - - -- [AWS IAM Authenticator Plugin](https://github.com/kubernetes-sigs/aws-iam-authenticator) diff --git a/content/docs/06-integrations/00-aws-ebs.md b/content/docs/06-integrations/00-aws-ebs.md deleted file mode 100644 index 3e9fe993c7..0000000000 --- a/content/docs/06-integrations/00-aws-ebs.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: 'AWS-EBS' -metaTitle: 'AWS EBS Integration with Spectro Cloud' -metaDescription: 'AWS EBS storage add on into Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['storage', 'amd64','fips'] -logoUrl: 'https://registry.spectrocloud.com/v1/csi-aws/blobs/sha256:f86813591b3b63b3afcf0a604a7c8c715660448585e89174908f3c6a421ad8d8?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# AWS EBS - -AWS Elastic Block Store is an easy to use, high performance block storage at any scale. It helps in the easy deployment, management, and scaling of the most demanding and high-performance tenant workloads. AWS EBS also ensures availability with replication and durability. - -# Prerequisites - -The following permissions needs to be attached to the AWS cloud account: - -- The AWS managed policy `AmazonEBSCSIDriverPolicy`. - -- [EBSCSIKMSEncryptionPolicy](/integrations/aws-ebs#ebscsikmsencryptionpolicy) (custom policy name), if the user wants to enable EBS encryption. - -# Versions Supported - - - - - -* **1.12.0** - - - - - -* **1.10.0** - - - - - -* ** 1.8.0** - - - - - -* ** 1.5.1** - - - - - -## EBSCSIKMSEncryptionPolicy - -
- -``` -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "kms:GenerateDataKeyWithoutPlaintext", - "kms:CreateGrant" - ], - "Resource": "*" - } - ] -} -``` - -## Notable Parameters - -| Name | Supported Values | Default Value | Description | -| --- | --- | --- | --- | -| storageType | gp2, sc1, st1, io1 | gp2 | AWS Volume type to be used. | -| reclaimPolicy | Delete, Retain | Delete | Defines whether volumes will be retained or deleted. | -| allowVolumeExpansion | true, false | true | Flag to allow resizing a volume. | -| isDefaultClass | true, false | true | Flag to denote if this StorageClass will be the default. | -| volumeBindingMode | WaitForFirstConsumer, Immediate | WaitForFirstConsumer | Controls when volumeBinding and dynamic provisioning should happen. | -| encrypted | true, false | true | Denotes whether the EBS volume should be encrypted or not. | -| kmsKeyId (optional) | The full Amazon Resource Name of the key to use when encrypting the volume. | -- | If you don't provide the full Amazon Resource Name but **encrypted** is true, AWS [generates a key](https://kubernetes.io/docs/concepts/storage/storage-classes/#aws-ebs). | - - -You can view the full parameter list [here](https://github.com/kubernetes-sigs/aws-ebs-csi-driver#createvolume-parameters). - - -Storage classes that Palette creates are named "spectro-storage-class" and can be fetched from kubectl using the following CLI command: -
- -```bash -kubectl get storageclass -``` - -# References - -[AWS EBS](https://aws.amazon.com/ebs/) - - -[AWS EBS Storage Class Details](https://kubernetes.io/docs/concepts/storage/storage-classes/#aws-ebs) diff --git a/content/docs/06-integrations/00-aws-efs.md b/content/docs/06-integrations/00-aws-efs.md deleted file mode 100644 index bad72cfe80..0000000000 --- a/content/docs/06-integrations/00-aws-efs.md +++ /dev/null @@ -1,400 +0,0 @@ ---- -title: 'AWS-EFS' -metaTitle: 'AWS EFS Integration with Palette' -metaDescription: 'AWS EFS storage add on into Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['storage', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-aws-efs/blobs/sha256:5d1eb98bb847489f341beda1407c14442854ab8e5910d0cc8da1a63636057927?type=image/png' - ---- - - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - - -# AWS EFS - - You can access information from an Amazon Elastic File System (Amazon EFS) volume within a specific region, no matter which availability zone it's in. The cluster can be distributed across availability zones instead of having it in one location and replicating it multiple times. - -Palette handles setting up the AWS EFS as a volume with ease when adding the PersistentVolume storage container. Palette will dynamically provision the AWS EFS storage layer for the worker node. - -## Versions Supported - - - - - -## Prerequisites - -- Create the Identity and Access Management (IAM) role that allows the driver to manage AWS EFS access points. The [Introducing Amazon EFS CSI dynamic provisioning](https://aws.amazon.com/blogs/containers/introducing-efs-csi-dynamic-provisioning/) blog provides information on `EFSCSIControllerIAMPolicy`. - -- An AWS EFS file system is available. Check out the guide [Create your Amazon EFS file system](https://docs.aws.amazon.com/efs/latest/ug/gs-step-two-create-efs-resources.html) if you need additional guidance. - -- Create your EKS cluster using static provisioning. Static provisioning requires you to create a virtual private cloud (VPC), subnets, route tables, internet gateway and NAT gateways in the AWS console. - - You can use the same VPC or a different one for EFS: - - - Using the same VPC for EFS ensures EFS is reachable from your EKS cluster. We recommend using the same VPC because it doesn't require peering. - - - If you use a different VPC for EFS, you need to peer the VPC with the VPC on which the EKS cluster is running.

- -- The security group associated with your EFS file system must have an inbound rule that allows Network File System (NFS) traffic (port 2049) from the CIDR for your cluster's VPC. - - -## Parameters - -The table lists commonly used parameters you can configure when adding this pack. - -| Parameter | Description | Default | -|-----------------|-----------------------|------------------------------| -| storageClassName | AWS Volume type to be used. | spectro-storage-class | -| isDefaultClass | Toggle for Default class. | true | -| fileSystemId | The file system under which access points are created. Create the file system prior to this setup. This is a required field and needs to be set to a pre-created AWS EFS volume. Other values can use the default setting. | Set this to an AWS EFS volume you have already created. | -| provisioningMode | Type of volume provisioned by AWS EFS. For now, this is the only access point supported. | efs-ap | -| directoryPerms | Directory permissions for Access Point root directory creation. | 700 | -| gidRangeStart | Starting range of the Portable Operating System Interface (POSIX) group Id to be applied for access point root directory creation (optional). | 1000 | -| gidRangeEnd | End range of the POSIX group Id (optional). | 2000 | -| basePath | Path under which access points for dynamic provisioning is created. If this parameter is not specified, access points are created under the root directory of the file system. | `/base_efs` | - - - -## Usage - -There are two ways to add AWS EFS to Palette: - -- Add EFS as a base CSI layer in a cluster profile. -- Add EFS as an Add-on layer, which will create a new storage class using the AWS EFS file system. - - -### Policy Information - -You must create a policy that allows you to use EFS from your IAM account. You can use the following JSON to create the policy.

- -```yaml -{ - "Version": "2012-10-17", - "Statement": [{ - "Effect": "Allow", - "Action": [ - "elasticfilesystem:DescribeAccessPoints", - "elasticfilesystem:DescribeFileSystems" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "elasticfilesystem:CreateAccessPoint" - ], - "Resource": "*", - "Condition": { - "StringLike": { - "aws:RequestTag/efs.csi.aws.com/cluster": "true" - } - } - }, - { - "Effect": "Allow", - "Action": "elasticfilesystem:DeleteAccessPoint", - "Resource": "*", - "Condition": { - "StringEquals": { - "aws:ResourceTag/efs.csi.aws.com/cluster": "true" - } - } - } - ] -} -``` - -### Storage Class - -Palette creates storage classes named *spectro-storage-class*. You can view a list of storage classes using this kubectl command: -
- -```bash -kubectl get storageclass -``` - -### PersistentVolumeClaim - -A PersistentVolumeClaim (PVC) is a request made by a pod for a certain amount of storage from the cluster. It acts as a link between the pod and the storage resource, allowing the pod to use the storage. You can learn details about a PVC, as shown in the following output, when you use the `kubectl describe pvc` command. -
- -```bash -kubectl describe pvc my-efs-volume -``` - -```yaml - -Name: efs - -Namespace: default - -StorageClass: aws-efs - -Status: Pending - -Volume: - -Labels: - -Annotations: kubectl.kubernetes.io/last-applied-configuration: -{"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{"volume.beta.kubernetes.io/ -storage-class":"aws-efs"},"name":"..."} - -volume.beta.kubernetes.io/storage-class: aws-efs - -Finalizers: [kubernetes.io/pvc-protection] - -Capacity: - -Access Modes: - -Events: -| Type | Reason | Age | From | Message | -| ------- | ------------------ | ------------------ | --------------------------- | ------------------------ | -| Warning | ProvisioningFailed | 43s (x12 over 11m) | persistentvolume-controller | no volume plugin matched | -Mounted By: - -``` - -
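-
-The following is a minimal sketch of a PVC that requests storage from the EFS-backed storage class. The claim name is a placeholder, and the storage class name assumes the default *spectro-storage-class* created by the pack. EFS does not enforce the requested capacity, but Kubernetes requires the field to be set.
-
-```yaml
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  # Placeholder name - rename to match your workload.
-  name: my-efs-claim
-spec:
-  # EFS supports concurrent access from multiple nodes.
-  accessModes:
-    - ReadWriteMany
-  # Assumes the default storage class created by this pack.
-  storageClassName: spectro-storage-class
-  resources:
-    requests:
-      # Required by Kubernetes, not enforced by EFS.
-      storage: 5Gi
-```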
- - - - -## Prerequisites - -- Create the Identity and Access Management (IAM) role that allows the driver to manage AWS EFS access points. The [Introducing Amazon EFS CSI dynamic provisioning](https://aws.amazon.com/blogs/containers/introducing-efs-csi-dynamic-provisioning/) blog provides information on `EFSCSIControllerIAMPolicy`. - -- An AWS EFS file system is available. Check out the guide [Create your Amazon EFS file system](https://docs.aws.amazon.com/efs/latest/ug/gs-step-two-create-efs-resources.html) if you need additional guidance. - -- Create your EKS cluster using static provisioning. Static provisioning requires you to create a virtual private cloud (VPC), subnets, route tables, internet gateway and NAT gateways in the AWS console. - - You can use the same VPC or a different one for EFS: - - - Using the same VPC for EFS ensures EFS is reachable from your EKS cluster. We recommend using the same VPC because it doesn't require peering. - - - If you use a different VPC for EFS, you need to peer the VPC with the VPC on which the EKS cluster is running.

- -- The security group associated with your EFS file system must have an inbound rule that allows Network File System (NFS) traffic (port 2049) from the CIDR for your cluster's VPC. - - -## Parameters - -The table lists commonly used parameters you can configure when adding this pack. - -| Parameter | Description | Default | -|-------------------------|--------------------------------------------------------|---------------------------------------------| -| storageClassName | AWS Volume type to be used. | spectro-storage-class | -| isDefaultClass | Toggle for Default class. | true | -| fileSystemId | The file system under which access points are created. Create the file system prior to this setup. This is a required field and needs to be set to a pre-created AWS EFS volume. Other values can use the default setting. | Set this to an AWS EFS volume you have already created. | -| provisioningMode | Type of volume provisioned by AWS EFS. For now, this is the only access point supported. | efs-ap | -| directoryPerms | Directory permissions for Access Point root directory creation. | 700 | -| gidRangeStart | Starting range of the Portable Operating System Interface (POSIX) group Id to be applied for access point root directory creation (optional). | 1000 | -| gidRangeEnd | End range of the POSIX group Id (optional). | 2000 | -| basePath | Path under which access points for dynamic provisioning is created. If this parameter is not specified, access points are created under the root directory of the file system. | `/base_efs` | - - -## Usage - -There are two ways to add AWS EFS to Palette: - -- Add EFS as a CSI layer in AWS/EKS. -- Add EFS as an Add-on layer, which will create a new storage class using the AWS EFS file system. - - -### Policy Information - -You must create a policy that allows you to use EFS from your IAM account. You can use the following JSON to create the policy.

- -```yaml -{ - "Version": "2012-10-17", - "Statement": [{ - "Effect": "Allow", - "Action": [ - "elasticfilesystem:DescribeAccessPoints", - "elasticfilesystem:DescribeFileSystems" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "elasticfilesystem:CreateAccessPoint" - ], - "Resource": "*", - "Condition": { - "StringLike": { - "aws:RequestTag/efs.csi.aws.com/cluster": "true" - } - } - }, - { - "Effect": "Allow", - "Action": "elasticfilesystem:DeleteAccessPoint", - "Resource": "*", - "Condition": { - "StringEquals": { - "aws:ResourceTag/efs.csi.aws.com/cluster": "true" - } - } - } - ] -} -``` - -### Storage Class - -Palette creates storage classes named *spectro-storage-class*. You can view a list of storage classes using this kubectl command: - -
- -```bash -kubectl get storageclass -``` - -### PersistentVolumeClaim - -A PersistentVolumeClaim (PVC) is a request made by a pod for a certain amount of storage from the cluster. It acts as a link between the pod and the storage resource, allowing the pod to use the storage. You can learn details about a PVC by using the `kubectl describe pvc` command, as the following example output shows. -
- -```bash -kubectl describe pvc my-efs-volume -``` - -```yaml - -Name: efs - -Namespace: default - -StorageClass: aws-efs - -Status: Pending - -Volume: - -Labels: - -Annotations: kubectl.kubernetes.io/last-applied-configuration: -{"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{"volume.beta.kubernetes.io/ -storage-class":"aws-efs"},"name":"..."} - -volume.beta.kubernetes.io/storage-class: aws-efs - -Finalizers: [kubernetes.io/pvc-protection] - -Capacity: - -Access Modes: - -Events: -| Type | Reason | Age | From | Message | -| ------- | ------------------ | ------------------ | --------------------------- | ------------------------ | -| Warning | ProvisioningFailed | 43s (x12 over 11m) | persistentvolume-controller | no volume plugin matched | -Mounted By: - -``` - -
-
- - -# Troubleshooting - -Some basic troubleshooting steps you can take if you receive errors in your pods when mounting an Amazon EFS volume in your Amazon EKS cluster are to verify you have the following: -
- - - An Amazon EFS file system created with a mount target in each of the worker node subnets. - - A valid EFS storage class definition using the efs.csi.aws.com provisioner. - - A valid PersistentVolumeClaim (PVC) definition and PersistentVolume definition. This isn't needed if you're using dynamic provisioning. - - The Amazon EFS CSI driver installed in the cluster. - -The following list provides more specific details to help you troubleshoot issues when mounting an Amazon EFS volume. CSI driver pod logs are also available to determine the cause of the mount failures. If the volume is failing to mount, the efs-plugin logs are available. -
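-
-For example, you can review the efs-plugin logs with commands similar to the following. The label selectors and container name are assumptions based on the upstream aws-efs-csi-driver deployment and may differ in your cluster.
-
-```bash
-# Controller logs, useful for provisioning failures.
-kubectl logs --namespace kube-system -l app=efs-csi-controller -c efs-plugin
-
-# Node DaemonSet logs, useful for mount failures on a specific worker node.
-kubectl logs --namespace kube-system -l app=efs-csi-node -c efs-plugin
-```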
-
-- **Mount Targets:** Verify the mount targets are configured correctly. Be sure to create the EFS mount targets in each Availability Zone where the EKS worker nodes are running.
-
-- **Allow NFS Traffic:** Verify the security group associated with your EFS file system and worker nodes allows NFS traffic.
-
-  - The security group that's associated with your EFS file system must have an inbound rule that allows NFS traffic (port 2049) from the CIDR for your cluster's VPC.
-
-  - The security group that's associated with your worker nodes where the pods are failing to mount the EFS volume must have an outbound rule that allows NFS traffic (port 2049) to the EFS file system.
-
-- **Subdirectories:** If you're mounting the pod to a subdirectory, verify the subdirectory is created in your EFS file system. When you add sub paths in persistent volumes, the EFS CSI driver doesn't create the subdirectory path in the EFS file system as part of the mount operation. Subdirectories must be present before you start the mount operation.
-
-- **DNS server:** Confirm the cluster's Virtual Private Cloud (VPC) uses the Amazon DNS server. To verify the DNS server, log in to the worker node and issue the following command, replacing `region` with your AWS Region:
-
-
-  ```bash
-  nslookup fs-4fxxxxxx.efs.region.amazonaws.com
-  ```
-
-- **Permissions:** Verify you have "iam" mount options in the persistent volume definition when using a restrictive file system policy. In some cases, the EFS file system policy is configured to restrict mount permissions to specific IAM roles. In this case, the EFS mount helper in the EFS CSI driver requires the `-o iam` mount option during the mount operation. Include the **spec.mountOptions** property:

-
-   ```yaml
-   spec:
-     mountOptions:
-     - iam
-   ```
-- **IAM role:** Verify that the Amazon EFS CSI driver controller service account is associated with the correct IAM role and that the IAM role has the required permissions.
-
-  Run the following command:
-
-  ```bash
-  kubectl describe sa efs-csi-controller-sa --namespace kube-system
-  ```
-
-  You should see this annotation:
-
-  ```bash
-  eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/AmazonEKS_EFS_CSI_Driver_Policy
-  ```
-- **Driver Pods:** Verify the EFS CSI driver pods are running. Issue the following command to display a list of controller pods and node pods running in your cluster:
-
-  ```bash
-  kubectl get all -l app.kubernetes.io/name=aws-efs-csi-driver --namespace kube-system
-  ```
-
-- **File system won't mount:** Verify the EFS mount operation from the EC2 worker node where the pod is failing to mount the file system. Log in to the Amazon EKS worker node where the pod is scheduled. Then, use the EFS mount helper to try to manually mount the EFS file system to the worker node. You can run the following command to test:
-
-  ```bash
-  sudo mount -t efs -o tls file-system-dns-name efs-mount-point/
-  ```
-
-You can find more information in Amazon's [Troubleshoot Amazon EFS](https://aws.amazon.com/premiumsupport/knowledge-center/eks-troubleshoot-efs-volume-mount-issues/) guide.
-
-
-# Terraform
-
-When using this pack as a base layer, you need the following Terraform code.
-
-```terraform
-data "spectrocloud_registry" "public_registry" {
-  name = "Public Repo"
-}
-
-data "spectrocloud_pack_simple" "csi-aws-efs" {
-  name    = "aws-efs"
-  version = "1.4.0"
-  type = "helm"
-  registry_uid = data.spectrocloud_registry.public_registry.id
-}
-```
-
-# References
-
-[Amazon EFS CSI Driver](https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html)
-[Amazon Elastic File System](https://aws.amazon.com/efs/)
-[Amazon EFS Tutorial and Examples](https://github.com/aws-samples/amazon-efs-tutorial)
-[IAM Policy Example](https://raw.githubusercontent.com/kubernetes-sigs/aws-ebs-csi-driver/master/docs/example-iam-policy.json)
-[Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/)
diff --git a/content/docs/06-integrations/00-azure-cni.md b/content/docs/06-integrations/00-azure-cni.md deleted file mode 100644 index 7c49491ac8..0000000000 --- a/content/docs/06-integrations/00-azure-cni.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: 'Azure CNI' -metaTitle: 'Azure CNI' -metaDescription: 'Azure CNI network pack for Palette AKS Clusters' -hiddenFromNav: true -type: "integration" -category: ['network', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-azure/blobs/sha256:0787b7943741181181823079533cd363884a28aa0651715ea43408bdc77a5c51?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Azure CNI - -Palette supports Azure Container Network Interface (CNI) networking for Azure Kubernetes Service (AKS) clusters. Azure CNI enables each pod to have exclusive IP addresses from the subnet with direct accessibility. - -To allocate unique IP addresses to individual pods, advanced forethought needs to be put in. As per the maximum pods supported by a node, [IP addresses need to be reserved](https://docs.microsoft.com/en-us/azure/aks/configure-azure-cni#plan-ip-addressing-for-your-cluster) in advance. The default [maximum number](https://docs.microsoft.com/en-us/azure/aks/configure-azure-cni#maximum-pods-per-node) of pods per node varies between kubenet and Azure CNI networking and based on the method of cluster deployment. - - -## Versions Supported - - - - - -**1.4.0** - - - - - - -# Azure CNI Policy Support - -Network Policy is a Kubernetes specification that defines access policies for communication between pods. By default, AKS cluster pods can send and receive traffic without limitations. However, to ensure security, rules to control traffic flow can be defined. Network Policies define an ordered set of rules to send and receive traffic and applies them to a collection of pods that match one or more label selectors. Palette enables Network Policies to be included as part of a wider manifest that also creates a deployment or service. Palette leverages two (2) Network Policies from Azure CNI: - -
-
-* **azure**: Azure's own implementation, called Azure Network Policy.
-
-* **calico**: An open-source network and network security solution founded by [Tigera](https://www.tigera.io/).
-
-
-Palette users can choose either of the above Network Policies and set it in the pack YAML file through the `networkPolicy` parameter, as shown below:
-
-
- -```yaml -pack: - # The Network policy for ingress and egress traffic between pods in a cluster. Supported values are none, azure, calico - networkPolicy: "none" -``` -
- - - Provide the networkPolicy value as none if no policy to be applied. - - - -
- - -## Azure and Calico Policies and their Capabilities - -|Capability |Azure |Calico| -|-----------|-------|------| -|Supported platforms|Linux|Linux, Windows Server 2019 and 2022| -|Supported networking options|Azure CNI|Azure CNI (Linux, Windows Server 2019 and 2022) and kubenet (Linux)| -|Compliance with Kubernetes specification|All policy types supported| All policy types supported| -|Additional features| None |Extended policy model consisting of Global Network Policy, Global Network Set, and Host Endpoint. For more information on using the calicoctl CLI to manage these extended features, see calicoctl user reference guide.| -|Support|Supported by Azure Support and Engineering team|Calico community support.| -|Logging|Rules added or deleted in IP Tables are logged on every host under `/var/log/azure-npm.log`|For more information, see [Calico component logs](https://projectcalico.docs.tigera.io/maintenance/troubleshoot/component-logs)| - - -Make sure to use Azure CNI with the Windows operating system as the -kubenet is not available for the Windows environment. - - -
-
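-
-Regardless of which engine you select, policies are expressed with the standard Kubernetes NetworkPolicy resource. The following sketch only allows pods labeled `app=frontend` to reach pods labeled `app=backend` on port 8080. The namespace, labels, and port are illustrative placeholders.
-
-```yaml
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
-  name: allow-frontend-to-backend
-  namespace: demo
-spec:
-  # Select the pods this policy protects.
-  podSelector:
-    matchLabels:
-      app: backend
-  policyTypes:
-    - Ingress
-  ingress:
-    # Only traffic from pods labeled app=frontend is allowed, on TCP 8080.
-    - from:
-        - podSelector:
-            matchLabels:
-              app: frontend
-      ports:
-        - protocol: TCP
-          port: 8080
-```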
- -## References -* [Azure CNI Git](https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md) -* [Azure CNI](https://docs.microsoft.com/en-us/azure/aks/configure-azure-cni) diff --git a/content/docs/06-integrations/00-azure-disk.md b/content/docs/06-integrations/00-azure-disk.md deleted file mode 100644 index b535535583..0000000000 --- a/content/docs/06-integrations/00-azure-disk.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: 'Azure-Disk' -metaTitle: 'Azure Disk Integration with Spectro Cloud' -metaDescription: 'Azure Disk storage add on into Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['storage', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-azure/blobs/sha256:0787b7943741181181823079533cd363884a28aa0651715ea43408bdc77a5c51?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - - - -# Azure Disk - -Azure Disk storage is designed to be used with Azure virtual machines for the tenant workloads. It offers high-performance, durable block storage with sub-millisecond latency and throughput for transaction-intensive workloads. - - -## Versions Supported - - - - - -* **1.25.0** -* **1.20.0** - - - - -* **1.0.0** - - - - -## Notable Parameters - -| Name | Supported Values | Default Value | Description | -| --- | --- | --- | --- | -| storageaccounttype | Standard_LRS, Premium_LRS | Standard_LRS | The storage account type to use | -| kind | managed, shared, dedicated | managed | The disk kind | -| reclaimPolicy | Delete, Retain | Delete | Defines whether volumes will be retained or deleted | -| allowVolumeExpansion | true, false | true | Flag to allow resizing volume | -| isDefaultClass | true, false | true | Flag to denote if this StorageClass will be the default | -| volumeBindingMode | WaitForFirstConsumer, Immediate | WaitForFirstConsumer | Controls when volumeBinding and dynamic provisioning should happen | - -**References:** - -https://kubernetes.io/docs/concepts/storage/storage-classes/#azure-disk-storage-class - - - -# Further Info - -More info about Storage classes can be found in the following links: - -https://kubernetes.io/docs/concepts/storage/storage-classes/ - -# Troubleshooting - -Storage classes created by Spectro will be with the name "spectro-storage-class" and can be fetched from kubectl using the following CLI command: - -```bash -kubectl get storageclass -``` diff --git a/content/docs/06-integrations/00-byoos.md b/content/docs/06-integrations/00-byoos.md deleted file mode 100644 index cb40149a32..0000000000 --- a/content/docs/06-integrations/00-byoos.md +++ /dev/null @@ -1,290 +0,0 @@ ---- -title: "Bring Your Own OS (BYOOS)" -metaTitle: "Bring your own OS (BYOOS)" -metaDescription: "Bring Your Own OS (BYOOS) pack in Palette." -hiddenFromNav: true -type: "integration" -category: ['operating system', 'amd64'] -logoUrl: "https://registry.dev.spectrocloud.com/v1/spectro-proxy/blobs/sha256:b6081bca439eeb01a8d43b3cb6895df4c088f80af978856ddc0da568e5c09365?type=image/png" ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Bring Your Own Operating System (BYOOS) - -The [Bring Your Own Operating System (BYOOS)](/cluster-profiles/byoos) enables you to use a custom Operating System (OS) with Palette. 
Palette comes with several operating systems out-of-the-box, but the existing OS list may not meet all users' needs. - -Using your custom OS provides several benefits, including the ability to control your own dependencies, improve performance, and ensure compatibility with your existing applications. With BYOOS, you can choose the OS that best fits your needs, whether it's a commercial or open-source distribution, and integrate it with your Kubernetes clusters. The BYOOS pack can be used with both Edge and non-Edge environments. - -# Versions Supported - -**1.0.x** - -
- - - - - -## Prerequisites - - -
- -- The Edge Provider images you have created and uploaded to a container registry. Refer to the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide for steps on how to create the Edge artifacts and how to upload your custom OS to a registry. - - -- Palette 3.3.0 or greater. - -## Parameters - -The BYOS Edge OS pack supports the following parameters. - -### Parameters - -| Parameter | Description | Type | Default Value | -|----------------------|--------------------------------------------------------|------| ------------- | -| `pack:content` | Specifies the content of the **BYOS Edge OS** pack. | map | N/A | -| `pack.content.images` | Specifies a list of OS images to use with the pack. | list | N/A | -| `pack.content.images.image` | An OS image to use with the pack. | string| `'{{.spectro.pack.edge-native-byoi.options.system.uri}}'`| -| `pack.drain:` | Specifies the drain configuration for the node. | map | N/A -| `pack.drain.cordon` | Specifies whether to cordon the node. | boolean | `false` | -| `pack.drain.timeout` | The time in seconds to attempt draining the node before aborting the operation. A zero value indicates no timeout window and to continue waiting indefinitely. | integer | `60` | -| `pack.drain.gracePeriod` | The time in seconds each pod has to terminate gracefully. If negative, the default value specified in the pod will be used. | integer | `60` | -| `pack.drain.ignoreDaemonSets` | Specifies whether to ignore DaemonSets. | boolean | `false` | -| `pack.drain.deleteLocalData` | Specifies whether to continue if there are pods using the emptyDir volume. If enabled local data will be deleted during a drainage operation. | boolean | `false` | -| `pack.drain.force` | Specifies whether to continue if there are pods that do not declare a controller. | boolean | `false` | -| `pack.drain.disableEviction` | Specifies whether to allow a force drain and use delete, including if pod eviction is supported. This option will bypass checking if [*PodDisruptionBudgets*](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#pod-disruption-budgets) are allocated. Use this option with caution. | boolean | `false` | -| `pack.drain.skipWaitForDeleteTimeout` | Specifies whether to skip waiting for the pod to terminate if the pod *DeletionTimestamp* is older than the specified number of seconds. The number of seconds must be greater than zero to skip. | integer | `60` | -| `system.uri` | The system URI specifies the location of the BYOOS image. | string| - - -
- - - ```yaml hideClipboard - pack: - content: - images: - - image: '{{.spectro.pack.edge-native-byoi.options.system.uri}}' - # Below config is default value, please uncomment if you want to modify default values - #drain: - #cordon: true - #timeout: 60 # The length of time to wait before giving up, zero means infinite - #gracePeriod: 60 # Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used - #ignoreDaemonSets: true - #deleteLocalData: true # Continue even if there are pods using emptyDir (local data that will be deleted when the node is drained) - #force: true # Continue even if there are pods that do not declare a controller - #disableEviction: false # Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, use with caution - #skipWaitForDeleteTimeout: 60 # If pod DeletionTimestamp older than N seconds, skip waiting for the pod. Seconds must be greater than 0 to skip. - options: - system.uri: "" - system.uri: example.io/my-images/example-custom-os:v1.4.5 - ``` - -## Usage - -BYOOS enables you to use a custom OS for your Edge host. You can use this feature to customize the desired specifications of your OS layer in the Edge host. You can reference the custom OS through the BYOOS pack. - - -To use a custom OS, you must include all the Edge artifacts and provider images required by the Edge Installer in the custom OS. Refer to the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide for steps on how to create a custom OS that includes all the required components for the Edge Installer. - - -Select the BYOOS pack and fill out the required parameters during the cluster profile creation process. The `system.uri` parameter specifies the location of the BYOOS image. Refer to the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide to learn how to create Edge Artifacts. - -![A view of the Kubernetes pack editor with a YAML configuration](/clusters_site-deployment_model-profile_byoos-pack-yaml.png) - -
- -### Node Drainage - -The BYOOS pack supports node drainage. Node drainage is the process of cordoning and removing workloads from a node. Cordoning a node prevents new pods from being scheduled on the node. Draining a node gracefully terminates all pods on the node and reschedules them on other healthy nodes. - -To enable node drainage, you must include the `pack.drain` parameter block and set the `pack.drain.cordon` parameter to `true`. - -
- - ```yaml - pack: - drain: - cordon: true - ``` - -You can customize the node drainage process by using the additional parameters in the `pack.drain` parameter block. Refer to the [parameters](#parameters) section for a list of the supported parameters. - -You can change the node drainage behavior after a cluster is deployed by updating the cluster profile and applying the changes to the cluster. - -
- - - -Changing the node drainage behavior after a cluster is deployed will trigger a rolling update on the cluster nodes. Each node will reboot so that the new node drainage behavior can be applied. - - - - - - -
- - - -## Prerequisites - -To use the non-Edge BYOOS pack, you must have the following: - -
- -- A custom OS that you created. Refer to the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide to learn how to create a custom OS for Palette. - -## Parameters - -The following is a list of parameters required when using the BYOOS pack. - -
- -| Parameter | Description | Type | -|----------------------|--------------------------------------------------------|---| -| `osImageOverride` | The image ID used as the base OS layer. This is the image ID as assigned in the infrastructure environment the image belongs to. Example: `ami-0f4804aff4cf9c5a2` | string| -| `osName` | The name of the OS distribution. Example: `rhel` | string | -| `osVersion` | The version of the OS distribution. Example: `"8"` | string | - -## Usage - -Use the BYOOS pack when selecting the OS layer during the cluster profile creation. Use the following information to find the BYOOS pack. - -* Pack Type: OS -* Registry: Public Repo -* Pack Name: Bring Your Own OS (BYO-OS) -* Pack Version: 1.0.x or higher - - - - -Check out the [Create Cluster Profile](/cluster-profiles/task-define-profile/) guide to learn how to create a cluster profile. - - - -
- - -Fill out the required parameters with information about your custom OS, such as the ID, OS distribution, and version. - -
- -```yaml -pack: - osImageOverride: "ami-0f4804aff4cf9c5a2" - osName: "rhel" - osVersion: "8" -``` - - - -
- - - ![View of the cluster profile wizard](/clusters_byoos_image-builder_cluster-profile-byoos-yaml.png) - - - - - -Check out the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide to learn to create a custom image for Palette. - - ---- - -
- -Image creation tools are available to help you create custom OS images for the infrastructure provider you are using. The following is a list of commonly used tools for creating a custom OS: - -
- -* [AWS EC2 Image Builder](https://aws.amazon.com/image-builder/). - - -* [Azure VM Image Builder](https://learn.microsoft.com/en-us/azure/virtual-machines/image-builder-overview?tabs=azure-powershell). - - -* [HashiCorp Packer](https://developer.hashicorp.com/packer). - - -* [Kubernetes Image Builder (KIB)](https://image-builder.sigs.k8s.io/introduction.html). - - -
- -
- - -# Terraform - - - - -You can retrieve details about the BYOOS Edge OS agent pack using the following Terraform code. - -
-
-
-```terraform
-data "spectrocloud_registry" "public_registry" {
-  name = "Public Repo"
-}
-
-data "spectrocloud_pack_simple" "byoos" {
-  name    = "edge-native-byoi"
-  version = "1.0.0"
-  type = "helm"
-  registry_uid = data.spectrocloud_registry.public_registry.id
-}
-```
- - - - -You can retrieve details about the BYOOS pack by using the following Terraform code. - -
-```terraform
-data "spectrocloud_registry" "public_registry" {
-  name = "Public Repo"
-}
-
-data "spectrocloud_pack_simple" "byoos" {
-  name    = "generic-byoi"
-  version = "1.0.0"
-  type = "helm"
-  registry_uid = data.spectrocloud_registry.public_registry.id
-}
-```
- -
- -# References - -- [Create a Custom Cluster Profile with BYOOS](/clusters/edge/site-deployment/model-profile) - - -- [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) - - -- [Model Edge Native Cluster Profile](/clusters/edge/site-deployment/model-profile) - - -- [AWS EC2 Image Builder](https://aws.amazon.com/image-builder/) - - -- [Azure VM Image Builder](https://learn.microsoft.com/en-us/azure/virtual-machines/image-builder-overview?tabs=azure-powershell) - - -- [HashiCorp Packer](https://developer.hashicorp.com/packer) - - -- [Kubernetes Image Builder (KIB)](https://image-builder.sigs.k8s.io/introduction.html) \ No newline at end of file diff --git a/content/docs/06-integrations/00-calico.md b/content/docs/06-integrations/00-calico.md deleted file mode 100644 index a798f91833..0000000000 --- a/content/docs/06-integrations/00-calico.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: 'Calico' -metaTitle: 'Calico Networking in Spectro Cloud' -metaDescription: 'Choosing Calico for Kubernetes Networking within the Spectro Cloud console' -hiddenFromNav: true -type: "integration" -category: ['network', 'amd64', 'fips'] -logoUrl: 'https://registry.spectrocloud.com/v1/cni-calico/blobs/sha256:9a08103ccd797857a81b6ce55fa4f84a48bcb2bddfc7a4ff27878819c87e1e30?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Calico - -Palette Network Pack(s) helps provision resources for setting up Cluster networking in Kubernetes. Design goals for the Kubernetes network model can be found [here](https://kubernetes.io/docs/concepts/cluster-administration/networking/#the-kubernetes-network-model). - -[Project Calico](http://docs.projectcalico.org/) is an open-source container networking provider and network policy engine. - -Calico provides highly scalable networking and network policy solution for connecting Kubernetes pods based on the same IP networking principles as the internet, for both Linux (open source) and Windows (proprietary - available from [Tigera](https://www.tigera.io/essentials/)). Calico can be deployed without encapsulation or overlays to provide high-performance, high-scale data center networking. Calico also provides a fine-grained, intent-based network security policy for Kubernetes pods via its distributed firewall. - -Calico manifest used for networking does the following: - -* Installs the `calico/node` container on each host using a DaemonSet. -* Installs the Calico CNI binaries and network config on each host using a DaemonSet. -* Runs `calico/kube-controllers` as a deployment. -* The `calico-etcd-secrets` secret, which optionally allows for providing etcd TLS assets. -* The `calico-config` ConfigMap, which contains parameters for configuring the install. - - -Limitations: -AWS, VMWare supports IP-in-IP encapsulation type. -Azure supports VXLAN encapsulation type. 
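-
-As a sketch of how these limitations map to configuration, the pack values for an Azure cluster might look like the following. The dotted parameter names come from the Notable Parameters table below; check the pack's values.yaml for the exact nesting, which may differ.
-
-```yaml
-calico:
-  # Azure requires VXLAN; AWS and VMware use IP-in-IP (the default).
-  encapsulationType: "CALICO_IPV4POOL_VXLAN"
-  # Supported values: Always, CrossSubnet, Never.
-  encapsulationMode: "Always"
-  # Must match the podCIDR range configured in the Kubernetes layer.
-  calicoNetworkCIDR: "192.168.0.0/16"
-```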
- - -# Versions Supported - - - - - -* **3.24.5** -* **3.24.1** -* **3.24.0** - - - - - -* ** 3.23.0** - - - - -* ** 3.22.0** - - - - - -* **3.19.0** - - - - - -* **3.16.0** - - - - - -* **3.10.2** - - - - - -* **3.9.4** - - - - - -# Notable Parameters - -| Name | Supported Values | Default value | Description | -| --- | --- | --- | --- | -| calico.encapsulationType | `CALICO_IPV4POOL_IPIP`, `CALICO_IPV4POOL_VXLAN` | `CALICO_IPV4POOL_IPIP` - AWS, VMware clouds | The encapsulation type to be used for networking (depends on the cloud) | -| | | `CALICO_IPV4POOL_VXLAN` - Azure cloud | | -| calico.encapsulationMode | `Always, CrossSubnet, Never` | Always | The mode to use the IPv4 POOL created at start up | -| calico.calicoNetworkCIDR | CIDR range | `192.168.0.0/16` | CIDR range to be assigned for Pods. This range should match the `podCIDR` range specified in the Kubernetes layer | - -# Troubleshooting - -* A daemon set is installed and so a calico-node pod should run on all the nodes in the cluster to provide networking. -* For any issues with networking, check calico-node and calico-kube-controller pods on the cluster. diff --git a/content/docs/06-integrations/00-centos.md b/content/docs/06-integrations/00-centos.md deleted file mode 100644 index 5484e9c89f..0000000000 --- a/content/docs/06-integrations/00-centos.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: 'CentOS' -metaTitle: 'CentOS Operating System' -metaDescription: 'Choosing CentOS as an Operating System within the Spectro Cloud Console' -hiddenFromNav: true -type: "integration" -category: ['operating system', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/centos-vsphere/blobs/sha256:fe51960e2a05745b7b9217e244e47fac401edcdb184b500d75cc537cecb81ef1?type=image/png' ---- - -import WarningBox from 'shared/components/WarningBox'; - - - -# CentOS - -CentOS Linux distribution is a stable, predictable, manageable and reproducible platform derived from the sources of Red Hat Enterprise Linux (RHEL). It provides a rich base platform for open source communities to build upon. Spectro Cloud provides CentOS as a development framework for its users. - -## Version Supported - -**CentOS 7.7** - - -## References - -https://wiki.centos.org/ - -https://www.centos.org/about/ diff --git a/content/docs/06-integrations/00-certmanager.md b/content/docs/06-integrations/00-certmanager.md deleted file mode 100644 index 7fb8a82f19..0000000000 --- a/content/docs/06-integrations/00-certmanager.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: 'cert-manager' -metaTitle: 'cert-manager' -metaDescription: 'cert-manager Security pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['security', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/certmanager/blobs/sha256:7882e13d7056781a0195ec15e3b9fa5d4b4bb7f8b4e2c32cc5e254e2295c6a16?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# cert-manager - -cert-manager adds certificates and certificate issuers as resource types in Kubernetes clusters, and simplifies the process of obtaining, renewing and using those certificates. It can issue certificates from a variety of supported sources, including Let’s Encrypt, HashiCorp Vault, and Venafi as well as private PKI. 
It also takes care of the certificate validity and attempts to renew certificates before expiry. - -## Versions Supported - - - - - -**1.9.1** - - - - - - -**1.8.1** - - - - - -**1.7.1** - - - - - -**1.4.0** - - - - - -**1.1.0** - - - - - - -## References - -https://cert-manager.io/docs/ diff --git a/content/docs/06-integrations/00-cilium-tetragon.md b/content/docs/06-integrations/00-cilium-tetragon.md deleted file mode 100644 index 9c602deab4..0000000000 --- a/content/docs/06-integrations/00-cilium-tetragon.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: 'Cilium-Tetragon' -metaTitle: 'cilium-Tetragon' -metaDescription: 'Cilium Tetragon monitoring pack for Spectro Cloud Palette' -hiddenFromNav: true -type: "integration" -category: ['monitoring', 'amd64'] -logoUrl: 'https://soak.stage.spectrocloud.com/assets/monitoring_layer.3b14cf5b.svg' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Cilium Tetragon - -[Tetragon](https://github.com/cilium/tetragon) is an eBPF based security observability and runtime enforcement. eBPF is used to safely and efficiently extend the kernel's capabilities without requiring changing the kernel source code or loading kernel modules. Tetragon is a Cilium community open-source project that enables profound visibility with filtering and aggregation with the eBPF collector support to deliver visibility at depth with minimal overhead. - -Palette supports Cilium Tetragon as an add-on pack for monitoring services. Refer to the [create cluster profile](/cluster-profiles/task-define-profile#overview) page for more information on how to use an add-on pack. - -
- -## Versions Supported - - - - - -**0.8.** - - - - - - - -## References - -https://github.com/cilium/tetragon diff --git a/content/docs/06-integrations/00-cilium.md b/content/docs/06-integrations/00-cilium.md deleted file mode 100644 index 4575eb7a3a..0000000000 --- a/content/docs/06-integrations/00-cilium.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: 'Cilium' -metaTitle: 'cilium' -metaDescription: 'Cilium network pack for Spectro Cloud Palette' -hiddenFromNav: true -type: "integration" -category: ['network', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/cni-cilium/blobs/sha256:dbc239ac739ea2939ef41dd0743b82281bc82c360326cd7c536f73f0053e2cd2?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Cilium - -Palette Network Pack(s) helps provision resources for setting up Cluster networking in Kubernetes. For more Kubernetes network model design goals visit [here](https://kubernetes.io/docs/concepts/cluster-administration/networking/#the-kubernetes-network-model). - -Palette supports **Cilium**, an open-source software for securing and observing network connectivity between cloud-native container workloads. Cilium is underpinned by a Linux Kernel technology called eBPF, to enable dynamic and strong security visibility and control logic within Linux. As eBPF runs within the Linux Kernel, Cilium security policies are applied and updated independent of the application code or container configuration. - -The Cilium agent runs on all clusters and servers to provide networking, security and observability to the workload running on that node. - -## Prerequisite - -* If the user is going for the BYO (Bring your own) Operating system use case then, HWE (Hardware Enabled) Kernel or a Kernel that supports [eBPF](https://ebpf.io/) modules needs to be provisioned. - -**Palette OS images are by default provisioned with the above pre-requisite.** - -## Versions Supported - - - - - -**1.10.9** - - - - - - -## References - -https://docs.cilium.io/en/stable/ diff --git a/content/docs/06-integrations/00-citrix-ipam.md b/content/docs/06-integrations/00-citrix-ipam.md deleted file mode 100644 index 64c06403af..0000000000 --- a/content/docs/06-integrations/00-citrix-ipam.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: 'Citrix IPAM' -metaTitle: 'Citrix IPAM' -metaDescription: 'Citrix IPAM Load Balancer pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['load balancers', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/lb-citrix-adc/blobs/sha256:17f8ebc0dc69d329a39e5d27fc0ce3574034d18ab1776fabda396c5403b0bd86?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Citrix IPAM and Ingress controller - -The integration helps with IP address management and provides load balancing capabilities for external services deployed on Kubernetes, especially for on-premise deployments. - -## Versions Supported - - - - -* **1.7.6** - - - - -## Components - -Integration deploys the following components: - -* IPAM controller. -* Ingress controller. 
- -## Notable parameters - -| Name | Default Value | Description | -| --- | --- | --- | -| vip.addresses | | The IP address range to be used for external Services | -| vip.namespace | citrix-system | The namespace for IPAM controller | -| citrix-k8s-ingress-controller.namespace | citrix-system | The namespace for Citrix Ingress controller | -| citrix-k8s-ingress-controller.clusterPrefix | | The prefix for resources to load balance applications from multiple clusters | -| citrix-k8s-ingress-controller.nsip | | The IP address of the Citrix ADC | -| citrix-k8s-ingress-controller.username | | Username to connect to Citrix IDC | -| citrix-k8s-ingress-controller.password | | Password to connect to Citrix IDC | - -## References - -[Citrix IPAM Controller](https://developer-docs.citrix.com/projects/citrix-k8s-ingress-controller/en/latest/crds/vip/) - -[Citrix Ingress controller](https://developer-docs.citrix.com/projects/citrix-k8s-ingress-controller/en/latest/network/type_loadbalancer/#expose-services-of-type-loadbalancer-using-an-ip-address-from-the-citrix-ipam-controller) diff --git a/content/docs/06-integrations/00-cloudanix.md b/content/docs/06-integrations/00-cloudanix.md deleted file mode 100644 index 02cf3b2bc7..0000000000 --- a/content/docs/06-integrations/00-cloudanix.md +++ /dev/null @@ -1,195 +0,0 @@ ---- -title: 'Cloudanix' -metaTitle: 'cloudanix' -metaDescription: 'The Cloudanix security pack provides a dashboard that displays threats and unusual behavior in Kubernetes containers in Palette' -hiddenFromNav: true -type: "integration" -hideToC: false -category: ['security', 'amd64'] -logoUrl: 'https://cloudanix-assets.s3.amazonaws.com/static/cloudanix-logo-p.png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Cloudanix - -The Cloudanix pack is an add-on security pack that provides a dashboard to help you detect threats and unusual behavior in your Kubernetes clusters. Cloudanix detects the following. - -
- -- Files added or modified in sensitive directories - - -- SSH into a container - - -- Modifications to shell configuration files - - -- Attempts to read sensitive files that contain credential information - - -- Crypto mining - -The Cloudanix dashboard provides an interactive interface that displays the mapping between threat events and associated container, pod, and node workloads. Additionally, Cloudanix identifies the user who initiated an activity identified as a threat and the command that was used. - -You can also start Jira workflows and target specific workloads from the Cloudanix dashboard. - -# Versions Supported - - - - - -## Prerequisites - -- CPUs: 0.5 -- Memory: 256 MiB -- Kubernetes 1.19.x to 1.25.x -- Kernel version 4.5 and higher - -## Parameters - -The Cloudanix pack has the following parameters, which are auto-filled based on Palette user information. - -| Name | Description | -| --- | --- | -| ``userEmail`` | The email address of the user who created the cluster and cluster profile. | -| ``partnerIdentifier`` | A Cloudanix unique identifier for Spectro Cloud. | -| ``organizationId`` | The organization tenant ID in Palette. | -| ``userName`` | Palette user name. | -| ``accountName`` | Palette cloud account name. | -| ``accountType`` | Cloud account type such as AWS or GCP, Azure, or others. | -| ``accountId`` | The user's cloud account ID. | -| ``clusterName`` | The name of the cluster. | -| ``clusterIdentifier`` | The cluster's unique identifier. | -| ``clusterDomain`` | The Palette cloud account type such as AWS, GCP, Azure, or others. | - -## Usage - -This Helm Chart installs four Cloudanix services to enable container security capabilities: - -
- -- **config-cron**: A job that runs periodically in a Kubernetes cluster to maintain the configuration of Cloudanix inventory and threat services. -- **misconfig-cron**: A job that captures Kubernetes misconfigurations and displays them on the Cloudanix dashboard. -- **inventory-service**: An inventory service that detects any new Kubernetes resources and displays them on the Cloudanix dashboard. -- **threat-service**: A threat service that exports threat events and affected Kubernetes resources, which are visible on the Cloudanix dashboard. - - - -From the **Workloads** page, click the **Risks** tab to view a list of failed threat rules. You can exclude resources, such as pods and containers, from the risk findings. - -
- -### Kubernetes 1.25 and higher - -When you use the Cloudanix 1.0.x pack with Kubernetes 1.25 and higher, you need to add the **Spectro Namespace Labeler** add-on pack to your cluster profile. After you create the cluster profile, you then apply it to your cluster. - -Use the following information to find the **Spectro Namespace Labeler** add-on pack. - -- **Pack Type**: System App -- **Registry**: Public Repo -- **Pack Name**: Spectro Namespace Labeler -- **Pack Version**: 1.0.x or higher - - -Below is the YAML file for the **Spectro Namespace Labeler** add-on pack. No action is required. -
- - ```yaml - pack: - namespace: cluster-{{ .spectro.system.cluster.uid }} - - charts: - spectro-namespace-labeler: - namespace: cluster-{{ .spectro.system.cluster.uid }} - - labels: - cloudanix: pod-security.kubernetes.io/enforce=privileged,pod-security.kubernetes.io/enforce-version=v1.26 - ``` - -As a final step, apply the cluster profile to your cluster. - -
- - - -## Prerequisites - -- CPUs: 0.5 -- Memory: 256 MiB -- Kubernetes 1.19.x to 1.24.x -- Kernel version 4.5 and higher - -## Parameters - -The Cloudanix pack has the following parameters, which are auto-filled based on Palette user information. - -| Name | Description | -| --- | --- | -| ``userEmail`` | The email address of the user who created the cluster and cluster profile. | -| ``partnerIdentifier`` | A Cloudanix unique identifier for Spectro Cloud. | -| ``organizationId`` | The organization tenant ID in Palette. | -| ``userName`` | Palette user name. | -| ``accountName`` | Palette cloud account name. | -| ``accountType`` | Cloud account type such as AWS or GCP, Azure, or others. | -| ``accountId`` | The user's cloud account ID. | -| ``clusterName`` | The name of the cluster. | -| ``clusterIdentifier`` | The cluster's unique identifier. | -| ``clusterDomain`` | The Palette cloud account type such as AWS, GCP, Azure, or others. | - -## Usage - -This Helm Chart installs four Cloudanix services to enable container security capabilities: - -
- -- **config-cron**: A job that runs periodically in a Kubernetes cluster to maintain the configuration of Cloudanix inventory and threat services. -- **misconfig-cron**: A job that captures Kubernetes misconfigurations and displays them on the Cloudanix dashboard. -- **inventory-service**: An inventory service that detects any new Kubernetes resources and displays them on the Cloudanix dashboard. -- **threat-service**: A threat service that exports threat events and affected Kubernetes resources which are visible on the Cloudanix dashboard. - -From the **Workloads** page, click the **Risks** tab to view a list of failed threat rules. You can exclude resources, such as pods and containers, from the risk findings. - -
- -
- -# Terraform - -``` hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "cloudanix" { - name = "cloudanix" - version = "0.0.6" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - -# References - -[Cloudanix](https://docs.cloudanix.com/introduction) - -
- -
- -
- - -
-
- - - - - diff --git a/content/docs/06-integrations/00-collectord.md b/content/docs/06-integrations/00-collectord.md deleted file mode 100644 index 7c3c94819b..0000000000 --- a/content/docs/06-integrations/00-collectord.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: 'Outcold Solutions' -metaTitle: 'Outcold Solutions' -metaDescription: 'Outcold Solutions - Monitoring pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -hideToC: false -category: ['monitoring', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/outcold-monitoring/blobs/sha256:3140960d1f39649ad821cfc59450d3c164079b03d15387b2e638eae07442af41?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Outcold Solutions - Monitoring Kubernetes - -Integration provides Kubernetes monitoring solution that includes log aggregation, performance and system metrics, metrics from the control plane and application metrics, a dashboard for reviewing network activity, and alerts to notify you about cluster or application performance issues. - -## Versions Supported - - - - -* **5.0.0** - - - - -## Prerequisites - -This integration forwards logs and metrics to [Splunk](https://www.splunk.com/). Pre-requisites for Splunk are -1. [Install Kubernetes Monitoring application](https://www.outcoldsolutions.com/docs/monitoring-kubernetes/v5/installation/#install-monitoring-kubernetes-application) -2. [Enable HTTP Event Collector (HEC) in Splunk](https://www.outcoldsolutions.com/docs/monitoring-kubernetes/v5/installation/#enable-http-event-collector-in-splunk) -3. Make sure to configure the forwarder settings below while setting up the pack - -```YAML -[general] - -acceptEULA = false - -license = - -fields.kubernetes_cluster = - - -... 
- -# Splunk output -[output.splunk] - -# Splunk HTTP Event Collector url -url = - -# Splunk HTTP Event Collector Token -token = - -# Allow invalid SSL server certificate -insecure = false - -# Path to CA certificate -caPath = - -# CA Name to verify -caName = - -``` -## Components - -The following workloads gets deployed on collectorforkubernetes namespace, by default -* Collectorforkubernetes - Daemonset -* Collectorforkubernetes Master - Daemonset -* Collectorforkubernetes Addon - Deployment - -## References - -* https://www.outcoldsolutions.com/docs/monitoring-kubernetes/v5/ -* https://www.outcoldsolutions.com/docs/monitoring-kubernetes/v5/installation/ diff --git a/content/docs/06-integrations/00-dex.md b/content/docs/06-integrations/00-dex.md deleted file mode 100644 index f8d7e65360..0000000000 --- a/content/docs/06-integrations/00-dex.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: 'Dex' -metaTitle: 'Dex' -metaDescription: 'Dex Authentication pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -hideToC: false -category: ['authentication', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/dex/blobs/sha256:78e381fe12509ed94c7c19cd6f6fc4e896ec66485364644dc1a40229fcf9d90d?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Dex - -Dex is an identity service to drive authentication for Kubernetes API Server through the [OpenID Connect](https://openid.net/connect/) plugin. Clients such as kubectl can act on behalf of users who can log in to the cluster through any identity provider that dex supports. - - -## Versions Supported - - - - - -* **2.35.1** - - - - - -* **2.30.0** - - - - - -* **2.28.0** - - - - - - * **2.25.0** - - - - - - * **2.21.0** - - - - -## Components - -Dex integration in Spectro Cloud will deploy the following components: - -* Dex. -* Dex Client (dex-k8s-authenticator). - -The integration will create self-signed certificates, will cross-configure Dex, Dex Client components & will set appropriate flags on the Kubernetes API Server. - -# Ingress - -Follow below steps to configure Ingress on Dex - -1. Change Dex serviceType from "LoadBalancer" to "ClusterIP" (line #112) -2. Ingress (line #118) - * Enable Ingress; Change enabled from false to "true" - * Set Ingress rules like annotations, path, hosts, etc. - -Follow below steps to configure Ingress on Dex Client - -1. Change dex-k8s-authenticator serviceType from "LoadBalancer" to "ClusterIP" (line #312) -2. Ingress (line #320) - * Enable Ingress; Change enabled from false to "true" - * Set Ingress rules like annotations, path, hosts, etc. 
- -With these config changes, you can access Dex, Dex Client service(s) on the Ingress Controller LoadBalancer hostname / IP - - -## References - -- [Dex](https://github.com/dexidp/dex) -- [Dex Documentation](https://dexidp.io/docs/) -- [Dex K8s Authenticator](https://github.com/mintel/dex-k8s-authenticator) diff --git a/content/docs/06-integrations/00-external-dns.md b/content/docs/06-integrations/00-external-dns.md deleted file mode 100644 index 56c27ab21c..0000000000 --- a/content/docs/06-integrations/00-external-dns.md +++ /dev/null @@ -1,165 +0,0 @@ ---- -title: 'ExternalDNS' -metaTitle: 'ExternalDNS' -metaDescription: 'ExternalDNS pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -hideToC: false -category: ['load balancers', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/external-dns/blobs/sha256:1bfd6dceb0b50efee4068cd6321511f6b24be86e2d613e0a8206e716ba7aea3f?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# ExternalDNS - -The integration helps configure public DNS servers with information about Kubernetes services to make them discoverable. - -# Prerequisites - -Providers have to be set up for this pack to get deployed and work seamlessly. For a list of supported providers and the prerequisites to be set up, visit [providers](https://github.com/kubernetes-sigs/external-dns#status-of-providers) section - -## Versions Supported - - - - -* **0.13.1** -* **0.12.2** - - - - -* **0.7.2** - - - - - -## Components - -Integration deploys the following components: -* External DNS - -## References - -* https://github.com/kubernetes-sigs/external-dns -* https://github.com/bitnami/charts/tree/master/bitnami/external-dns - -## ExternalDNS for Services on AWS Route53 Example - -### Setup prerequisites for AWS Route53 - -* Create the following IAM policy in the AWS account. This is needed for externalDNS to list and create Route53 resources. - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "route53:ChangeResourceRecordSets" - ], - "Resource": [ - "arn:aws:route53:::hostedzone/*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "route53:ListHostedZones", - "route53:ListResourceRecordSets" - ], - "Resource": [ - "*" - ] - } - ] -} -``` -* Create an IAM role and associate the policy created above. Make a note of the role ARN which will be used in ExternalDNS deployment later -* Setup hosted zone in AWS Route53 - ```bash - # Create a DNS zone through AWS CLI - aws route53 create-hosted-zone --name "external-dns-test.my-org.com." --caller-reference "external-dns-test-$(date +%s)" - ``` - -### Deploy ExternalDNS on the cluster - -* Add ExternalDNS pack to the desired profile and deploy it to the cluster. - You may want to configure the following in pack values.yaml - * Configure AWS provider details (line #86) - * Credentials, Zone Type - * AssumeRoleArn with the Role ARN created above - - * Configure txtOwnerId with the ID of the hosted zone created above (line #366) - ```bash - aws route53 list-hosted-zones-by-name --output json --dns-name "external-dns-test.my-org.com." 
| jq -r '.HostedZones[0].Id' - ``` - * Optionally change externalDNS policy and logLevel - -### Deploy Ingress Controller on the cluster - -* Deploy one of the Ingress Controller on the cluster - -### Deploy Applications with Ingress on the cluster - -* Add Prometheus-Operator addon to the same profile where ExternalDNS is added - * Change serviceType to ClusterIP (line #408) - * Enable Ingress for the add-on packs. In this example, let us use Prometheus-Operator integration. - Ingress config for Grafana will look like the following: - ```yaml - #Ingress config - ingress: - ## If true, Grafana Ingress will be created - ## - enabled: true - - hosts: - - grafana.external-dns-test.my-org.com - - ## Path for grafana ingress - path: / - ``` - When Prometheus-Operator gets deployed in the Cluster, Ingress resource for Grafana will also get created and will look like - ```yaml - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - name: grafana-ingress - namespace: monitoring - spec: - rules: - - host: grafana.external-dns-test.my-org.com - http: - paths: - - backend: - serviceName: grafana - servicePort: 80 - path: / - status: - loadBalancer: - ingress: - - hostname: a9a2eadb64c8e4c2fb37a1f69afb0a30-330939473.us-west-2.elb.amazonaws.com - ``` - -### Verify ExternalDNS (Ingress example) - - * If all goes well, after 2 minutes, ExternalDNS would have inserted 2 records on your hosted zone - ```bash - aws route53 list-resource-record-sets --output json --hosted-zone-id "/hostedzone/ZEWFWZ4R16P7IB" \ - --query "ResourceRecordSets[?Name == 'grafana.external-dns-test.my-org.com.']|[?Type == 'A']" - ``` - * After which, if you access http://grafana.external-dns-test.my-org.com on your browser, you should be able to see Grafana login page - -### Troubleshooting - -* Make sure Ingress resource gets created for the Applications deployed and a LoadBalancer hostname / IP address is set on the Ingress resource -* Check the `external-dns` pod for any issues with ExternalDNS not inserting records. If required, change `logLevel` to debug to see additional info on the logs diff --git a/content/docs/06-integrations/00-external-secrets-operator.md b/content/docs/06-integrations/00-external-secrets-operator.md deleted file mode 100644 index 2ce7e4c3a2..0000000000 --- a/content/docs/06-integrations/00-external-secrets-operator.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: 'External Secrets Operator' -metaTitle: 'external-secrets-operator' -metaDescription: 'external-secrets-operator pack in Palette' -hiddenFromNav: true -type: "integration" -category: ['authentication', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/external-secrets-operator/blobs/sha256:ee6f7f347d381852582f688c70b2564b0a346c2b2ed1221310889075a4453c6d?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# External Secrets Operator - -External Secrets Operator (ESO) is a Kubernetes operator that integrates external secret management -systems like AWS Secrets Manager, HashiCorp Vault, Google Secrets Manager, or Azure Key Vault. The operator reads information from external APIs and automatically injects the values into a Kubernetes Secret. - -You can use the External-Secrets-Operator Add-on pack as an authenticator in Palette. 
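As a complement to the Vault-based samples later in this page, the following is a minimal sketch of a SecretStore backed by AWS Secrets Manager; the resource names and the referenced `aws-credentials` Secret are illustrative:

```yaml
apiVersion: external-secrets.io/v1beta1
kind: SecretStore
metadata:
  name: aws-secretsmanager            # illustrative name
spec:
  provider:
    aws:
      service: SecretsManager
      region: us-east-1
      auth:
        secretRef:
          accessKeyIDSecretRef:
            name: aws-credentials     # Kubernetes Secret holding the AWS access key ID
            key: access-key-id
          secretAccessKeySecretRef:
            name: aws-credentials     # same Secret, different key
            key: secret-access-key
```

An ExternalSecret then references this store by name, in the same way the Vault samples below reference `vault-backend`.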
- - - -Starting from Palette version 3.1, Palette no longer supports upgrades to Kubernetes External Secrets since this is reaching end of life. Migrate or switch to using External Secrets operator instead. - - - - -# Versions Supported - - - - - -* **0.7.1** -* **0.6.0** - - - - - -* **0.5.6** - - - - - -### Sample SecretStore YAML file - -
- -```yml -apiVersion: [external-secrets.io/v1beta1](http://external-secrets.io/v1beta1) -kind: ExternalSecret -metadata: - name: vault-example # Custom name -spec: - refreshInterval: "15s" - secretStoreRef: - name: vault-backend # Custom value - kind: SecretStore - target: - name: mysecretfoobar - data: - - secretKey: foobar - remoteRef: - key: secret/foo # custom value - property: my-value # custom value - -``` - -### Sample ExternalSecret YAML file - -
- -```yml -apiVersion: external-secrets.io/v1beta1 -kind: SecretStore -metadata: - name: custom-name -spec: - provider: - vault: - server: "http://12.34.567.133:0000" # custom server end point - path: "secret" # custom path - version: "v2" # custom version - auth: - # points to a secret that contains a vault token - # https://www.vaultproject.io/docs/auth/token - tokenSecretRef: - name: "vault-token1" # Custom name and key - key: "token1" ---- -apiVersion: v1 -kind: Secret -metadata: - name: vault-token1 -data: - token: cm9vdA== # "root" # custome value -``` - -# References - -[Amazon IAM-Policy-Examples-ASM-Secrets](https://docs.aws.amazon.com/mediaconnect/latest/ug/iam-policy-examples-asm-secrets.html) - -[External Secrets](https://github.com/external-secrets/external-secrets) diff --git a/content/docs/06-integrations/00-falco.md b/content/docs/06-integrations/00-falco.md deleted file mode 100644 index 57314223fe..0000000000 --- a/content/docs/06-integrations/00-falco.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: 'Falco' -metaTitle: 'Falco Integration with Spectro Cloud' -metaDescription: 'Integration of the Falco add on into Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['security', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/falco/blobs/sha256:4e37461d0a31959ca8af65128329750ca3417e883e7e4ba17ee085b01a383a27?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Falco - -Falco integration is a behavioral activity monitor designed to detect anomalous activity in your applications. You can use Falco to monitor the run-time security of your Kubernetes applications and internal components. - -## Versions Supported - - - - - -* **1.16.3** - - - - - -* **1.0.11** -* **1.0.10** - - - - - -* **1.13.1** - - - - - - - -## References - -https://github.com/falcosecurity/charts/tree/master/falco
-https://sysdig.com/blog/falco-helm-chart/ diff --git a/content/docs/06-integrations/00-fluentbit.md b/content/docs/06-integrations/00-fluentbit.md deleted file mode 100644 index 67bed9cf88..0000000000 --- a/content/docs/06-integrations/00-fluentbit.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: 'Fluentbit' -metaTitle: 'Fluentbit' -metaDescription: 'Fluentbit Monitoring pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['logging', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/fluentbit/blobs/sha256:012fbab20e3427b6c1f6a73d2ea0b4cc43cf60991774c4800ddf3e23c4b64544?type=image/png' ---- - -import WarningBox from 'shared/components/WarningBox'; - -# Fluentbit - -Fluent-Bit is a multi-platform log forwarder. The default integration will help forward logs from the Kubernetes cluster to an external ElasticSearch cluster - -## Version - -* **1.9.6** - -## Contents - -Fluent-Bit is installed as a DaemonSet & so, an instance of fluent-bit will be running on all the nodes in the cluster. - -## References - -https://github.com/helm/charts/tree/master/stable/fluent-bit diff --git a/content/docs/06-integrations/00-frp.md b/content/docs/06-integrations/00-frp.md deleted file mode 100644 index edc45d8575..0000000000 --- a/content/docs/06-integrations/00-frp.md +++ /dev/null @@ -1,705 +0,0 @@ ---- -title: 'Spectro Proxy' -metaTitle: 'Spectro Cloud Fast Reverse Proxy' -metaDescription: 'Fast Reverse Proxy Authentication pack in Spectro Cloud-Spectro Proxy' -hiddenFromNav: true -type: "integration" -hideToC: false -category: ['authentication', 'amd64', 'fips'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/spectro-proxy/blobs/sha256:b6081bca439eeb01a8d43b3cb6895df4c088f80af978856ddc0da568e5c09365?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Spectro Proxy -Spectro Proxy is a pack that enables the use of a reverse proxy with a Kubernetes cluster. The reverse proxy allows you to connect to the cluster API of a Palette-managed Kubernetes cluster in private networks or clusters configured with private API endpoints. The reverse proxy managed by Spectro Cloud is also known as the forward reverse proxy (FRP). - -The reverse proxy has a server component and a client component. The reverse proxy server is publicly available and managed by Spectro Cloud. The client is deployed inside your Palette-managed Kubernetes cluster and connects to the reverse proxy server. When you add the Spectro Proxy pack to a cluster profile, a couple of things happen: - -
- -- The kubeconfig file is updated with the reverse proxy address instead of pointing directly to the cluster's API address. The following is an example of a kubeconfig file where the `server` attribute points to the reverse proxy. - -
- - ```yaml hideClipboard coloredLines=4-5 - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS...... - server: https://cluster-61a578b5259452b88941a1.proxy.spectrocloud.com:443 - name: example-server - contexts: - # The remainder configuration is omitted for brevity. - ``` - -- Any requests to the Kubernetes API server, such as kubectl commands, will be routed to the reverse proxy. The reverse proxy forwards the request to the intended client, which is the cluster's API server. The cluster's API server authenticates the request and replies with the proper response. - - -You can attach this pack to a [cluster profile](/cluster-profiles). The pack installs the Spectro Proxy client in the workload clusters and configures the cluster's API server to point to a managed proxy server. - -
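To confirm which endpoint a downloaded kubeconfig is using, you can inspect the active cluster entry; for a proxied cluster the output shows the `proxy.spectrocloud.com` address (assuming the kubeconfig is your current context):

```shell
kubectl config view --minify --output jsonpath='{.clusters[0].cluster.server}'
# Example output for a proxied cluster:
# https://cluster-61a578b5259452b88941a1.proxy.spectrocloud.com:443
```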
- - - - -This pack can be combined with the [Kubernetes dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/) pack to expose the Kubernetes dashboard. To learn more about exposing the Kubernetes dashboard, check out the [Enable Kubernetes Dashboard](/clusters/cluster-management/kubernetes-dashboard) guide. - -
- -
- -## Network Connectivity - - -The host cluster's network configuration defines who can access the host cluster from a network perspective. If a user is in the same network as the cluster, the user may be able to access the host cluster without needing a forward proxy. However, if the user is on a different network, the host cluster's network configuration may limit the user's ability to connect to the host cluster and may require the use of a forward proxy. - -From a network configuration perspective, a cluster can be in a private or a public network. Host clusters deployed in a network that does not allow inbound internet access are considered private. Whereas the clusters deployed in a network with both inbound and outbound access to the internet are considered public. The following are the three possible network connectivity scenarios: - -
- -* The cluster and the user are in the same private network. - - -* The cluster and the user are in different private networks. - - -* The cluster is in a public network. - -
- -![An overview of the three different connectivity scenarios](/integrations_frp_conection_overview.png) - -
- - -The following table summarizes the network connectivity requirements for each scenario and whether the Spectro Proxy is required. - -
- -| **Scenario** | **Description** | **Requires Spectro Proxy?** | -|----------|-------------|------------------------| -| Private cluster in the same network | The cluster is deployed with a private endpoint, and the user is also in the same network. | ❌ | -| Private cluster in a different network | The cluster is deployed with a private endpoint, and the user is in a different network. | ✅ | -| Public cluster in a different network | The cluster is deployed with a public endpoint, and the user is in a different network. | ❌ | - -
- -To learn more about how the Spectro Proxy interacts with clusters in a public or private network environment and when the Spectro Proxy is required, select the tab that matches your use case. - - -
- - - - - - - -Networks labeled as private do not allow inbound internet access. Inbound network requests to the network are allowed only if the connection originated from the internal network. If you are in a different network than the cluster, you can connect to the cluster's API server through the Spectro Proxy. The Spectro Proxy allows you to connect to the cluster's API server although you are not in the same network as the cluster. - - -
- - - -Users that are in a different network than the cluster require the Spectro Proxy server to connect to the cluster's API server. Otherwise, requests to the cluster's API server will fail due to a lack of network connectivity. - - - - -The Spectro Proxy client is installed by the Spectro Proxy pack. The client is deployed in the cluster and connects to the Spectro Proxy server. The Spectro Proxy server is a managed service that is publicly available and managed by Spectro Cloud. The Spectro Proxy server forwards the request to the cluster's API server. The cluster's API server authenticates the request and replies with the proper response. - -The kubeconfig files generated for the host cluster are updated with the Spectro Proxy server's address. When you or other users issue a kubectl command, the request is routed to the Spectro Proxy server. The following is an example of a kubeconfig file where the SSL certificate and server address attribute point to the Spectro Proxy. - - -The following diagram displays the network connection flow of a user attempting to connect to a cluster with private endpoints. The user is in a different network than the cluster. - -
- -1. The user issues a kubectl command to the cluster's API server. - - -2. The request is routed to the Spectro Proxy server. The Spectro Proxy client inside the host cluster has an established connection with the cluster's API server. - - -3. The Spectro Proxy server forwards the request to the cluster's API server located in a different network. The cluster's API server authenticates the request and replies with the proper response. - - -![Private cluster in a different network.](/integrations_frp_conection_private-different-network.png) - -Depending on what type of infrastructure provider you are deploying the host cluster in, you may have to specify the Spectro Proxy server's SSL certificate in the Kubernetes cluster's configuration. Refer to the [Usage](#usage) section below for more information. - - -
- - - - -Networks labeled as private do not allow inbound internet access. Inbound network requests to the network are allowed only if the connection originated from the internal network. If you are in the same network as the cluster, you can connect directly to the cluster's API server. The term "same network" means that from a network perspective, requests can reach the cluster's API server without having to traverse the internet. - - -
- - - -Users in the same network as the cluster do not require the Spectro Proxy server to connect to the cluster's API server. - - - -![Private cluster in the same network.](/integrations_frp_conection_private-same-network.png) - - - -
- - - - -Clusters deployed in a network with both inbound and outbound access to the internet are considered public. - -
- - - - Clusters deployed in a public network do not require the Spectro Proxy to connect to the cluster's API server. - - - -When a cluster has public endpoints, you can query the cluster's Kubernetes API server from any network with internet access. The following diagram displays the network connection flow of a user attempting to connect to a cluster with public endpoints. Any user with access to the internet can connect to the cluster's API server. - -![A public cluster connection path](/integrations_frp_conection_public_connection.png) - - - -
- - -
- ----- - - -
- -## FIPS Support - -Starting with version 1.3.0, the Spectro Proxy pack is compiled using FIPS-approved libraries and components. Previous versions of the Spectro Proxy pack are not FIPS compiled and are not considered FIPS compliant. - -# Versions Supported - - - - - -## Prerequisites - -- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the Spectro Cloud reverse proxy. - - -## Parameters - -The Spectro Proxy supports the following parameters. - -| Parameter | Description | Default | -|-------------------------|--------------------------------------------------------|---------------------------------------------| -| namespace | The Kubernetes namespace to install the Spectro Proxy. | `cluster-{{ .spectro.system.cluster.uid }}` | -| server | The Kubernetes server. | `{{ .spectro.system.reverseproxy.server }}` | -| clusterUid | The Kubernetes cluster identifier. | `{{ .spectro.system.cluster.uid }}` | -| subdomain | The Kubernetes cluster subdomain identifier. | `cluster-{{ .spectro.system.cluster.uid }}` | - - -The Kubernetes dashboard integration supports the following parameters. - -| Parameter | Description | Default | -|-----------------|---------------------------------------------|---------| -| enabled | Enable the dashboard. | `false` | -| useInsecurePort | Use unsecure port (HTTP) for communication. | `false` | - - -The VMware dashboard integration supports the following parameters. - - -| Parameter | Description | Default | -|-----------------|---------------------------------------------|---------| -| enabled | Enable the dashboard. | `false` | - - -## Usage - -To use this pack, you have to add it to your cluster profile. You can also add the Spectro Proxy pack when you create the cluster profile. Check out the [Create Cluster Profile](/cluster-profiles/task-define-profile) guide to learn more about cluster profile creation. - -Depending on the type of cluster, the usage guidance varies. Select the tab that corresponds to the kind of cluster you have. Use the following definitions to help you identify the type of cluster. - -
- - -- **Palette Deployed**: A brand new IaaS cluster that is deployed or will be deployed through Palette. An IaaS cluster is a Kubernetes cluster with a control plane that is not managed by a third party or cloud vendor but is completely managed by Palette. Google GKE and Tencent TKE fall into this category. Clusters in this category get an additional entry in the Kubernetes configuration that adds the reverse proxy certificate (CA) to the API server configuration. - -
- -- **Imported Cluster**: An imported cluster or a non-IaaS cluster with a control plane that a third party manages. Azure AKS and AWS EKS fall in this category, as both Palette and the cloud provider partially manage the clusters. Clusters that fall under this category get the default kubeconfig CA replaced with the CA from the proxy server. Additionally, the kubeconfig authentication method is changed to a bearer token. To support the bearer token method, a new service account is created in the cluster with a role binding that allows Kubernetes API requests to pass through the reverse proxy and connect with the cluster API server. - - - - - - - -
- - - - -Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. This will also result in Kubernetes control plane nodes getting repaved. - - - -Add the following extra certificate Subject Alternative Name (SAN) value to the Kubernetes pack under the `kubeadmconfig.apiServer` parameter section. - -
- -```yaml -certSANs: - - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" -``` - - -The following is an example configuration of the Kubernetes Pack manifest getting updated with the certificate SAN value: - -![frp-cert-san-example](/docs_integrations_frp_cert-san-example.png) - - -For RKE2 and k3s edge-native clusters, add the following configuration to the Kubernetes pack under the `cluster.config` parameter section. -
- -```yaml -tls-san: - - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" -``` - -![TLS-SAN configuration example](/docs_integrations_frp_tls-san-example.png) - -
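For context, the kubeadm-based `certSANs` entry shown earlier sits under the `kubeadmconfig.apiServer` section of the Kubernetes pack values. The sketch below shows only that nesting, with all other fields omitted:

```yaml
kubeadmconfig:
  apiServer:
    certSANs:
      - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}"
```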
- - -
- - -
- - - - -Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. - - - - -Add the Spectro Proxy pack to a cluster profile without making any configuration changes. Use the pack as is. - -
- - - -Set the parameter `k8sDashboardIntegration.enabled` to true if you intend to expose the Kubernetes dashboard. -Review the [Enable Kubernetes Dashboard](/integrations/spectro-k8s-dashboard) guide for more information. - - - -
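In the pack values, that setting would look roughly as follows (YAML nesting inferred from the parameter path; other keys omitted):

```yaml
k8sDashboardIntegration:
  enabled: true
```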
- -
- - - - -
- - - - -## Prerequisites - -- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the Spectro Cloud reverse proxy. - - -## Parameters - -The Spectro Proxy supports the following parameters. - -| Parameter | Description | Default | -|-------------------------|--------------------------------------------------------|---------------------------------------------| -| namespace | The Kubernetes namespace to install the Spectro Proxy. | `cluster-{{ .spectro.system.cluster.uid }}` | -| server | The Kubernetes server. | `{{ .spectro.system.reverseproxy.server }}` | -| clusterUid | The Kubernetes cluster identifier. | `{{ .spectro.system.cluster.uid }}` | -| subdomain | The Kubernetes cluster subdomain identifier. | `cluster-{{ .spectro.system.cluster.uid }}` | - - -The Kubernetes dashboard integration supports the following parameters. - -| Parameter | Description | Default | -|-----------------|---------------------------------------------|---------| -| enabled | Enable the dashboard. | `false` | -| useInsecurePort | Use unsecure port (HTTP) for communication. | `false` | - - -## Usage - -To use this pack, you have to add it to your cluster profile. You can also add the Spectro Proxy pack when you create the cluster profile. Check out the [Create Cluster Profile](/cluster-profiles/task-define-profile) guide to learn more about cluster profile creation. - -Depending on the type of cluster, the usage guidance varies. Select the tab that corresponds to the kind of cluster you have. Use the following definitions to help you identify the type of cluster. - -
- - -- **Palette Deployed**: A brand new IaaS cluster that is deployed or will be deployed through Palette. An IaaS cluster is a Kubernetes cluster with a control plane that is not managed by a third party or cloud vendor but is completely managed by Palette. Google GKE and Tencent TKE fall into this category. Clusters in this category get an additional entry in the Kubernetes configuration that adds the reverse proxy certificate (CA) to the API server configuration. - -
- -- **Imported Cluster**: An imported cluster or a non-IaaS cluster with a control plane that a third party manages. Azure AKS and AWS EKS fall in this category, as both Palette and the cloud provider partially manage the clusters. Clusters that fall under this category get the default kubeconfig CA replaced with the CA from the proxy server. Additionally, the kubeconfig authentication method is changed to a bearer token. To support the bearer token method, a new service account is created in the cluster with a role binding that allows Kubernetes API requests to pass through the reverse proxy and connect with the cluster API server. - - - - - - - -
- - - - -Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. This will also result in Kubernetes control plane nodes getting repaved. - - - -Add the following extra certificate Subject Alternative Name (SAN) value to the Kubernetes pack under the `kubeadmconfig.apiServer` parameter section. - -
- -```yaml -certSANs: - - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" -``` - -The following is an example configuration of the Kubernetes Pack manifest getting updated with the certificate SAN value: - -![frp-cert-san-example](/docs_integrations_frp_cert-san-example.png) - - -For RKE2 and k3s edge-native clusters, add the following configuration to the Kubernetes pack under the `cluster.config` parameter section. -
- -```yaml -tls-san: - - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" -``` - -![TLS-SAN configuration example](/docs_integrations_frp_tls-san-example.png) - -
- - -
- - -
- - - - -Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. - - - - -Add the Spectro Proxy pack to a cluster profile without making any configuration changes. Use the pack as is. - -
- - - -Set the parameter `k8sDashboardIntegration.enabled` to true if you intend to expose the Kubernetes dashboard. -Review the [Enable Kubernetes Dashboard](/clusters/cluster-management/reverse-proxy-dashboard) guide for more information. - - - -
- -
- - - - -
- - - -## Prerequisites - -- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the Spectro Cloud reverse proxy. - - -## Parameters - -The Spectro Proxy supports the following parameters. - -| Parameter | Description | Default | -|-------------------------|--------------------------------------------------------|---------------------------------------------| -| namespace | The Kubernetes namespace to install the Spectro Proxy. | `cluster-{{ .spectro.system.cluster.uid }}` | -| server | The Kubernetes server. | `{{ .spectro.system.reverseproxy.server }}` | -| clusterUid | The Kubernetes cluster identifier. | `{{ .spectro.system.cluster.uid }}` | -| subdomain | The Kubernetes cluster subdomain identifier. | `cluster-{{ .spectro.system.cluster.uid }}` | - - -The Kubernetes dashboard integration supports the following parameters. - -| Parameter | Description | Default | -|-----------------|---------------------------------------------|---------| -| enabled | Enable the dashboard. | `false` | -| useInsecurePort | Use unsecure port (HTTP) for communication. | `false` | - -## Usage - -To use this pack, you have to add it to your cluster profile. You can also add the Spectro Proxy pack when you create the cluster profile. Check out the [Create Cluster Profile](/cluster-profiles/task-define-profile) guide to learn more about cluster profile creation. - -Depending on the type of cluster, the usage guidance varies. Select the tab that corresponds to the kind of cluster you have. Use the following definitions to help you identify the type of cluster. - -
- - -- **Palette Deployed**: A brand new IaaS cluster that is deployed or will be deployed through Palette. An IaaS cluster is a Kubernetes cluster with a control plane that is not managed by a third party or cloud vendor but is completely managed by Palette. Google GKE and Tencent TKE fall into this category. Clusters in this category get an additional entry in the Kubernetes configuration that adds the reverse proxy certificate (CA) to the API server configuration. - -
- -- **Imported Cluster**: An imported cluster or a non-IaaS cluster with a control plane that a third party manages. Azure AKS and AWS EKS fall in this category, as both Palette and the cloud provider partially manage the clusters. Clusters that fall under this category get the default kubeconfig CA replaced with the CA from the proxy server. Additionally, the kubeconfig authentication method is changed to a bearer token. To support the bearer token method, a new service account is created in the cluster with a role binding that allows Kubernetes API requests to pass through the reverse proxy and connect with the cluster API server. - - - - - - - -
- - - - -Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. This will also result in Kubernetes control plane nodes getting repaved. - - - -Add the following extra certificate Subject Alternative Name (SAN) value to the Kubernetes pack under the `kubeadmconfig.apiServer` parameter section. - -
- -```yaml -certSANs: - - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" -``` - -The following is an example configuration of the Kubernetes Pack manifest getting updated with the certificate SAN value: - -![frp-cert-san-example](/docs_integrations_frp_cert-san-example.png) - - -For RKE2 and k3s edge-native clusters, add the following configuration to the Kubernetes pack under the `cluster.config` parameter section. -
- -```yaml -tls-san: - - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" -``` - -![TLS-SAN configuration example](/docs_integrations_frp_tls-san-example.png) - -
- - -
- - -
- - - - -Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. - - - - -Add the Spectro Proxy pack to a cluster profile without making any configuration changes. Use the pack as is. - -
- - - -Set the parameter `k8sDashboardIntegration.enabled` to true if you intend to expose the Kubernetes dashboard. -Review the [Enable Kubernetes Dashboard](/clusters/cluster-management/reverse-proxy-dashboard) guide for more information. - - - -
- -
- - - - -
- - -## Prerequisites - -- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the Spectro Cloud reverse proxy. - - -## Parameters - -The Spectro Proxy supports the following parameters. - -| Parameter | Description | Default | -|-------------------------|--------------------------------------------------------|---------------------------------------------| -| namespace | The Kubernetes namespace to install the Spectro Proxy. | `cluster-{{ .spectro.system.cluster.uid }}` | -| server | The Kubernetes server. | `{{ .spectro.system.reverseproxy.server }}` | -| clusterUid | The Kubernetes cluster identifier. | `{{ .spectro.system.cluster.uid }}` | -| subdomain | The Kubernetes cluster subdomain identifier. | `cluster-{{ .spectro.system.cluster.uid }}` | - - -## Usage - -To use this pack, you have to add it to your cluster profile. You can also add the Spectro Proxy pack when you create the cluster profile. Check out the [Create Cluster Profile](/cluster-profiles/task-define-profile) guide to learn more about cluster profile creation. - -Depending on the type of cluster, the usage guidance varies. Select the tab that corresponds to the kind of cluster you have. Use the following definitions to help you identify the type of cluster. - -
- - -- **Palette Deployed**: A brand new IaaS cluster that is deployed or will be deployed through Palette. An IaaS cluster is a Kubernetes cluster with a control plane that is not managed by a third party or cloud vendor but is completely managed by Palette. Google GKE and Tencent TKE fall into this category. Clusters in this category get an additional entry in the Kubernetes configuration that adds the reverse proxy certificate (CA) to the API server configuration. - -
- -- **Imported Cluster**: An imported cluster or a non-IaaS cluster with a control plane that a third party manages. Azure AKS and AWS EKS fall in this category, as both Palette and the cloud provider partially manage the clusters. Clusters that fall under this category get the default kubeconfig CA replaced with the CA from the proxy server. Additionally, the kubeconfig authentication method is changed to a bearer token. To support the bearer token method, a new service account is created in the cluster with a role binding that allows Kubernetes API requests to pass through the reverse proxy and connect with the cluster API server. - - - - - - - -
- - - - -Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. This will also result in Kubernetes control plane nodes getting repaved. - - - -Add the following extra certificate Subject Alternative Name (SAN) value to the Kubernetes pack under the `kubeadmconfig.apiServer` parameter section. -
- -```yaml -certSANs: - - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" -``` - -The following is an example configuration of the Kubernetes Pack manifest getting updated with the certificate SAN value: - -![frp-cert-san-example](/docs_integrations_frp_cert-san-example.png) - - - -For RKE2 and k3s edge-native clusters, add the following configuration to the Kubernetes pack under the `cluster.config` parameter section. -
- -```yaml -tls-san: - - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" -``` - -![TLS-SAN configuration example](/docs_integrations_frp_tls-san-example.png) - -
- - -
- - -
- - - - -Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. - - - - -Add the Spectro Proxy pack to a cluster profile without making any configuration changes. Use the pack as is. - -
- - - -Set the parameter `k8sDashboardIntegration.enabled` to true if you intend to expose the Kubernetes dashboard. -Review the [Enable Kubernetes Dashboard](/clusters/cluster-management/reverse-proxy-dashboard) guide for more information. - - - -
- -
- - -
-
- - -# Troubleshooting - -Troubleshooting scenarios related to the Spectro Proxy. -
-
-### x509 Unknown Authority Error
-
-If you encounter an x509 unknown authority error when deploying a cluster with the Spectro Proxy, the error message is similar to the following example.
-
-
- -```shell hideClipboard -Unable to connect to connect the server: X509: certiticate signed by unknown authorit signed by -``` - -The workaround for this error is to wait a few moments for all the kubeconfig configurations to get propagated to Palette. The Palette cluster agent sends the original kubeconfig to Palette, followed by the modified kubeconfig containing the reverse proxy settings. If you attempt to open up a web shell session or interact with cluster API during the initialization process, you will receive an x509 error. Once Palette receives the kubeconfig file containing the cluster's reverse proxy configurations from the cluster agent, the x509 errors will disappear. - - -# Terraform - -You can reference the Spectro Proxy pack in Terraform with a data resource. - -```tf -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "spectro-proxy" { - name = "spectro-proxy" - version = "1.2.0" - type = "operator-instance" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - -# References - -- [Enable Kubernetes Dashboard](/clusters/cluster-management/kubernetes-dashboard) - - -- [Terraform Data Resource](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) diff --git a/content/docs/06-integrations/00-gce.md b/content/docs/06-integrations/00-gce.md deleted file mode 100644 index 2fe3185cbc..0000000000 --- a/content/docs/06-integrations/00-gce.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: 'GCE-Persistent-Disk' -metaTitle: 'GCE-Persistent-Disk' -metaDescription: 'GCE Persistent Disk storage pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['storage', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-gcp/blobs/sha256:af4cf7923e75f0ca1fe109f423ff0551855019edfc1d8772653cede454ef87ea?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# GCE Persistent Disk - -The GCE Persistent disk are reliable, high-performance block storage for virtual machine instances. They are designed for high durability. It provides redundant data storage to ensure data integrity. 
The key features of GCE Persistent Disk are: - -* Disk Clones -* High Durability -* Resizable Volumes -* Independent Volumes -* Snapshots -* Machine Images - - -## Versions Supported - - - - - -**1.7.1** - - - - - -**1.0** - - - - - - -# References - -[Google Cloud Persistent Disk](https://cloud.google.com/persistent-disk#section-7) diff --git a/content/docs/06-integrations/00-generic-vm-libvirt.md b/content/docs/06-integrations/00-generic-vm-libvirt.md deleted file mode 100644 index 093c53b873..0000000000 --- a/content/docs/06-integrations/00-generic-vm-libvirt.md +++ /dev/null @@ -1,318 +0,0 @@ ---- -title: 'generic-vm-libvirt' -metaTitle: 'Generic Virtual Machines Libvirt' -metaDescription: 'Choosing Libvirt Generic Virtual Machine within the Palette console' -hiddenFromNav: true -type: "integration" -category: ['system app', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/generic-vm-libvirt/blobs/sha256:23e1ba27947158ccf1ae36913601011508a55103ce1bdb517a175d752fb35eea?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -Generic-VM-Libvirt is a Palette Add-on pack used to simplify deploying the virtual machine applications from a cluster profile or a system profile. Generic-VM-Libvirt extracts all Terraform constructs inside the pack and exposes nothing but the values. Users will then have the ability to modify the add-on pack for the different applications. - -## Version Supported - - - - -* **1.0.2** -* **1.0.0** - - - - -
- -## Configuring Palette Generic VM Libvirt Add-on - -To configure the Generic-VM-Libvirt add-on pack for the application cluster, begin by editing the manifest namespace value. - -`cluster-{{ .spectro.system.cluster.uid }}` - -**Example** - -```yaml -namespace: jet-system -``` - -If multiple instances of this pack have to be deployed on the cluster for different virtual machine applications, then modify '`spectrocloud.com/display-name`' and '`releaseNameOverride`' with distinctive names to make it unique across all the packs in the cluster. - -
- - -```yaml -spectrocloud.com/display-name: vm-app-1 -releaseNameOverride: -``` -
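Filled in, and following the commented hints in the pack manifest below, the override maps the chart name to the chosen instance name:

```yaml
spectrocloud.com/display-name: vm-app-1
releaseNameOverride:
  generic-vm-libvirt: vm-app-1
```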
- -## Generic-VM-Libvirt Pack Manifest - -
- -```yaml -pack: - # for app cluster, namespace value should be "cluster-{{ .spectro.system.cluster.uid }}" - namespace: jet-system - - # if multiple instance of this pack has to be deployed on the cluster for different vm applications - # then modify 'spectrocloud.com/display-name' and 'releaseNameOverride' with unique names to make it - # unique across all the packs in the cluster - # spectrocloud.com/display-name: vm-app-1 - # releaseNameOverride: - # generic-vm-libvirt: vm-app-1 - -charts: - generic-vm-libvirt: - providers: - source: "dmacvicar/libvirt" - version: "0.6.14" - name: vm-app-1 - image: https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img - - # uncomment the below line and comment the above line if the image is present within the host. - # image="/opt/spectrocloud/ubuntu-16.04-server-cloudimg-amd64-disk1.img" - hardware: - cpu: 2 - memory: 6 #in GB - network: ["br-int"] - rootDisk: - size: 50 #in GB - pool: ehl_images - dataDisks: - - size: 20 #in GB - pool: ehl_data - persisted: true - - size: 25 #in GB - pool: ehl_data - persisted: true - cloudInit: - userData: | - #cloud-config - # vim: syntax=yaml - # Note: Content strings here are truncated for example purposes. - ssh_pwauth: true - chpasswd: - list: - - ubuntu:welcome - expire: false - metaData: | - networkConfig: | - version: 2 - ethernets: - ens3: - dhcp4: true - - # preExecCmd & postExecCmd gets executed in each reconcile loop which runs at an interval of ~2 mins - # If you want to run some command or script only whenever VM is getting creating or after VM is destroyed - # then you can use 'preVMInitCmd' and 'postVMInitCmd' respectively - # preExecCmd: "bash /var/files/pre-exec.sh" - # postExecCmd: "bash /var/files/pre-exec.sh" - - # preVMInitCmd & postVMInitCmd gets executed only when VM is being created/recreated and after VM is created/recreated respectively - preVMInitCmd: "" - postVMInitCmd: "" - - # For first time deployment, preVMDestroyCmd won't be invoked. If there is any change in cloud-init then vm resource will get recreated, - # and 'preVMDestroyCmd' will be invoked before deleting VM and once preVMDestroyCmd gets executed successfully, then only VM resource will be deleted. 
- # Once VM is deleted then before another VM is created, preVMInitCmd will be invoked - - # preVMDestroyCmd can also be uded to ssh into vm or call the rest api for the application running inside vm before vm is terminated - # or to download the file and use it later once new vm is provisioned - preVMDestroyCmd: "" - - # extraDomainHclConfig: | - # cpu { - # mode = "host-passthrough" - # } - - # mounts section can be used to mount data inside existing config maps or secrets into the pod as files where pre and post - # hooks are executed - # so that data present in config map or secret can be accessed while executing pre and post exec hooks - mounts: - configMap: - # - name: system-config - # path: /data/system-config - # - name: system-config-2 - # path: /data/system-config-2 - secret: - # - name: system-config - # path: /data/system-config - # - name: system-config-2 - # path: /data/system-config-2 - - # envs section can be used to inject data inside existing config maps or secrets into the pod as env variables - # where pre and post hooks are executed - # so that data present in config map or secret can be accessed while executing pre and post exec hooks - envs: - configMap: - # - name: database-app-config - # env: DATABASE_USER - # dataKey: "db.user" - secret: - # - name: database-app-secret - # env: DATABASE_PASSWORD - # dataKey: "db.password" - - # files present in below section will be added to the pod and will be accessible while executing - # pre and post exec hooks and absolute file path would be '/var/files/' - files: - # - name: pre-exec.sh - # content: | - # #!/bin/bash - # echo "I am pre exec" - # - name: post-exec.sh - # content: | - # #!/bin/bash - # echo "I am post exec" -``` - -
-
-
-
-# Virtual Machine Hooks
-
-The Generic-VM-Libvirt pack supports several hooks that run while VM applications are deployed, covering a range of workflow customization use cases.
-
-
- -## Using preExecCmd and postExecCmd - -The **preExecCmd** and **postExecCmd** commands will be executed in every pod reconciliation. The loop runs at approximately a 2-minute interval. - -If you want to run the command or script only, whenever the virtual machine is getting created or after the virtual machine is destroyed, use **preVMInitCmd** and **postVMInitCmd**, respectively. - -
-
-```yaml
-preExecCmd: "bash /var/files/pre-exec.sh"
-```
-
-```yaml
-postExecCmd: "bash /var/files/post-exec.sh"
-```
- -## Using preVMInitCmd and postVMInitCmd - -The **preVMInitCmd** command is executed, only when the virtual machine is being created or recreated. Likewise, the **postVMInitCmd** command is executed only after the virtual machine is created or recreated. - -**Note**: These commands will not be executed in each reconciliation. - -
- -```yaml -preVMInitCmd: "echo 'Hey! Hang on tight. I am gonna create a VM.'" -``` - -```yaml -postVMInitCmd: "echo 'Ooho! VM is created.'" -``` - -
- -## Using preVMDestroyCmd - -Any command or script provided in this virtual machine hook will execute before the VM gets destroyed. It will be executed only when the VM is being deleted. A virtual machine deletion can happen for any reason, like changing anything in cloud-init or removing the pack from the profile. - -
- -```yaml -preVMDestroyCmd: "" -``` - -
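A filled-in value might call a script delivered through the `files` section, for example to deregister the application before the virtual machine is removed; the script name below is hypothetical:

```yaml
preVMDestroyCmd: "bash /var/files/pre-destroy.sh"
```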
- - -During a first-time deployment, preVMDestroyCmd will not be invoked. However, if there is any change in cloud-init, then the VM resource will be recreated, preVMDestroyCmd will be invoked before deleting the VM, and once preVMDestroyCmd is executed successfully, only then will the VM resource be deleted. - -
-
-Once the virtual machine is deleted and before another virtual machine is created, preVMInitCmd will be invoked. -
- -
- -## Files - -Files presented in this section will be added to the pod, where the pre-and-post exec hooks are executed. - -
- -```yaml -files: -# - name: pre-exec.sh -# content: | -# #!/bin/bash -# echo "I am pre exec" -# - name: post-exec.sh -# content: | -# #!/bin/bash -# echo "I am post exec" -# extraDomainHclConfig: | -# cpu { -# mode = "host-passthrough" -# } -``` - -
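Uncommented, a `files` entry pairs with the exec hooks described earlier: the script is placed under `/var/files/` and can then be invoked from `preExecCmd`. The names below reuse the commented example:

```yaml
files:
  - name: pre-exec.sh
    content: |
      #!/bin/bash
      echo "I am pre exec"

preExecCmd: "bash /var/files/pre-exec.sh"
```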
- - -## Mounts - -Mount the data inside the existing configuration maps or secrets into the pod as files, where pre-and-post hooks are executed. This allows the data present in the configuration map or the secrets file to be accessible while running pre-and-post exec hooks. - -
- -```yaml -mounts: - configMap: - # - name: system-config - # path: /data/system-config - # - name: system-config-2 - # path: /data/system-config-2 - secret: - # - name: system-config - # path: /data/system-config - # - name: system-config-2 - # path: /data/system-config-2 -``` - -
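For example, uncommenting a single `configMap` entry mounts its data at the listed path, where a pre or post exec hook can read it; the `system-config` ConfigMap must already exist in the cluster:

```yaml
mounts:
  configMap:
    - name: system-config
      path: /data/system-config
```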
- -## Environment Variables - -The ENVS section can inject data inside the existing config maps or secrets into the pod as environment variables, where pre-and post-hooks are executed so that data present in the config map or the secret file can be accessed while running pre-and-post exec hooks. - -
- -```yaml -envs: - configMap: - # - name: database-app-config - # env: DATABASE_USER - # dataKey: "db.user" - secret: - # - name: database-app-secret - # env: DATABASE_PASSWORD - # dataKey: "db.password" -``` - - -## References - -[Libvirt Apps](https://libvirt.org/apps.html) - -
-
diff --git a/content/docs/06-integrations/00-generic-vm-vsphere.md b/content/docs/06-integrations/00-generic-vm-vsphere.md deleted file mode 100644 index 8a748b32d3..0000000000 --- a/content/docs/06-integrations/00-generic-vm-vsphere.md +++ /dev/null @@ -1,343 +0,0 @@ ---- -title: 'generic-vm-vsphere' -metaTitle: 'Generic Virtual Machine vSphere' -metaDescription: 'Choosing vSphere Generic Virtual Machine within the Palette console' -hiddenFromNav: true -type: "integration" -category: ['system app', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/generic-vm-vsphere/blobs/sha256:3b121dca3cbc7fed0153d3e1c8c3df20076ec200e091085a3a281ba08cb2261e?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -Generic-VM-vSphere is a Palette Add-on pack used to simplify deploying the virtual machine resource from a cluster profile or a system profile. Generic-VM-vSphere extracts all Terraform constructs inside the pack and exposes nothing but the values. Users will then have the ability to modify the add-on pack for the different applications. - -
- -## Version Supported - - - - -* **1.0.4** -* **1.0.0** - - - - -
- -## Configuring Palette Generic-VM-vSphere Add-on - -To configure the Generic-VM-vSphere Add-on pack for the application cluster, the namespace value should be as follows: - -`cluster-{{ .spectro.system.cluster.uid }}` -
-
-```yaml
-namespace: cluster-{{ .spectro.system.cluster.uid }}
-```
-
-If multiple instances of this pack have to be deployed on the cluster for different virtual machine applications, then modify '`spectrocloud.com/display-name`' and '`releaseNameOverride`' with different names to make them unique across all the packs in the cluster.
-
- -```yaml -spectrocloud.com/display-name: vm-app-1 -releaseNameOverride: -``` -
-
- - -## Generic-VM-vSphere Pack Manifest - -
- -```yaml -pack: - # for app cluster, namespace value should be "cluster-{{ .spectro.system.cluster.uid }}" - namespace: jet-system - - # if multiple instance of this pack has to be deployed on the cluster for different vm applications - # then modify 'spectrocloud.com/display-name' and 'releaseNameOverride' with unique names to make it - # unique across all the packs in the cluster - # spectrocloud.com/display-name: vm-app-1 - # releaseNameOverride: - # generic-vm-vsphere: vm-app-1 - -charts: - generic-vm-vsphere: - providers: - source: "hashicorp/vsphere" - version: "2.2.0" - name: vm-app-1 - hardware: - cpu: 2 - memory: 6 #in GB - dataDisks: - - size: 20 #in GB - - size: 25 #in GB - - - # To use an image from a remote url please uncomment the below lines and comment out the vmTemplate section. - # ovaTemplate: - # remote_ovf_url: "https://192.168.100.12:8443/artifactory/generic-eis-all/ehl-guest/sles-154-cloud-kube-v1.21.10-20220718141926-014.ova" - # name: system-cluster-ova - # network: - # - name: "VM Network" - # value: "VLAN-909" - # disk: - # size: 40 - vmTemplate: "spectro-templates/ubuntu-focal-20.04-cloudimg-20220207" - guestId: "ubuntu64Guest" #ubuntu64Guest for ubuntu, sles15_64Guest for sles etc - scsiType: "lsilogic" - - cloudInit: - # cloud init properties can be injected in vsphere via guest extra config (guestExtraConfig) or via vapp properties (vAppProperties) - # if cloud init type is vAppProperties then add data in vAppProperties: section and leave guestExtraConfig commented - # else if cloud init type is guestExtraConfig then add data in guestExtraConfig: section and leave vAppProperties commented - type: guestExtraConfig # valid values is one of ["vAppProperties" or "guestExtraConfig"] - userData: | - #cloud-config - # vim: syntax=yaml - # Note: Content strings here are truncated for example purposes. - ssh_pwauth: true - chpasswd: - list: - - ubuntu:welcome - expire: false - metaData: | - - networkConfig: | - version: 2 - ethernets: - ens3: - dhcp4: true - - guestExtraConfig: - "guestinfo.network_config": base64encode(data.template_file.network_config.rendered) - "guestinfo.network_config.encoding": "base64" - "guestinfo.userdata": base64encode(data.template_file.user_data.rendered) - "guestinfo.userdata.encoding": "base64" - - vAppProperties: - #instance-id: vm-app-1 - #hostname: vm-app-1 - #public-keys: "ssh-rsa AAAAB3....NGJwwlOmNrw== spectro@spectro" - #password: abcde12345 - #user-data: data.template_file.user_data.rendered - - # 'extraVMHclConfig' can be used to provide extra configuration in the virtual machine and config should be provided in HCL - # format - # extraVMHclConfig: | - # cdrom { - # client_device = true - # } - - # preExecCmd & postExecCmd gets executed in each reconcile loop which runs at an interval of ~2 mins - # If you want to run some command or script only whenever VM is getting creating or after VM is destroyed - # then you can use 'preVMInitCmd' and 'postVMInitCmd' respectively - # preExecCmd: "bash /var/files/pre-exec.sh" - # postExecCmd: "bash /var/files/pre-exec.sh" - - # preVMInitCmd & postVMInitCmd gets executed only when VM is being created/recreated and after VM is created/recreated respectively - preVMInitCmd: "" - postVMInitCmd: "" - - # For first time deployment, preVMDestroyCmd won't be invoked. If there is any change in cloud-init then vm resource will get recreated, - # and 'preVMDestroyCmd' will be invoked before deleting VM and once preVMDestroyCmd gets executed successfully, then only VM resource will be deleted. 
- # Once VM is deleted then before another VM is created, preVMInitCmd will be invoked - - # preVMDestroyCmd can also be used to ssh into vm or call the rest api for the application running inside vm before vm is terminated - # or to download the file and use it later once new vm is provisioned - preVMDestroyCmd: "" - - # mounts section can be used to mount data inside existing config maps or secrets into the pod as files where pre and post - # hooks are executed - # so that data present in config map or secret can be accessed while executing pre and post exec hooks - mounts: - configMap: - # - name: system-config - # path: /data/system-config - # - name: system-config-2 - # path: /data/system-config-2 - secret: - # - name: system-config - # path: /data/system-config - # - name: system-config-2 - # path: /data/system-config-2 - - # envs section can be used to inject data inside existing config maps or secrets into the pod as env variables - # where pre and post hooks are executed - # so that data present in config map or secret can be accessed while executing pre and post exec hooks - envs: - configMap: - # - name: database-app-config - # env: DATABASE_USER - # dataKey: "db.user" - secret: - # - name: database-app-secret - # env: DATABASE_PASSWORD - # dataKey: "db.password" - - # files present in below section will be added to the pod and will be accessible while executing - # pre and post exec hooks and absolute file path would be '/var/files/' - files: - # - name: pre-exec.sh - # content: | - # #!/bin/bash - # echo "I am pre exec" - # - name: post-exec.sh - # content: | - # #!/bin/bash - # echo "I am post exec" -``` - -
-
-
-# Virtual Machine Hooks
-
-The Generic-VM-vSphere pack supports several hooks that run while VM applications are deployed, covering a range of workflow customization use cases.
-
-
-
-## Using extraVMHclConfig
-
-The extraVMHclConfig command can be used to provide extra configuration for the virtual machine. The configuration must be provided in HashiCorp Configuration Language (HCL) format.
-
-```terraform
-# extraVMHclConfig: |
-#   cdrom {
-#     client_device = true
-#   }
-```
-
-## Using preExecCmd and postExecCmd
-
-The **preExecCmd** and **postExecCmd** commands will be executed in every pod reconciliation. The loop runs at approximately a 2-minute interval.
-
-If you want to run a command or script only when the virtual machine is being created, or only after it has been created, use **preVMInitCmd** and **postVMInitCmd** instead, as described in the next section.
-
-```yaml
-preExecCmd: "bash /var/files/pre-exec.sh"
-```
-
-```yaml
-postExecCmd: "bash /var/files/post-exec.sh"
-```
-
- -## Using preVMInitCmd and postVMInitCmd - -The **preVMInitCmd** command is executed, only when the virtual machine is being created or recreated. Likewise, the **postVMInitCmd** command is executed only after the virtual machine is created or recreated. - -**Note**: These commands will not be executed in each reconciliation. - -
- -```yaml -preVMInitCmd: "echo 'Hey! Hang on tight. I am gonna create a VM.'" -``` - -```yaml -postVMInitCmd: "echo 'Ooho! VM is created.'" -``` - -
- -## Using preVMDestroyCmd - -Any command or script provided in this virtual machine hook will execute before the virtual machine is destroyed. It will be executed only when the VM is getting deleted. A virtual machine deletion can happen for any reason, like changing anything in cloud-init or removing the pack from the profile. - -
- -```yaml -preVMDestroyCmd: "" -``` -
- - -During a first-time deployment, preVMDestroyCmd won't be invoked. However, if there is any change in cloud-init, then the VM resource will be recreated, preVMDestroyCmd will be invoked before deleting the VM, and once preVMDestroyCmd is executed successfully, only then the VM resource will be deleted. - -
-
-Once the VM is deleted and before another virtual machine is created, preVMInitCmd will be invoked. -
- -
-
- -## Mounts - -Mount the data inside the existing configuration map or secret into the pod as files, where pre-and-post hooks are executed. This allows the data present in the configuration map or the secrets file to be accessible while running pre-and-post exec hooks. - - -
- -```yaml -mounts: - configMap: - # - name: system-config - # path: /data/system-config - # - name: system-config-2 - # path: /data/system-config-2 - secret: - # - name: system-config - # path: /data/system-config - # - name: system-config-2 - # path: /data/system-config-2 -``` - -
-
-## Environment Variables
-
-Use the envs section to inject data from existing ConfigMaps or Secrets as environment variables into the pod where the pre and post hooks are executed. This makes the data in the ConfigMap or Secret available to the pre and post exec hooks. A short sketch follows the commented example below.
-
- -```yaml -envs: - configMap: - # - name: database-app-config - # env: DATABASE_USER - # dataKey: "db.user" - secret: - # - name: database-app-secret - # env: DATABASE_PASSWORD - # dataKey: "db.password" -``` - -
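For illustration, assuming a Secret named `database-app-secret` with a data key `db.password` already exists, a hook could check for the injected variable as in this sketch:

```yaml
# Sketch only: expose a Secret value to the hooks as an environment variable.
# database-app-secret and its db.password key are assumed to exist.
envs:
  secret:
    - name: database-app-secret
      env: DATABASE_PASSWORD
      dataKey: "db.password"
preExecCmd: "sh -c 'test -n \"$DATABASE_PASSWORD\" && echo DATABASE_PASSWORD is set'"
```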
-
-## Files
-
-Files defined in this section are added to the pod and are accessible while the pre and post exec hooks are executed. The files are placed under the absolute path `/var/files/`. A sketch that wires these files to the exec hooks follows the commented example below.
-
- -```yaml -files: -# - name: pre-exec.sh -# content: | -# #!/bin/bash -# echo "I am pre exec" -# - name: post-exec.sh -# content: | -# #!/bin/bash -# echo "I am post exec" -``` - -
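Putting the pieces together, the commented defaults above suggest a configuration like the following sketch, which defines both scripts and points the exec hooks at them:

```yaml
# Sketch based on the commented defaults: define the hook scripts and wire them
# to the exec hooks. Files are made available under /var/files/.
files:
  - name: pre-exec.sh
    content: |
      #!/bin/bash
      echo "I am pre exec"
  - name: post-exec.sh
    content: |
      #!/bin/bash
      echo "I am post exec"
preExecCmd: "bash /var/files/pre-exec.sh"
postExecCmd: "bash /var/files/post-exec.sh"
```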
diff --git a/content/docs/06-integrations/00-grafana-spectrocloud-dashboards.md b/content/docs/06-integrations/00-grafana-spectrocloud-dashboards.md deleted file mode 100644 index f5fa7c59db..0000000000 --- a/content/docs/06-integrations/00-grafana-spectrocloud-dashboards.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: 'Spectro Cloud Grafana Dashboards' -metaTitle: 'Spectro Cloud Grafana Dashboards' -metaDescription: 'Learn more about the Spectro Cloud Grafana dashboard and how to use it.' -hiddenFromNav: true -type: "integration" -category: ['monitoring', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/spectrocloud-grafana-dashboards/blobs/sha256:a48c9929480a8c463e409e7563279f97d80e674c5cc91cb81c47454aea2c203d?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -The Spectro Cloud Grafana Dashboards is an addon pack that exposes internal cluster resource metrics. You can access the information exposed by the pack in Grafana by visiting the **Spectro Cloud / Spectro Clusters** dashboard. The following information is exposed by the Spectro Cloud Grafana Dashboards.

- -- Status of all cluster profile layers. - - -- CPU and Memory usage of the cluster and all the pods in the cluster. - - -- Cluster health status. - - -- The cluster's node health status and uptime. - -
- - -![A grafana dashboard view of the cluster metric displaying pack status](/clusters_cluster-management_grafana_spectro_metrics.png) - - -# Versions Supported - -**1.0.X** - -# Prerequisites - -* A host cluster that has the [Prometheus Operator pack](/integrations/prometheus-operator) `v45.4.X` or greater installed. Check out [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) for instructions on how to deploy a monitoring stack. - - -* A cluster profile with the [Prometheus Cluster Metrics](/integrations/prometheus-cluster-metrics) pack `v3.4.X` or greater installed. - - -# Usage - -The Spectro Cloud Grafana Dashboards require no additional configuration and the pack is designed to work out-of-the-box. - -You can learn how to add the Spectro Cloud Grafana Dashboards to your cluster by following the steps outlined in the [Enable Monitoring on Host Cluster](/clusters/cluster-management/monitoring/deploy-agent/). - -
- - - -Pods without the defined attributes `request` and `limit` will not display metrics data in the Grafana out-of-the-box Kubernetes Pods dashboard. - - - - - -# Terraform - - -```terraform -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "spectro-cloud-grafana-dashboards" { - name = "spectrocloud-grafana-dashboards" - version = "1.0.0" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - - -# References - -- [Enable Monitoring on Host Cluster](/clusters/cluster-management/monitoring/deploy-agent/). - - -- [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) - - -- [Prometheus Operator pack](/integrations/prometheus-operator) - - -- [Prometheus Agent](/integrations/prometheus-agent) \ No newline at end of file diff --git a/content/docs/06-integrations/00-heartbeat.md b/content/docs/06-integrations/00-heartbeat.md deleted file mode 100644 index ac28be8ba2..0000000000 --- a/content/docs/06-integrations/00-heartbeat.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: 'heart-beat' -metaTitle: 'heart-beat' -metaDescription: 'Heart Beat monitoring pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['monitoring', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/heartbeat/blobs/sha256:19fec69ae172c3e54d5fb09c176517cf7bfeb1bc740bde65c200e14115510313?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Heartbeat - -Heartbeat is a lightweight daemon installed to a remote server for periodically checking the status of the services and determine whether they are currently available and reachable. Heartbeat is useful to verify that your service level agreements are met during the service uptime. - -## Versions Supported - - - - - -**1.0.0** - - - - -## References - -https://www.elastic.co/guide/en/beats/heartbeat/current/index.html diff --git a/content/docs/06-integrations/00-istio.md b/content/docs/06-integrations/00-istio.md deleted file mode 100644 index 3f8d669db6..0000000000 --- a/content/docs/06-integrations/00-istio.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: 'Istio' -metaTitle: 'Istio Service Mesh in Spectro Cloud' -metaDescription: 'Choosing Istio as a Service Mesh app within the Spectro Cloud console' -hiddenFromNav: true -type: "integration" -category: ['service mesh', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/istio/blobs/sha256:c80cf596d4859261ab892e987f835bd11161bd139dd8e4147b652c6b93924cb2?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Istio Operator - -This Integration aims to automate and simplify the rollout of the various Istio components which helps with service mesh use cases. 
- -## Versions Supported - - - - - -* **1.14.3** -* **1.14.1** - - - - - -* **1.6.2** - - - - - -## Contents - -The integration deploys the Istio Operator with the 'demo' profile which deploys the following components: - -* Istiod -* Istio Ingress Gateway -* Istio Egress Gateway -* Grafana -* Prometheus -* Istio Tracing -* Kiali - -For more information on the profiles, view the [official document](https://istio.io/latest/docs/setup/additional-setup/config-profiles/). - -## References - -https://istio.io/
-https://github.com/istio/operator diff --git a/content/docs/06-integrations/00-kibana.md b/content/docs/06-integrations/00-kibana.md deleted file mode 100644 index 88a3ed5002..0000000000 --- a/content/docs/06-integrations/00-kibana.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: 'Kibana' -metaTitle: 'Elasticsearch-Fluentd-Kibana' -metaDescription: 'Kibana Monitoring pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -hideToC: false -category: ['logging', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/elastic-fluentd-kibana/blobs/sha256:3b6d6486eb216d46164fc8b7cb784b0be6b851a85726f18bdf4450d5ed1386eb?type=image/png' ---- - -import WarningBox from 'shared/components/WarningBox'; - -# Elasticsearch-Fluentd-Kibana - -The logging integration installs a production-grade ElasticSearch cluster with Kibana and Fluentd by default on the Kubernetes cluster. This integration provides a rich set of logging features like forwarding, aggregating & parsing logs from the Kubernetes cluster. - -## Contents - -The default integration deployed will have the following components: - -* ElasticSearch Master (3 replicas). -* ElasticSearch Data (2 replicas). -* ElasticSearch Client (2 replicas). -* ElasticSearch Curator. -* Fluentd (one per node). -* Kibana. - -# Ingress - -Follow below steps to configure Ingress on Kibana - -1. Change serviceType from "LoadBalancer" to "ClusterIP" (line #643) -2. Ingress (line #670) - * Enable Ingress; change enabled from false to "true" - * Set Ingress rules like annotations, path, hosts, etc. - -With these config changes, you can access Kibana service on the Ingress Controller LoadBalancer hostname / IP - -## References - -https://github.com/helm/charts/tree/master/stable/elasticsearch -https://github.com/helm/charts/tree/master/stable/fluentd -https://github.com/helm/charts/tree/master/stable/kibana diff --git a/content/docs/06-integrations/00-kong.md b/content/docs/06-integrations/00-kong.md deleted file mode 100644 index c0af11acb3..0000000000 --- a/content/docs/06-integrations/00-kong.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: 'Kong' -metaTitle: 'Kong' -metaDescription: 'Kong Ingress pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['ingress', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/kong/blobs/sha256:600f20583f85ccad4c515e51542f74aa9acb851d5b03ecb0e7b3435eb51ecf56?type=image/png' ---- - -import WarningBox from 'shared/components/WarningBox'; -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Kong Ingress Controller - -The Kong integration is an Ingress Controller for Kubernetes that configures ingress with a load balancer. You can use the Kong as an application load balancer for your application. - -## Version Supported - - - - - -* **2.13.1** - - - - - -* **1.4.0** - - - - - -## Components - -The integration adds the Kong Ingress Controller, which exposes a Kubernetes service of type LoadBalancer. - -## References - -[Kong Ingress Controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers)
-https://github.com/Kong/kubernetes-ingress-controller diff --git a/content/docs/06-integrations/00-kubebench.md b/content/docs/06-integrations/00-kubebench.md deleted file mode 100644 index deabdd64a6..0000000000 --- a/content/docs/06-integrations/00-kubebench.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: 'kube-bench' -metaTitle: 'kube-bench' -metaDescription: 'kube-bench security pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['security', 'amd64'] -logoUrl: https://registry-addon.spectrocloud.com/v1/kube-bench/blobs/sha256:28c233e5ad884d5356a183c37f323263eb4acca860c28b326ecd99094b500c31?type=image/png ---- - - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - - -# kube-bench - -Palette executes kube-bench, a CIS Benchmark scanner by Aqua Security, for every Kubernetes pack to ensure the master and worker nodes are configured securely. It is available as an Add-on layer within Palette. - -kube-bench runs against a series of checks specified in a `controls` YAML file. For more information on how to write tests and config files, refer to the [controls](https://github.com/aquasecurity/kube-bench/blob/main/docs/controls.md) section. - - -## Versions Supported - - - - - - -* **0.6.8** - - - - -## References - -https://github.com/aquasecurity/kube-bench/blob/main/docs/running.md#running-kube-bench diff --git a/content/docs/06-integrations/00-kubehunter.md b/content/docs/06-integrations/00-kubehunter.md deleted file mode 100644 index 137bc264d2..0000000000 --- a/content/docs/06-integrations/00-kubehunter.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: 'kube-hunter' -metaTitle: 'kube-hunter' -metaDescription: 'kube-hunter monitoring pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['monitoring', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/kubehunter/blobs/sha256:6b6b9138fa056677646712a888192498247f71aa421edd27b25458a8fbf8af0c?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Kube Hunter - -Kube Hunter is an open-source tool that hunts for security issues in your Kubernetes clusters. -It’s designed to increase awareness and visibility of the security controls in Kubernetes environments. Kube-hunter probes a domain or address range for open Kubernetes-related ports, and tests for any configuration issues that leave your cluster exposed to attackers. It can be run on a machine in the cluster, and select the option to probe all the local network interfaces. - -## Versions Supported - - - - - -**1.0.3** - - - - -## References - -https://kube-hunter.aquasec.com/ diff --git a/content/docs/06-integrations/00-kubernetes-dashboard.md b/content/docs/06-integrations/00-kubernetes-dashboard.md deleted file mode 100644 index 85b4e753d5..0000000000 --- a/content/docs/06-integrations/00-kubernetes-dashboard.md +++ /dev/null @@ -1,716 +0,0 @@ ---- -title: "Kubernetes Dashboard" -metaTitle: "Kubernetes Dashboard" -metaDescription: "Learn how to manage Kubernetes clusters and applications deployed in them by using the Kubernetes Dashboard Monitoring pack." 
-hiddenFromNav: true -type: "integration" -hideToC: false -category: ['monitoring', 'amd64'] -logoUrl: "https://registry.spectrocloud.com/v1/k8s-dashboard/blobs/sha256:2de5d88b2573af42d4cc269dff75744c4174ce47cbbeed5445e51a2edd8b7429?type=image/png" ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Kubernetes Dashboard - -The [Kubernetes Dashboard](https://github.com/kubernetes/dashboard) add-on pack is a general-purpose, web-based UI that allows you to manage clusters and the applications deployed in them. - -
- -## Versions Supported - - - - - -## Prerequisites - -- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the [Spectro Proxy](/integrations/frp) reverse proxy. - - -- Users or groups must be mapped to a Kubernetes RBAC role, either a Role or a ClusterRole. You can create a custom role through a manifest and use Palette's roleBinding feature to associate the users or groups with the role. Refer to the [Create Role Bindings](/clusters/cluster-management/cluster-rbac#createrolebindings) guide to learn more. - -## Parameters - -| Name | Supported Values | Default Values | Description | -| --- | --- | --- | --- | -| `k8s-dashboard.namespace` | | kubernetes-dashboard | The namespace where you want to run the Kubernetes Dashboard deployment | -| `k8s-dashboard.clusterRole` | Any valid clusterRole from the Kubernetes cluster | `k8s-dashboard-readonly` | The ClusterRole to be attached to the ServiceAccount which defines RBAC to the cluster resources.

By default, a ClusterRole (k8s-dashboard-readonly) with ReadOnly access to most of the resources is deployed. | -| `k8s-dashboard.certDuration` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `8760h` (365 days) | Validity for the Self-signed certificate, specified in hours. | -| `k8s-dashboard.certRenewal` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `720h` (30 days) | Certificate renew before expiration duration | -| `k8s-dashboard.serviceType` | ClusterIP, LoadBalancer | ClusterIP | The ServiceType for dashboard. We recommend using ClusterIP service type to restrict access to the cluster.| -| `k8s-dashboard.skipLogin` | True, False | False | A flag to skip authentications in the Kubernetes Dashboard UI. We recommend using this only for demo purposes, as enabling it could expose a security risk. | - - - -Starting with Kubernetes Dashboard version 2.7.0, the **Connect** button is no longer available. For an optimized experience, we recommend you use the pre-configured version of this dashboard, called Spectro Kubernetes Dashboard pack. To learn more about it and start using it, check out the [Spectro Kubernetes Dashboard](/integrations/spectro-k8s-dashboard) guide. - - - -## Usage - -To use the Kubernetes Dashboard pack, you have to add it to your cluster profile. Use the following information to find the Kubernetes Dashboard pack. -- **Pack Type**: Monitoring -- **Registry**: Public Repo -- **Pack Name**: Kubernetes Dashboard -- **Pack Version**: 2.0.x or higher - -The Kubernetes Dashboard pack requires the [Spectro Proxy](/integrations/frp) pack, which serves as a reverse proxy to expose the Kubernetes dashboard. You must configure the Spectro Proxy pack. - - -### Access Kubernetes Dashboard - -When connected to the cluster remotely, issue the following command to establish a connection to deploy the Kubernetes Dashboard on port 8080. - - -
-
-```bash
-kubectl port-forward --namespace kubernetes-dashboard service/kubernetes-dashboard 8080:443
-```
-
-To access Kubernetes Dashboard, navigate to `https://localhost:8080` in a browser of your choice.
-
-From the Dashboard login page, run the following command from the terminal window to obtain the bearer token:
-
-
-```bash
-kubectl --namespace kubernetes-dashboard describe secret \
-$(kubectl --namespace kubernetes-dashboard get secret | grep kubernetes-dashboard-token | awk '{print $1}')
-```
-
-The following example shows the command output with the token value.
-
- -```yaml -Name: kubernetes-dashboard-token-h4lnf -Namespace: kubernetes-dashboard -Labels: -Annotations: kubernetes.io/service-account.name: kubernetes-dashboard - kubernetes.io/service-account.uid: 00e729f1-6638-4e68-8df5-afa2e2e38095 - -Type: kubernetes.io/service-account-token - -Data -==== -ca.crt: 1029 bytes -namespace: 20 bytes -token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ilg1bTg3RWM4Y1c3NnhkQ3dXbXNDUXQydVpYQklRUWoxa1BaS0ctVkVTSDQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1oNGxuZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjAwZTcyOWYxLTY2MzgtNGU2OC04ZGY1LWFmYTJlMmUzODA5NSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.JU4GOJNjGpkHabUyxBt_2rvtXNjpR3w238BF2oMCQUNf_ZkUGSMeAAgIKxbAuk62dtJNDaRh5yAZ9J5KthMcU6k4qVmodUOJdlvigBVNjTDEhPM-sqJus62HMtwjpvm0CX-aP_A_BqHs2yJ3OgXSX0uHmkUO1FMoZSVaRpOvx7f5bPswxd87L3npuZt4p-NJIX32-DGjBnxdANAHcWil3YHIUbDgQIdjDfN6stGU_JByvzfCJpNCWWDinr772W7iZ3uA28F8uGS0ZMd1E5e1moEFBY8BM015Qxg2Y_k7lmv9S8GMkBJyTiJNiqnwLwfsiE1ycE4Tgq_vuQfFToIMNw -``` - -
- -### Configure Ingress - -Use the following steps to configure ingress for the Kubernetes Dashboard pack. - -
- -1. Ensure the `service.type` parameter is set to "ClusterIP". - - -2. To enable ingress, set the `ingress.enabled` parameter to "true". - - -3. Set ingress rules, such as annotations, path, hosts, and any other rules. - -This allows you to access the Kubernetes Dashboard in hostname or IP format using the IP address that the ingress controller exposes. - -Typically you would point a DNS CNAME record to the ingress controller IP. Talk to your system administrator to learn more about which hostname to use. - -
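As a rough sketch of what these steps could look like in the pack values (the key layout and the annotation and host values are assumptions; verify the exact structure against the pack's values file):

```yaml
# Sketch only: enable ingress for the dashboard. Key layout, annotation, and host
# values are placeholders and must be checked against the pack's values file.
service:
  type: ClusterIP
ingress:
  enabled: true
  annotations:
    kubernetes.io/ingress.class: nginx
  hosts:
    - dashboard.example.com
```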
- -### Configure LoadBalancer - -Use the `service.loadBalancerIP` and `service.externalPort` parameters to connect to the Kubernetes Dashboard. - -
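A comparable sketch for the LoadBalancer option, using the two parameters named above (the IP address is a placeholder, and the nesting should be confirmed against the pack's values file):

```yaml
# Sketch only: expose the dashboard through a LoadBalancer service.
service:
  type: LoadBalancer
  loadBalancerIP: 203.0.113.10   # placeholder address
  externalPort: 443
```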
- - - - - -## Prerequisites - -- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the [Spectro Proxy](/integrations/frp) reverse proxy. - - -- Users or groups must be mapped to a Kubernetes RBAC role, either a Role or a ClusterRole. You can create a custom role through a manifest and use Palette's roleBinding feature to associate the users or groups with the role. Refer to the [Create Role Bindings](/clusters/cluster-management/cluster-rbac#createrolebindings) guide to learn more. - -## Parameters - -| Name | Supported Values | Default Values | Description | -| --- | --- | --- | --- | -| `k8s-dashboard.namespace` | | kubernetes-dashboard | The namespace where you want to run the Kubernetes Dashboard deployment | -| `k8s-dashboard.clusterRole` | Any valid clusterRole from the Kubernetes cluster | `k8s-dashboard-readonly` | The ClusterRole to be attached to the ServiceAccount which defines RBAC to the cluster resources.

By default, a ClusterRole (k8s-dashboard-readonly) with ReadOnly access to most of the resources is deployed. | -| `k8s-dashboard.certDuration` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `8760h` (365 days) | Validity for the Self-signed certificate, specified in hours. | -| `k8s-dashboard.certRenewal` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `720h` (30 days) | Certificate renew before expiration duration | -| `k8s-dashboard.serviceType` | ClusterIP, LoadBalancer | ClusterIP | The ServiceType for dashboard. We recommend using ClusterIP service type to restrict access to the cluster.| -| `k8s-dashboard.skipLogin` | True, False | False | A flag to skip authentications in the Kubernetes Dashboard UI. We recommend using this only for demo purposes, as enabling it could expose a security risk. | - - -Starting with Kubernetes Dashboard version 2.7.0, the **Connect** button is no longer available. For an optimized experience, we recommend you use the pre-configured version of this dashboard, called Spectro Kubernetes Dashboard pack. To learn more about it and start using it, check out the [Spectro Kubernetes Dashboard](/integrations/spectro-k8s-dashboard) guide. - - - -## Usage - -To use the Kubernetes Dashboard pack, you have to add it to your cluster profile. Use the following information to find the Kubernetes Dashboard pack. -- **Pack Type**: Monitoring -- **Registry**: Public Repo -- **Pack Name**: Kubernetes Dashboard -- **Pack Version**: 2.0.x or higher - -The Kubernetes Dashboard pack requires the [Spectro Proxy](/integrations/frp) pack, which serves as a reverse proxy to expose the Kubernetes dashboard. You must configure the Spectro Proxy pack. - -
- -### Access Kubernetes Dashboard - -When connected to the cluster remotely, run the following command to establish a connection to deploy the Kubernetes Dashboard on port 8080. - -
-
-```bash
-kubectl port-forward --namespace kubernetes-dashboard service/kubernetes-dashboard 8080:443
-```
-
-To access Kubernetes Dashboard, navigate to `https://localhost:8080` in a browser of your choice.
-
-From the Dashboard login page, run the following command from the terminal window to obtain the bearer token:
-
-
-```bash
-kubectl --namespace kubernetes-dashboard describe secret $(kubectl --namespace kubernetes-dashboard get secret | grep kubernetes-dashboard-token | awk '{print $1}')
-```
-
-The following example shows the command output with the token value.
-
- -```yaml -Name: kubernetes-dashboard-token-h4lnf -Namespace: kubernetes-dashboard -Labels: -Annotations: kubernetes.io/service-account.name: kubernetes-dashboard - kubernetes.io/service-account.uid: 00e729f1-6638-4e68-8df5-afa2e2e38095 - -Type: kubernetes.io/service-account-token - -Data -==== -ca.crt: 1029 bytes -namespace: 20 bytes -token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ilg1bTg3RWM4Y1c3NnhkQ3dXbXNDUXQydVpYQklRUWoxa1BaS0ctVkVTSDQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1oNGxuZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjAwZTcyOWYxLTY2MzgtNGU2OC04ZGY1LWFmYTJlMmUzODA5NSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.JU4GOJNjGpkHabUyxBt_2rvtXNjpR3w238BF2oMCQUNf_ZkUGSMeAAgIKxbAuk62dtJNDaRh5yAZ9J5KthMcU6k4qVmodUOJdlvigBVNjTDEhPM-sqJus62HMtwjpvm0CX-aP_A_BqHs2yJ3OgXSX0uHmkUO1FMoZSVaRpOvx7f5bPswxd87L3npuZt4p-NJIX32-DGjBnxdANAHcWil3YHIUbDgQIdjDfN6stGU_JByvzfCJpNCWWDinr772W7iZ3uA28F8uGS0ZMd1E5e1moEFBY8BM015Qxg2Y_k7lmv9S8GMkBJyTiJNiqnwLwfsiE1ycE4Tgq_vuQfFToIMNw -``` - -
- -### Configure Ingress - -Use the following steps to configure ingress for the Kubernetes Dashboard pack. - -
- -1. Ensure the `service.type` parameter is set to "ClusterIP". - - -2. To enable ingress, set the `ingress.enabled` parameter to "true". - - -3. Set ingress rules, such as annotations, path, hosts, and any other rules. - -This allows you to access the Kubernetes Dashboard in hostname or IP format using the IP address that the ingress controller exposes. - -Typically you would point a DNS CNAME record to the ingress controller IP. Talk to your system administrator to learn more about which hostname to use. - -
- -### Configure LoadBalancer - -Use the `service.loadBalancerIP` and `service.externalPort` parameters to connect to the Kubernetes Dashboard. - -
- - - - - - -## Prerequisites - -- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the [Spectro Proxy](/integrations/frp) reverse proxy. - - -- Users or groups must be mapped to a Kubernetes RBAC role, either a Role or a ClusterRole. You can create a custom role through a manifest and use Palette's roleBinding feature to associate the users or groups with the role. Refer to the [Create Role Bindings](/clusters/cluster-management/cluster-rbac#createrolebindings) guide to learn more. - -## Parameters - -| Name | Supported Values | Default Values | Description | -| --- | --- | --- | --- | -| `k8s-dashboard.namespace` | | kubernetes-dashboard | The namespace where you want to run the Kubernetes Dashboard deployment | -| `k8s-dashboard.clusterRole` | Any valid clusterRole from the Kubernetes cluster | `k8s-dashboard-readonly` | The ClusterRole to be attached to the ServiceAccount which defines RBAC to the cluster resources.

By default, a ClusterRole (k8s-dashboard-readonly) with ReadOnly access to most of the resources is deployed. | -| `k8s-dashboard.certDuration` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `8760h` (365 days) | Validity for the Self-signed certificate, specified in hours. | -| `k8s-dashboard.certRenewal` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `720h` (30 days) | Certificate renew before expiration duration | -| `k8s-dashboard.serviceType` | ClusterIP, LoadBalancer | ClusterIP | The ServiceType for dashboard. We recommend using ClusterIP service type to restrict access to the cluster.| -| `k8s-dashboard.skipLogin` | True, False | False | A flag to skip authentications in the Kubernetes Dashboard UI. We recommend using this only for demo purposes, as enabling it could expose a security risk.| - - -Starting with Kubernetes Dashboard version 2.7.0, the **Connect** button is no longer available. For an optimized experience, we recommend you use the pre-configured version of this dashboard, called Spectro Kubernetes Dashboard pack. To learn more about it and start using it, check out the [Spectro Kubernetes Dashboard](/integrations/spectro-k8s-dashboard) guide. - - - -## Usage - -To use the Kubernetes Dashboard pack, you have to add it to your cluster profile. Use the following information to find the Kubernetes Dashboard pack. -- **Pack Type**: Monitoring -- **Registry**: Public Repo -- **Pack Name**: Kubernetes Dashboard -- **Pack Version**: 2.0.x or higher - -The Kubernetes Dashboard pack requires the [Spectro Proxy](/integrations/frp) pack, which serves as a reverse proxy to expose the Kubernetes dashboard. You must configure the Spectro Proxy pack. - -
-
-### Access Kubernetes Dashboard
-
-When connected to the cluster remotely, run the following command to establish a connection to deploy the Kubernetes Dashboard on port 8080.
-
-
-```bash
-kubectl port-forward --namespace kubernetes-dashboard service/kubernetes-dashboard 8080:443
-```
-
-To access Kubernetes Dashboard, navigate to `https://localhost:8080` in a browser of your choice.
-
-From the Dashboard login page, run the following command from the terminal window to obtain the bearer token:
-
-
-```bash
-kubectl --namespace kubernetes-dashboard describe secret $(kubectl --namespace kubernetes-dashboard get secret | grep kubernetes-dashboard-token | awk '{print $1}')
-```
-
-The following example shows the command output with the token value.
-
- -```yaml -Name: kubernetes-dashboard-token-h4lnf -Namespace: kubernetes-dashboard -Labels: -Annotations: kubernetes.io/service-account.name: kubernetes-dashboard - kubernetes.io/service-account.uid: 00e729f1-6638-4e68-8df5-afa2e2e38095 - -Type: kubernetes.io/service-account-token - -Data -==== -ca.crt: 1029 bytes -namespace: 20 bytes -token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ilg1bTg3RWM4Y1c3NnhkQ3dXbXNDUXQydVpYQklRUWoxa1BaS0ctVkVTSDQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1oNGxuZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjAwZTcyOWYxLTY2MzgtNGU2OC04ZGY1LWFmYTJlMmUzODA5NSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.JU4GOJNjGpkHabUyxBt_2rvtXNjpR3w238BF2oMCQUNf_ZkUGSMeAAgIKxbAuk62dtJNDaRh5yAZ9J5KthMcU6k4qVmodUOJdlvigBVNjTDEhPM-sqJus62HMtwjpvm0CX-aP_A_BqHs2yJ3OgXSX0uHmkUO1FMoZSVaRpOvx7f5bPswxd87L3npuZt4p-NJIX32-DGjBnxdANAHcWil3YHIUbDgQIdjDfN6stGU_JByvzfCJpNCWWDinr772W7iZ3uA28F8uGS0ZMd1E5e1moEFBY8BM015Qxg2Y_k7lmv9S8GMkBJyTiJNiqnwLwfsiE1ycE4Tgq_vuQfFToIMNw -``` - -
- -### Configure Ingress - -Use the following steps to configure ingress for the Kubernetes Dashboard pack. - -
- -1. Ensure the `service.type` parameter is set to "ClusterIP". - - -2. To enable ingress, set the `ingress.enabled` parameter to "true". - - -3. Set ingress rules, such as annotations, path, hosts, and any other rules. - -This allows you to access the Kubernetes Dashboard in hostname or IP format using the IP address that the ingress controller exposes. - -Typically you would point a DNS CNAME record to the ingress controller IP. Talk to your system administrator to learn more about which hostname to use. - -
- -### Configure LoadBalancer - -Use the `service.loadBalancerIP` and `service.externalPort` parameters to connect to the Kubernetes Dashboard. - - -
- - - - - - - -## Prerequisites - -- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the [Spectro Proxy](/integrations/frp) reverse proxy. - - -- Users or groups must be mapped to a Kubernetes RBAC role, either a Role or a ClusterRole. You can create a custom role through a manifest and use Palette's roleBinding feature to associate the users or groups with the role. Refer to the [Create Role Bindings](/clusters/cluster-management/cluster-rbac#createrolebindings) guide to learn more. - -## Parameters - -| Name | Supported Values | Default Values | Description | -| --- | --- | --- | --- | -| `k8s-dashboard.namespace` | | kubernetes-dashboard | The namespace where you want to run the Kubernetes Dashboard deployment | -| `k8s-dashboard.clusterRole` | Any valid clusterRole from the Kubernetes cluster | `k8s-dashboard-readonly` | The ClusterRole to be attached to the ServiceAccount which defines RBAC to the cluster resources.

By default, a ClusterRole (k8s-dashboard-readonly) with ReadOnly access to most of the resources is deployed. | -| `k8s-dashboard.certDuration` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `8760h` (365 days) | Validity for the Self-signed certificate, specified in hours. | -| `k8s-dashboard.certRenewal` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `720h` (30 days) | Certificate renew before expiration duration | -| `k8s-dashboard.serviceType` | ClusterIP, LoadBalancer | ClusterIP | The ServiceType for dashboard. We recommend using ClusterIP service type to restrict access to the cluster.| -| `k8s-dashboard.skipLogin` | True, False | False | A flag to skip authentications in the Kubernetes Dashboard UI. We recommend using this only for demo purposes, as enabling it could expose a security risk.| - - - -Starting with Kubernetes Dashboard version 2.7.0, the **Connect** button is no longer available. For an optimized experience, we recommend you use the pre-configured version of this dashboard, called Spectro Kubernetes Dashboard pack. To learn more about it and start using it, check out the [Spectro Kubernetes Dashboard](/integrations/spectro-k8s-dashboard) guide. - - - - -## Usage - -To use the Kubernetes Dashboard pack, you have to add it to your cluster profile. Use the following information to find the Kubernetes Dashboard pack. -- **Pack Type**: Monitoring -- **Registry**: Public Repo -- **Pack Name**: Kubernetes Dashboard -- **Pack Version**: 2.0.x or higher - -The Kubernetes Dashboard pack requires the [Spectro Proxy](/integrations/frp) pack, which serves as a reverse proxy to expose the Kubernetes dashboard. You must configure the Spectro Proxy pack. - -
-
-### Access Kubernetes Dashboard
-
-When connected to the cluster remotely, run the following command to establish a connection to deploy the Kubernetes Dashboard on port 8080.
-
-
-```bash
-kubectl port-forward --namespace kubernetes-dashboard service/kubernetes-dashboard 8080:443
-```
-
-To access Kubernetes Dashboard, navigate to `https://localhost:8080` in a browser of your choice.
-
-From the Dashboard login page, run the following command from the terminal window to obtain the bearer token:
-
-
-```bash
-kubectl --namespace kubernetes-dashboard describe secret $(kubectl --namespace kubernetes-dashboard get secret | grep kubernetes-dashboard-token | awk '{print $1}')
-```
-
-The following example shows the command output with the token value.
-
- -```yaml -Name: kubernetes-dashboard-token-h4lnf -Namespace: kubernetes-dashboard -Labels: -Annotations: kubernetes.io/service-account.name: kubernetes-dashboard - kubernetes.io/service-account.uid: 00e729f1-6638-4e68-8df5-afa2e2e38095 - -Type: kubernetes.io/service-account-token - -Data -==== -ca.crt: 1029 bytes -namespace: 20 bytes -token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ilg1bTg3RWM4Y1c3NnhkQ3dXbXNDUXQydVpYQklRUWoxa1BaS0ctVkVTSDQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1oNGxuZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjAwZTcyOWYxLTY2MzgtNGU2OC04ZGY1LWFmYTJlMmUzODA5NSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.JU4GOJNjGpkHabUyxBt_2rvtXNjpR3w238BF2oMCQUNf_ZkUGSMeAAgIKxbAuk62dtJNDaRh5yAZ9J5KthMcU6k4qVmodUOJdlvigBVNjTDEhPM-sqJus62HMtwjpvm0CX-aP_A_BqHs2yJ3OgXSX0uHmkUO1FMoZSVaRpOvx7f5bPswxd87L3npuZt4p-NJIX32-DGjBnxdANAHcWil3YHIUbDgQIdjDfN6stGU_JByvzfCJpNCWWDinr772W7iZ3uA28F8uGS0ZMd1E5e1moEFBY8BM015Qxg2Y_k7lmv9S8GMkBJyTiJNiqnwLwfsiE1ycE4Tgq_vuQfFToIMNw -``` - -
- -### Configure Ingress - -Use the following steps to configure ingress for the Kubernetes Dashboard pack. - -
- -1. Ensure the `service.type` parameter is set to "ClusterIP". - - -2. To enable ingress, set the `ingress.enabled` parameter to "true". - - -3. Set ingress rules, such as annotations, path, hosts, and any other rules. - -This allows you to access the Kubernetes Dashboard in hostname or IP format using the IP address that the ingress controller exposes. - -Typically you would point a DNS CNAME record to the ingress controller IP. Talk to your system administrator to learn more about which hostname to use. - -
- -### Configure LoadBalancer - -Use the `service.loadBalancerIP` and `service.externalPort` parameters to connect to the Kubernetes Dashboard. - -
- - - - - - -## Prerequisites - -- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the [Spectro Proxy](/integrations/frp) reverse proxy. - - -- Users or groups must be mapped to a Kubernetes RBAC role, either a Role or a ClusterRole. You can create a custom role through a manifest and use Palette's roleBinding feature to associate the users or groups with the role. Refer to the [Create Role Bindings](/clusters/cluster-management/cluster-rbac#createrolebindings) guide to learn more. - -## Parameters - -| Name | Supported Values | Default Values | Description | -| --- | --- | --- | --- | -| `k8s-dashboard.namespace` | | kubernetes-dashboard | The namespace where you want to run the Kubernetes Dashboard deployment | -| `k8s-dashboard.clusterRole` | Any valid clusterRole from the Kubernetes cluster | `k8s-dashboard-readonly` | The ClusterRole to be attached to the ServiceAccount which defines RBAC to the cluster resources.

By default, a ClusterRole (k8s-dashboard-readonly) with ReadOnly access to most of the resources is deployed. | -| `k8s-dashboard.certDuration` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `8760h` (365 days) | Validity for the Self-signed certificate, specified in hours. | -| `k8s-dashboard.certRenewal` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `720h` (30 days) | Certificate renew before expiration duration | -| `k8s-dashboard.serviceType` | ClusterIP, LoadBalancer | ClusterIP | The ServiceType for dashboard. We recommend using ClusterIP service type to restrict access to the cluster.| -| `k8s-dashboard.skipLogin` | True, False | False | A flag to skip authentications in the Kubernetes Dashboard UI. We recommend using this only for demo purposes, as enabling it could expose a security risk.| - - - -Starting with Kubernetes Dashboard version 2.7.0, the **Connect** button is no longer available. For an optimized experience, we recommend you use the pre-configured version of this dashboard, called Spectro Kubernetes Dashboard pack. To learn more about it and start using it, check out the [Spectro Kubernetes Dashboard](/integrations/spectro-k8s-dashboard) guide. - - - -## Usage - -To use the Kubernetes Dashboard pack, you have to add it to your cluster profile. Use the following information to find the Kubernetes Dashboard pack. -- **Pack Type**: Monitoring -- **Registry**: Public Repo -- **Pack Name**: Kubernetes Dashboard -- **Pack Version**: 2.0.x or higher - -The Kubernetes Dashboard pack requires the [Spectro Proxy](/integrations/frp) pack, which serves as a reverse proxy to expose the Kubernetes dashboard. You must configure the Spectro Proxy pack. - -
-
-### Access Kubernetes Dashboard
-
-When connected to the cluster remotely, run the following command to establish a connection to deploy the Kubernetes Dashboard on port 8080.
-
-
-```bash
-kubectl port-forward --namespace kubernetes-dashboard service/kubernetes-dashboard 8080:443
-```
-
-To access Kubernetes Dashboard, navigate to `https://localhost:8080` in a browser of your choice.
-
-From the Dashboard login page, run the following command from the terminal window to obtain the bearer token:
-
-
-```bash
-kubectl --namespace kubernetes-dashboard describe secret $(kubectl --namespace kubernetes-dashboard get secret | grep kubernetes-dashboard-token | awk '{print $1}')
-```
-
-The following example shows the command output with the token value.
-
- -```yaml -Name: kubernetes-dashboard-token-h4lnf -Namespace: kubernetes-dashboard -Labels: -Annotations: kubernetes.io/service-account.name: kubernetes-dashboard - kubernetes.io/service-account.uid: 00e729f1-6638-4e68-8df5-afa2e2e38095 - -Type: kubernetes.io/service-account-token - -Data -==== -ca.crt: 1029 bytes -namespace: 20 bytes -token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ilg1bTg3RWM4Y1c3NnhkQ3dXbXNDUXQydVpYQklRUWoxa1BaS0ctVkVTSDQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1oNGxuZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjAwZTcyOWYxLTY2MzgtNGU2OC04ZGY1LWFmYTJlMmUzODA5NSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.JU4GOJNjGpkHabUyxBt_2rvtXNjpR3w238BF2oMCQUNf_ZkUGSMeAAgIKxbAuk62dtJNDaRh5yAZ9J5KthMcU6k4qVmodUOJdlvigBVNjTDEhPM-sqJus62HMtwjpvm0CX-aP_A_BqHs2yJ3OgXSX0uHmkUO1FMoZSVaRpOvx7f5bPswxd87L3npuZt4p-NJIX32-DGjBnxdANAHcWil3YHIUbDgQIdjDfN6stGU_JByvzfCJpNCWWDinr772W7iZ3uA28F8uGS0ZMd1E5e1moEFBY8BM015Qxg2Y_k7lmv9S8GMkBJyTiJNiqnwLwfsiE1ycE4Tgq_vuQfFToIMNw -``` - -
- -### Configure Ingress - -Use the following steps to configure ingress for the Kubernetes Dashboard pack. - -
- -1. Ensure the `service.type` parameter is set to "ClusterIP". - - -2. To enable ingress, set the `ingress.enabled` parameter to "true". - - -3. Set ingress rules, such as annotations, path, hosts, and any other rules. - -This allows you to access the Kubernetes Dashboard in hostname or IP format using the IP address that the ingress controller exposes. - -Typically you would point a DNS CNAME record to the ingress controller IP. Talk to your system administrator to learn more about which hostname to use. - -
- -### Configure LoadBalancer - -Use the `service.loadBalancerIP` and `service.externalPort` parameters to connect to the Kubernetes Dashboard. - - -
- - - - - - -## Prerequisites - -- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the [Spectro Proxy](/integrations/frp) reverse proxy. - - -- Users or groups must be mapped to a Kubernetes RBAC role, either a Role or a ClusterRole. You can create a custom role through a manifest and use Palette's roleBinding feature to associate the users or groups with the role. Refer to the [Create Role Bindings](/clusters/cluster-management/cluster-rbac#createrolebindings) guide to learn more. - -## Parameters - -| Name | Supported Values | Default Values | Description | -| --- | --- | --- | --- | -| `k8s-dashboard.namespace` | | kubernetes-dashboard | The namespace where you want to run the Kubernetes Dashboard deployment | -| `k8s-dashboard.clusterRole` | Any valid clusterRole from the Kubernetes cluster | `k8s-dashboard-readonly` | The ClusterRole to be attached to the ServiceAccount which defines RBAC to the cluster resources.

By default, a ClusterRole (k8s-dashboard-readonly) with ReadOnly access to most of the resources is deployed. | -| `k8s-dashboard.certDuration` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `8760h` (365 days) | Validity for the Self-signed certificate, specified in hours. | -| `k8s-dashboard.certRenewal` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `720h` (30 days) | Certificate renew before expiration duration | -| `k8s-dashboard.serviceType` | ClusterIP, LoadBalancer | ClusterIP | The ServiceType for dashboard. We recommend using ClusterIP service type to restrict access to the cluster.| -| `k8s-dashboard.skipLogin` | True, False | False | A flag to skip authentications in the Kubernetes Dashboard UI. We recommend using this only for demo purposes, as enabling it could expose a security risk.| - - - -Starting with Kubernetes Dashboard version 2.7.0, the **Connect** button is no longer available. For an optimized experience, we recommend you use the pre-configured version of this dashboard, called Spectro Kubernetes Dashboard pack. To learn more about it and start using it, check out the [Spectro Kubernetes Dashboard](/integrations/spectro-k8s-dashboard) guide. - - - -## Usage - -To use the Kubernetes Dashboard pack, you have to add it to your cluster profile. Use the following information to find the Kubernetes Dashboard pack. -- **Pack Type**: Monitoring -- **Registry**: Public Repo -- **Pack Name**: Kubernetes Dashboard -- **Pack Version**: 2.0.x or higher - -The Kubernetes Dashboard pack requires the [Spectro Proxy](/integrations/frp) pack, which serves as a reverse proxy to expose the Kubernetes dashboard. You must configure the Spectro Proxy pack. - -
-
-### Access Kubernetes Dashboard
-
-When connected to the cluster remotely, run the following command to establish a connection to deploy the Kubernetes Dashboard on port 8080.
-
-
-```bash
-kubectl port-forward --namespace kubernetes-dashboard service/kubernetes-dashboard 8080:443
-```
-
-To access Kubernetes Dashboard, navigate to `https://localhost:8080` in a browser of your choice.
-
-From the Dashboard login page, run the following command from the terminal window to obtain the bearer token:
-
-
-```bash
-kubectl --namespace kubernetes-dashboard describe secret $(kubectl --namespace kubernetes-dashboard get secret | grep kubernetes-dashboard-token | awk '{print $1}')
-```
-
-The following example shows the command output with the token value.
-
- -```yaml -Name: kubernetes-dashboard-token-h4lnf -Namespace: kubernetes-dashboard -Labels: -Annotations: kubernetes.io/service-account.name: kubernetes-dashboard - kubernetes.io/service-account.uid: 00e729f1-6638-4e68-8df5-afa2e2e38095 - -Type: kubernetes.io/service-account-token - -Data -==== -ca.crt: 1029 bytes -namespace: 20 bytes -token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ilg1bTg3RWM4Y1c3NnhkQ3dXbXNDUXQydVpYQklRUWoxa1BaS0ctVkVTSDQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1oNGxuZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjAwZTcyOWYxLTY2MzgtNGU2OC04ZGY1LWFmYTJlMmUzODA5NSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.JU4GOJNjGpkHabUyxBt_2rvtXNjpR3w238BF2oMCQUNf_ZkUGSMeAAgIKxbAuk62dtJNDaRh5yAZ9J5KthMcU6k4qVmodUOJdlvigBVNjTDEhPM-sqJus62HMtwjpvm0CX-aP_A_BqHs2yJ3OgXSX0uHmkUO1FMoZSVaRpOvx7f5bPswxd87L3npuZt4p-NJIX32-DGjBnxdANAHcWil3YHIUbDgQIdjDfN6stGU_JByvzfCJpNCWWDinr772W7iZ3uA28F8uGS0ZMd1E5e1moEFBY8BM015Qxg2Y_k7lmv9S8GMkBJyTiJNiqnwLwfsiE1ycE4Tgq_vuQfFToIMNw -``` - -
- -### Configure Ingress - -Use the following steps to configure ingress for the Kubernetes Dashboard pack. - -
- -1. Ensure the `service.type` parameter is set to "ClusterIP". - - -2. To enable ingress, set the `ingress.enabled` parameter to "true". - - -3. Set ingress rules, such as annotations, path, hosts, and any other rules. - -This allows you to access the Kubernetes Dashboard in hostname or IP format using the IP address that the ingress controller exposes. - -Typically you would point a DNS CNAME record to the ingress controller IP. Talk to your system administrator to learn more about which hostname to use. - -
- -### Configure LoadBalancer - -Use the `service.loadBalancerIP` and `service.externalPort` parameters to connect to the Kubernetes Dashboard. - -
- -
- - -# Troubleshooting - -- If the Kubernetes Dashboard is not accessible, check the dashboard pod for errors and ensure the dashboard service is in the **Running** state. - - -- When the namespace is customized while deploying the Kubernetes Dashboard, replace the namespace values in the commands shown above. - - -# Terraform - -You can reference the Kubernetes Dashboard pack in Terraform with a data resource. - -
- -```tf -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "spectro-proxy" { - name = "k8s-dashboard" - version = "2.7.0" - type = "monitoring" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - -# References - -- [Kubernetes Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/) - - -- [Open Source Kubernetes Dashboard Documentation](https://github.com/kubernetes/dashboard/tree/master/docs) diff --git a/content/docs/06-integrations/00-kubernetes-edge.md b/content/docs/06-integrations/00-kubernetes-edge.md deleted file mode 100644 index 8f86cd9a30..0000000000 --- a/content/docs/06-integrations/00-kubernetes-edge.md +++ /dev/null @@ -1,1630 +0,0 @@ ---- -title: "Palette eXtended Kubernetes - Edge" -metaTitle: "Palette eXtended Kubernetes - Edge" -metaDescription: "Learn about the Palette eXtended Kubernetes - Edge pack and how you can use it your host clusters in an edge environment." -hiddenFromNav: true -type: "integration" -category: ["kubernetes", 'amd64', 'fips'] -logoUrl: "https://registry.spectrocloud.com/v1/k8s-dashboard/blobs/sha256:2de5d88b2573af42d4cc269dff75744c4174ce47cbbeed5445e51a2edd8b7429?type=image/png" ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Palette eXtended Kubernetes - Edge - -The Palette eXtended Kubernetes - Edge (PXK-E) pack supports Kubernetes clusters set up on Edge hosts installed in isolated locations like grocery stores, restaurants, and similar locations, versus a data center or cloud environment. We offer PXK-E as a core pack in Palette. - -Review our [Maintenance Policy](/integrations/maintenance-policy) to learn about pack update and deprecation schedules. - -
- -## What is PXK-E? - -PXK-E is a customized version of the open-source Cloud Native Computing Foundation (CNCF) distribution of Kubernetes. This Kubernetes distribution is customized and optimized for edge computing environments and can be deployed through Palette. PXK-E is the Kubernetes distribution Palette defaults to when deploying Edge clusters. - -PXK-E differs from the upstream open-source Kubernetes version by optimizing for operations in an edge computing environment. PXK-E also differentiates itself by using the Kairos open-source project as the base operating system (OS). PXK-E’s use of Kairos means the OS is immutable, which significantly improves the security posture and reduces potential attack surfaces. - -Another differentiator of PXK-E is the carefully reviewed and applied hardening of the OS and Kubernetes. The hardening ranges from removing unused OS kernel modules to using an OS configuration that follows industry best practices. Our custom Kubernetes configuration addresses common deployment security pitfalls and implements industry best practices. - -With PXK-E, you can manage automatic OS upgrades while retaining immutability and the flexibility to roll out changes safely. The A/B partition architecture of Kairos allows for new OS and dependency versions to be installed in a separate partition and mounted at runtime. You can fall back to use the previous partition if issues are identified in the new partition. - -PXK-E manages the underlying OS and the Kubernetes layer together, which reduces the challenge of upgrading and maintaining two separate components. - -PXK-E allows you to apply different flavors of container storage interfaces (CSI) and container network interfaces (CNI). Other open-source Kubernetes distributions such as MicroK8s, RKE2, and K3s come with a default CSI and CNI. There is additional complexity and overhead when you want to consume different interface plugins with traditional Kubernetes distributions. Using PXK-E, you select the interface plugin you want to apply without additional overhead and complexity. - -There are no changes to the Kubernetes source code used in PXK-E, and it follows the same versioning schema as the upstream open-source Kubernetes distribution. - -
- - - - -We also offer Palette eXtended Kubernetes (PXK) for cloud and data center deployments. For more information, refer to the [Palette eXtended Kubernetes](/integrations/kubernetes) guide to learn more about PXK. - - - -# Versions Supported - - - - - - -## Prerequisites - -- A minimum of 2 CPU and 4GB Memory. - - -## Parameters - -| Parameter | Description | -|----------------------------------------------------------|----------------------------------------------------| -| `cluster.config.clusterConfiguration.apiServer.extraArgs` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port.| -| `cluster.config.clusterConfiguration.apiServer.extraVolumes` | This parameter describes extra volumes for the Kubernetes API server, such as `audit-log` and `audit-policy`. | -| `cluster.config.clusterConfiguration.controllerManager.extraArgs` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | -| `cluster.config.clusterConfiguration.etcd.local.dataDir` | This parameter specifies the data directory for etcd, the distributed key-value store that Kubernetes uses to persist cluster state. | -| `cluster.config.clusterConfiguration.networking.podSubnet` | The IP subnet range to assign to pods. Default: 192.168.0.0/16 | -| `cluster.config.clusterConfiguration.networking.serviceSubnet` | The IP subnet range to assign to services. Default: 192.169.0.0/16 | -| `cluster.config.clusterConfiguration.scheduler.extraArgs` | This parameter contains extra arguments for the Kubernetes scheduler, such as disabling profiling. | -| `cluster.config.initConfiguration.nodeRegistration.kubeletExtraArgs` | This parameter contains extra arguments for kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | -| `pack.palette.config.oidc.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK-E pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](/integrations/kubernetes-edge#configureoidcidentityprovider). | - -You can add cloud-init stages exposed by [Kairos](https://kairos.io/docs/architecture/cloud-init/), an open-source project. For more information, check out the [Cloud Init Stages](/clusters/edge/edge-configuration/cloud-init) reference. - -You can also use pack settings described in the [Palette eXtended Kubernetes](/integrations/kubernetes) guide. - -
- - -## Usage - -The Kubeadm configuration file is where you can do the following: - -
- -- Manually configure a third-party OIDC IDP. For more information, check out [Configure Custom OIDC](/integrations/kubernetes-edge#configurecustomoidc). - - -- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](/integrations/frp) guide. - - -#### Configuration Changes - -The PXK-E Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. - -
- -```yaml -pack: - palette: - config: - oidc: - identityProvider: -``` - -
- -#### Example Kubeadm Configuration File - -```yaml -cluster: - config: | - clusterConfiguration: - apiServer: - extraArgs: - advertise-address: "0.0.0.0" - anonymous-auth: "true" - audit-log-maxage: "30" - audit-log-maxbackup: "10" - audit-log-maxsize: "100" - audit-log-path: /var/log/apiserver/audit.log - audit-policy-file: /etc/kubernetes/audit-policy.yaml - authorization-mode: RBAC,Node - default-not-ready-toleration-seconds: "60" - default-unreachable-toleration-seconds: "60" - disable-admission-plugins: AlwaysAdmit - enable-admission-plugins: AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction - profiling: "false" - secure-port: "6443" - tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - extraVolumes: - - hostPath: /var/log/apiserver - mountPath: /var/log/apiserver - name: audit-log - pathType: DirectoryOrCreate - - hostPath: /etc/kubernetes/audit-policy.yaml - mountPath: /etc/kubernetes/audit-policy.yaml - name: audit-policy - pathType: File - readOnly: true - timeoutForControlPlane: 10m0s - controllerManager: - extraArgs: - feature-gates: RotateKubeletServerCertificate=true - pod-eviction-timeout: 1m0s - profiling: "false" - terminated-pod-gc-threshold: "25" - use-service-account-credentials: "true" - dns: {} - kubernetesVersion: v1.26.4 - etcd: - local: - dataDir: "/etc/kubernetes/etcd" - extraArgs: - listen-client-urls: "https://0.0.0.0:2379" - networking: - podSubnet: 192.168.0.0/16 - serviceSubnet: 192.169.0.0/16 - scheduler: - extraArgs: - profiling: "false" - initConfiguration: - localAPIEndpoint: {} - nodeRegistration: - kubeletExtraArgs: - event-qps: "0" - feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: "true" - read-only-port: "0" - tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - joinConfiguration: - discovery: {} - nodeRegistration: - kubeletExtraArgs: - event-qps: "0" - feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: "true" - read-only-port: "0" - tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -stages: - initramfs: - - sysctl: - vm.overcommit_memory: 1 - kernel.panic: 10 - kernel.panic_on_oops: 1 - commands: - - ln -s /etc/kubernetes/admin.conf /run/kubeconfig - files: - - path: /etc/hosts - permission: "0644" - content: | - 127.0.0.1 localhost - - path: "/etc/kubernetes/audit-policy.yaml" - owner_string: "root" - permission: 0600 - content: | - apiVersion: audit.k8s.io/v1 - kind: Policy - rules: - - level: None - users: ["system:kube-proxy"] - verbs: ["watch"] - resources: - - group: "" # core - resources: ["endpoints", "services", "services/status"] - - level: None - users: ["system:unsecured"] - namespaces: ["kube-system"] - verbs: ["get"] - resources: - - group: "" # core - resources: ["configmaps"] - - 
level: None - users: ["kubelet"] # legacy kubelet identity - verbs: ["get"] - resources: - - group: "" # core - resources: ["nodes", "nodes/status"] - - level: None - userGroups: ["system:nodes"] - verbs: ["get"] - resources: - - group: "" # core - resources: ["nodes", "nodes/status"] - - level: None - users: - - system:kube-controller-manager - - system:kube-scheduler - - system:serviceaccount:kube-system:endpoint-controller - verbs: ["get", "update"] - namespaces: ["kube-system"] - resources: - - group: "" # core - resources: ["endpoints"] - - level: None - users: ["system:apiserver"] - verbs: ["get"] - resources: - - group: "" # core - resources: ["namespaces", "namespaces/status", "namespaces/finalize"] - - level: None - users: ["cluster-autoscaler"] - verbs: ["get", "update"] - namespaces: ["kube-system"] - resources: - - group: "" # core - resources: ["configmaps", "endpoints"] - # Don't log HPA fetching metrics. - - level: None - users: - - system:kube-controller-manager - verbs: ["get", "list"] - resources: - - group: "metrics.k8s.io" - # Don't log these read-only URLs. - - level: None - nonResourceURLs: - - /healthz* - - /version - - /swagger* - # Don't log events requests. - - level: None - resources: - - group: "" # core - resources: ["events"] - # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes - - level: Request - users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] - verbs: ["update","patch"] - resources: - - group: "" # core - resources: ["nodes/status", "pods/status"] - omitStages: - - "RequestReceived" - - level: Request - userGroups: ["system:nodes"] - verbs: ["update","patch"] - resources: - - group: "" # core - resources: ["nodes/status", "pods/status"] - omitStages: - - "RequestReceived" - # deletecollection calls can be large, don't log responses for expected namespace deletions - - level: Request - users: ["system:serviceaccount:kube-system:namespace-controller"] - verbs: ["deletecollection"] - omitStages: - - "RequestReceived" - # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, - # so only log at the Metadata level. - - level: Metadata - resources: - - group: "" # core - resources: ["secrets", "configmaps"] - - group: authentication.k8s.io - resources: ["tokenreviews"] - omitStages: - - "RequestReceived" - # Get repsonses can be large; skip them. 
- - level: Request - verbs: ["get", "list", "watch"] - resources: - - group: "" # core - - group: "admissionregistration.k8s.io" - - group: "apiextensions.k8s.io" - - group: "apiregistration.k8s.io" - - group: "apps" - - group: "authentication.k8s.io" - - group: "authorization.k8s.io" - - group: "autoscaling" - - group: "batch" - - group: "certificates.k8s.io" - - group: "extensions" - - group: "metrics.k8s.io" - - group: "networking.k8s.io" - - group: "policy" - - group: "rbac.authorization.k8s.io" - - group: "settings.k8s.io" - - group: "storage.k8s.io" - omitStages: - - "RequestReceived" - # Default level for known APIs - - level: RequestResponse - resources: - - group: "" # core - - group: "admissionregistration.k8s.io" - - group: "apiextensions.k8s.io" - - group: "apiregistration.k8s.io" - - group: "apps" - - group: "authentication.k8s.io" - - group: "authorization.k8s.io" - - group: "autoscaling" - - group: "batch" - - group: "certificates.k8s.io" - - group: "extensions" - - group: "metrics.k8s.io" - - group: "networking.k8s.io" - - group: "policy" - - group: "rbac.authorization.k8s.io" - - group: "settings.k8s.io" - - group: "storage.k8s.io" - omitStages: - - "RequestReceived" - # Default level for all other requests. - - level: Metadata - omitStages: - - "RequestReceived" -pack: - palette: - config: - oidc: - identityProvider: palette -``` - -
- -### Configure OIDC Identity Provider - -The OIDC IDP feature offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured. In this scenario, you can leverage Palette as an IDP without having to configure a third-party IDP. We also support the ability to take advantage of other OIDC providers by making it possible for you to configure OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. - -When you add the PXK-E pack to a cluster profile, Palette displays the OIDC IDP options listed below. - -All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - -You can create a role binding that maps individual users or groups assigned within the OIDC provider's configuration to a role. To learn more, review [Use RBAC with OIDC](/integrations/kubernetes-edge#userbacwithoidc). - -
- -- **None**: This setting does not require OIDC configuration for the cluster. It displays in the YAML file as `noauth`. - - -- **Custom**: This is the default setting and does not require OIDC configuration. However, if desired, it allows you to specify a third-party OIDC provider by configuring OIDC statements in the YAML file as described in [Configure Custom OIDC](/integrations/kubernetes-edge#configurecustomoidc). This setting displays in the YAML file as `none`. - - -- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. - - -- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](/user-management/saml-sso) guide. - - - -If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. - - - - -### Configure Custom OIDC - -Follow these steps to configure a third-party OIDC IDP. - -
- -1. Add the following OIDC parameters to the `apiServer.extraArgs` section of your Kubernetes YAML file when creating a cluster profile. - - -```yaml -cluster: - config: - clusterConfiguration: - apiServer: - extraArgs: - oidc-issuer-url: "provider URL" - oidc-client-id: "client-id" - oidc-groups-claim: "groups" - oidc-username-claim: "email" -``` - -2. Add the following `kubeadmconfig.clientConfig` section that contains OIDC parameters to your Kubernetes YAML file. - - -```yaml -kubeadmconfig: - clientConfig: - oidc-issuer-url: "" - oidc-client-id: "" - oidc-client-secret: "" - oidc-extra-scope: profile,email,openid -``` - -3. Provide third-party OIDC IDP details. - -### Use RBAC with OIDC - -You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - -Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. - -In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. - -![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) - - - -
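-
-Below is a minimal sketch of the kind of RoleBinding that the image above represents. It assumes the OIDC group claim surfaces the group name exactly as assigned in the IDP (`dev-east-2`) and that the binding is created in the namespace you want the group to manage; the binding name is illustrative.
-
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  # Illustrative name; use whatever naming convention fits your environment.
-  name: dev-east-2-cluster-admin
-  namespace: default
-subjects:
-  # The group name must match the group assigned in the OIDC provider's configuration.
-  - kind: Group
-    name: dev-east-2
-    apiGroup: rbac.authorization.k8s.io
-roleRef:
-  # References the built-in cluster-admin ClusterRole, scoped to the namespace above.
-  kind: ClusterRole
-  name: cluster-admin
-  apiGroup: rbac.authorization.k8s.io
-```
-
-To grant the group cluster-wide access instead, the same subject and roleRef could be used in a ClusterRoleBinding.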
- - - -## Prerequisites - -- A minimum of 2 CPU and 4GB Memory. - - -## Parameters - -| Parameter | Description | -|----------------------------------------------------------|----------------------------------------------------| -| `cluster.config.clusterConfiguration.apiServer.extraArgs` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port.| -| `cluster.config.clusterConfiguration.apiServer.extraVolumes` | This parameter describes extra volumes for the Kubernetes API server, such as `audit-log` and `audit-policy`. | -| `cluster.config.clusterConfiguration.controllerManager.extraArgs` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | -| `cluster.config.clusterConfiguration.etcd.local.dataDir` | This parameter specifies the data directory for etcd, the distributed key-value store that Kubernetes uses to persist cluster state. | -| `cluster.config.clusterConfiguration.networking.podSubnet` | The IP subnet range to assign to pods. Default: 192.168.0.0/16 | -| `cluster.config.clusterConfiguration.networking.serviceSubnet` | The IP subnet range to assign to services. Default: 192.169.0.0/16 | -| `cluster.config.clusterConfiguration.scheduler.extraArgs` | This parameter contains extra arguments for the Kubernetes scheduler, such as disabling profiling. | -| `cluster.config.initConfiguration.nodeRegistration.kubeletExtraArgs` | This parameter contains extra arguments for kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | -| `pack.palette.config.oidc.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK-E pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](/integrations/kubernetes-edge#configureoidcidentityprovider). | - -You can add cloud-init stages exposed by [Kairos](https://kairos.io/docs/architecture/cloud-init/), an open-source project. For more information, check out the [Cloud Init Stages](/clusters/edge/edge-configuration/cloud-init) reference. - -You can also use pack settings described in the [Palette eXtended Kubernetes](/integrations/kubernetes) guide. - -
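-
-As a brief illustration of the Kairos cloud-init stages mentioned above, the following sketch adds a `boot` stage alongside the `initramfs` stage that ships with the pack defaults. The file written here is hypothetical; only the overall `stages` structure follows the format shown in the example configuration file further below.
-
-```yaml
-stages:
-  boot:
-    - files:
-        # Illustrative file; replace with whatever the node actually needs.
-        - path: /etc/motd
-          permission: "0644"
-          content: |
-            This node is managed by Palette. Avoid manual changes.
-```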
- - -## Usage - -The Kubeadm configuration file is where you can do the following: - -
- -- Manually configure a third-party OIDC IDP. For more information, check out [Configure Custom OIDC](/integrations/kubernetes-edge#configurecustomoidc). - - -- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](/integrations/frp) guide. - - -#### Configuration Changes - -The PXK-E Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. - -
- -```yaml -pack: - palette: - config: - oidc: - identityProvider: -``` - -
- -#### Example Kubeadm Configuration File - -```yaml -cluster: - config: | - clusterConfiguration: - apiServer: - extraArgs: - advertise-address: "0.0.0.0" - anonymous-auth: "true" - audit-log-maxage: "30" - audit-log-maxbackup: "10" - audit-log-maxsize: "100" - audit-log-path: /var/log/apiserver/audit.log - audit-policy-file: /etc/kubernetes/audit-policy.yaml - authorization-mode: RBAC,Node - default-not-ready-toleration-seconds: "60" - default-unreachable-toleration-seconds: "60" - disable-admission-plugins: AlwaysAdmit - enable-admission-plugins: AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction - profiling: "false" - secure-port: "6443" - tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - extraVolumes: - - hostPath: /var/log/apiserver - mountPath: /var/log/apiserver - name: audit-log - pathType: DirectoryOrCreate - - hostPath: /etc/kubernetes/audit-policy.yaml - mountPath: /etc/kubernetes/audit-policy.yaml - name: audit-policy - pathType: File - readOnly: true - timeoutForControlPlane: 10m0s - controllerManager: - extraArgs: - feature-gates: RotateKubeletServerCertificate=true - pod-eviction-timeout: 1m0s - profiling: "false" - terminated-pod-gc-threshold: "25" - use-service-account-credentials: "true" - dns: {} - kubernetesVersion: v1.26.4 - etcd: - local: - dataDir: "/etc/kubernetes/etcd" - extraArgs: - listen-client-urls: "https://0.0.0.0:2379" - networking: - podSubnet: 192.168.0.0/16 - serviceSubnet: 192.169.0.0/16 - scheduler: - extraArgs: - profiling: "false" - initConfiguration: - localAPIEndpoint: {} - nodeRegistration: - kubeletExtraArgs: - event-qps: "0" - feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: "true" - read-only-port: "0" - tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - joinConfiguration: - discovery: {} - nodeRegistration: - kubeletExtraArgs: - event-qps: "0" - feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: "true" - read-only-port: "0" - tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -stages: - initramfs: - - sysctl: - vm.overcommit_memory: 1 - kernel.panic: 10 - kernel.panic_on_oops: 1 - commands: - - ln -s /etc/kubernetes/admin.conf /run/kubeconfig - files: - - path: /etc/hosts - permission: "0644" - content: | - 127.0.0.1 localhost - - path: "/etc/kubernetes/audit-policy.yaml" - owner_string: "root" - permission: 0600 - content: | - apiVersion: audit.k8s.io/v1 - kind: Policy - rules: - - level: None - users: ["system:kube-proxy"] - verbs: ["watch"] - resources: - - group: "" # core - resources: ["endpoints", "services", "services/status"] - - level: None - users: ["system:unsecured"] - namespaces: ["kube-system"] - verbs: ["get"] - resources: - - group: "" # core - resources: ["configmaps"] - - 
level: None - users: ["kubelet"] # legacy kubelet identity - verbs: ["get"] - resources: - - group: "" # core - resources: ["nodes", "nodes/status"] - - level: None - userGroups: ["system:nodes"] - verbs: ["get"] - resources: - - group: "" # core - resources: ["nodes", "nodes/status"] - - level: None - users: - - system:kube-controller-manager - - system:kube-scheduler - - system:serviceaccount:kube-system:endpoint-controller - verbs: ["get", "update"] - namespaces: ["kube-system"] - resources: - - group: "" # core - resources: ["endpoints"] - - level: None - users: ["system:apiserver"] - verbs: ["get"] - resources: - - group: "" # core - resources: ["namespaces", "namespaces/status", "namespaces/finalize"] - - level: None - users: ["cluster-autoscaler"] - verbs: ["get", "update"] - namespaces: ["kube-system"] - resources: - - group: "" # core - resources: ["configmaps", "endpoints"] - # Don't log HPA fetching metrics. - - level: None - users: - - system:kube-controller-manager - verbs: ["get", "list"] - resources: - - group: "metrics.k8s.io" - # Don't log these read-only URLs. - - level: None - nonResourceURLs: - - /healthz* - - /version - - /swagger* - # Don't log events requests. - - level: None - resources: - - group: "" # core - resources: ["events"] - # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes - - level: Request - users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] - verbs: ["update","patch"] - resources: - - group: "" # core - resources: ["nodes/status", "pods/status"] - omitStages: - - "RequestReceived" - - level: Request - userGroups: ["system:nodes"] - verbs: ["update","patch"] - resources: - - group: "" # core - resources: ["nodes/status", "pods/status"] - omitStages: - - "RequestReceived" - # deletecollection calls can be large, don't log responses for expected namespace deletions - - level: Request - users: ["system:serviceaccount:kube-system:namespace-controller"] - verbs: ["deletecollection"] - omitStages: - - "RequestReceived" - # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, - # so only log at the Metadata level. - - level: Metadata - resources: - - group: "" # core - resources: ["secrets", "configmaps"] - - group: authentication.k8s.io - resources: ["tokenreviews"] - omitStages: - - "RequestReceived" - # Get repsonses can be large; skip them. 
- - level: Request - verbs: ["get", "list", "watch"] - resources: - - group: "" # core - - group: "admissionregistration.k8s.io" - - group: "apiextensions.k8s.io" - - group: "apiregistration.k8s.io" - - group: "apps" - - group: "authentication.k8s.io" - - group: "authorization.k8s.io" - - group: "autoscaling" - - group: "batch" - - group: "certificates.k8s.io" - - group: "extensions" - - group: "metrics.k8s.io" - - group: "networking.k8s.io" - - group: "policy" - - group: "rbac.authorization.k8s.io" - - group: "settings.k8s.io" - - group: "storage.k8s.io" - omitStages: - - "RequestReceived" - # Default level for known APIs - - level: RequestResponse - resources: - - group: "" # core - - group: "admissionregistration.k8s.io" - - group: "apiextensions.k8s.io" - - group: "apiregistration.k8s.io" - - group: "apps" - - group: "authentication.k8s.io" - - group: "authorization.k8s.io" - - group: "autoscaling" - - group: "batch" - - group: "certificates.k8s.io" - - group: "extensions" - - group: "metrics.k8s.io" - - group: "networking.k8s.io" - - group: "policy" - - group: "rbac.authorization.k8s.io" - - group: "settings.k8s.io" - - group: "storage.k8s.io" - omitStages: - - "RequestReceived" - # Default level for all other requests. - - level: Metadata - omitStages: - - "RequestReceived" -pack: - palette: - config: - oidc: - identityProvider: palette -``` - -
- -### Configure OIDC Identity Provider - -The OIDC IDP feature offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured. In this scenario, you can leverage Palette as an IDP without having to configure a third-party IDP. We also support the ability to take advantage of other OIDC providers by making it possible for you to configure OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. - -When you add the PXK-E pack to a cluster profile, Palette displays the OIDC IDP options listed below. - -All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - -You can create a role binding that maps individual users or groups assigned within the OIDC provider's configuration to a role. To learn more, review [Use RBAC with OIDC](/integrations/kubernetes-edge#userbacwithoidc). - -
- -- **None**: This setting does not require OIDC configuration for the cluster. It displays in the YAML file as `noauth`. - - -- **Custom**: This is the default setting and does not require OIDC configuration. However, if desired, it allows you to specify a third-party OIDC provider by configuring OIDC statements in the YAML file as described in [Configure Custom OIDC](/integrations/kubernetes-edge#configurecustomoidc). This setting displays in the YAML file as `none`. - - -- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. - - -- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](/user-management/saml-sso) guide. - - - -If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. - - - - -### Configure Custom OIDC - -Follow these steps to configure a third-party OIDC IDP. - -
- -1. Add the following OIDC parameters to the `apiServer.extraArgs` section of your Kubernetes YAML file when creating a cluster profile. - - -```yaml -cluster: - config: - clusterConfiguration: - apiServer: - extraArgs: - oidc-issuer-url: "provider URL" - oidc-client-id: "client-id" - oidc-groups-claim: "groups" - oidc-username-claim: "email" -``` - -2. Add the following `kubeadmconfig.clientConfig` section that contains OIDC parameters to your Kubernetes YAML file. - - -```yaml -kubeadmconfig: - clientConfig: - oidc-issuer-url: "" - oidc-client-id: "" - oidc-client-secret: "" - oidc-extra-scope: profile,email,openid -``` - -3. Provide third-party OIDC IDP details. - -### Use RBAC with OIDC - -You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - -Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. - -In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. - -![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) - - - -
- - - - - -## Prerequisites - -- A minimum of 2 CPU and 4GB Memory. - - -## Parameters - -| Parameter | Description | -|----------------------------------------------------------|----------------------------------------------------| -| `cluster.config.clusterConfiguration.apiServer.extraArgs` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port.| -| `cluster.config.clusterConfiguration.apiServer.extraVolumes` | This parameter describes extra volumes for the Kubernetes API server, such as `audit-log` and `audit-policy`. | -| `cluster.config.clusterConfiguration.controllerManager.extraArgs` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | -| `cluster.config.clusterConfiguration.etcd.local.dataDir` | This parameter specifies the data directory for etcd, the distributed key-value store that Kubernetes uses to persist cluster state. | -| `cluster.config.clusterConfiguration.networking.podSubnet` | The IP subnet range to assign to pods. Default: 192.168.0.0/16 | -| `cluster.config.clusterConfiguration.networking.serviceSubnet` | The IP subnet range to assign to services. Default: 192.169.0.0/16 | -| `cluster.config.clusterConfiguration.scheduler.extraArgs` | This parameter contains extra arguments for the Kubernetes scheduler, such as disabling profiling. | -| `cluster.config.initConfiguration.nodeRegistration.kubeletExtraArgs` | This parameter contains extra arguments for kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | -| ``pack.palette.config.oidc.identityProvider`` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK-E pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](/integrations/kubernetes-edge#configureoidcidentityprovider). | - -You can add cloud-init stages exposed by [Kairos](https://kairos.io/docs/architecture/cloud-init/), an open-source project. For more information, check out the [Cloud Init Stages](/clusters/edge/edge-configuration/cloud-init) reference. - -You can also use pack settings described in the [Palette eXtended Kubernetes](/integrations/kubernetes) guide. - -
- -## Usage - -The Kubeadm configuration file is where you can do the following: - -
- -- Manually configure a third-party OIDC IDP. For more information, check out [Configure Custom OIDC](/integrations/kubernetes-edge#configurecustomoidc). - - -- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](/integrations/frp) guide. - - -#### Configuration Changes - -The PXK-E Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. - -
- -```yaml -palette: - config: - oidc: - identityProvider: -``` - -
- -#### Example Kubeadm Configuration File - -```yaml -cluster: - config: | - clusterConfiguration: - apiServer: - extraArgs: - advertise-address: "0.0.0.0" - anonymous-auth: "true" - audit-log-maxage: "30" - audit-log-maxbackup: "10" - audit-log-maxsize: "100" - audit-log-path: /var/log/apiserver/audit.log - audit-policy-file: /etc/kubernetes/audit-policy.yaml - authorization-mode: RBAC,Node - default-not-ready-toleration-seconds: "60" - default-unreachable-toleration-seconds: "60" - disable-admission-plugins: AlwaysAdmit - enable-admission-plugins: AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction - profiling: "false" - secure-port: "6443" - tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - extraVolumes: - - hostPath: /var/log/apiserver - mountPath: /var/log/apiserver - name: audit-log - pathType: DirectoryOrCreate - - hostPath: /etc/kubernetes/audit-policy.yaml - mountPath: /etc/kubernetes/audit-policy.yaml - name: audit-policy - pathType: File - readOnly: true - timeoutForControlPlane: 10m0s - controllerManager: - extraArgs: - feature-gates: RotateKubeletServerCertificate=true - pod-eviction-timeout: 1m0s - profiling: "false" - terminated-pod-gc-threshold: "25" - use-service-account-credentials: "true" - dns: {} - kubernetesVersion: v1.25.2 - etcd: - local: - dataDir: "/etc/kubernetes/etcd" - extraArgs: - listen-client-urls: "https://0.0.0.0:2379" - networking: - podSubnet: 192.168.0.0/16 - serviceSubnet: 192.169.0.0/16 - scheduler: - extraArgs: - profiling: "false" - initConfiguration: - localAPIEndpoint: {} - nodeRegistration: - kubeletExtraArgs: - event-qps: "0" - feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: "true" - read-only-port: "0" - tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - joinConfiguration: - discovery: {} - nodeRegistration: - kubeletExtraArgs: - event-qps: "0" - feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: "true" - read-only-port: "0" - tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - -stages: - initramfs: - - sysctl: - vm.overcommit_memory: 1 - kernel.panic: 10 - kernel.panic_on_oops: 1 - commands: - - "ln -s /etc/kubernetes/admin.conf /run/kubeconfig" - files: - - path: /etc/hosts - permission: "0644" - content: | - 127.0.0.1 localhost - - path: "/etc/kubernetes/audit-policy.yaml" - owner_string: "root" - permission: 0600 - content: | - apiVersion: audit.k8s.io/v1 - kind: Policy - rules: - - level: None - users: ["system:kube-proxy"] - verbs: ["watch"] - resources: - - group: "" # core - resources: ["endpoints", "services", "services/status"] - - level: None - users: ["system:unsecured"] - namespaces: ["kube-system"] - verbs: ["get"] - resources: - - group: "" # core - resources: ["configmaps"] 
- - level: None - users: ["kubelet"] # legacy kubelet identity - verbs: ["get"] - resources: - - group: "" # core - resources: ["nodes", "nodes/status"] - - level: None - userGroups: ["system:nodes"] - verbs: ["get"] - resources: - - group: "" # core - resources: ["nodes", "nodes/status"] - - level: None - users: - - system:kube-controller-manager - - system:kube-scheduler - - system:serviceaccount:kube-system:endpoint-controller - verbs: ["get", "update"] - namespaces: ["kube-system"] - resources: - - group: "" # core - resources: ["endpoints"] - - level: None - users: ["system:apiserver"] - verbs: ["get"] - resources: - - group: "" # core - resources: ["namespaces", "namespaces/status", "namespaces/finalize"] - - level: None - users: ["cluster-autoscaler"] - verbs: ["get", "update"] - namespaces: ["kube-system"] - resources: - - group: "" # core - resources: ["configmaps", "endpoints"] - # Don't log HPA fetching metrics. - - level: None - users: - - system:kube-controller-manager - verbs: ["get", "list"] - resources: - - group: "metrics.k8s.io" - # Don't log these read-only URLs. - - level: None - nonResourceURLs: - - /healthz* - - /version - - /swagger* - # Don't log events requests. - - level: None - resources: - - group: "" # core - resources: ["events"] - # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes - - level: Request - users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] - verbs: ["update","patch"] - resources: - - group: "" # core - resources: ["nodes/status", "pods/status"] - omitStages: - - "RequestReceived" - - level: Request - userGroups: ["system:nodes"] - verbs: ["update","patch"] - resources: - - group: "" # core - resources: ["nodes/status", "pods/status"] - omitStages: - - "RequestReceived" - # deletecollection calls can be large, don't log responses for expected namespace deletions - - level: Request - users: ["system:serviceaccount:kube-system:namespace-controller"] - verbs: ["deletecollection"] - omitStages: - - "RequestReceived" - # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, - # so only log at the Metadata level. - - level: Metadata - resources: - - group: "" # core - resources: ["secrets", "configmaps"] - - group: authentication.k8s.io - resources: ["tokenreviews"] - omitStages: - - "RequestReceived" - # Get repsonses can be large; skip them. 
- - level: Request - verbs: ["get", "list", "watch"] - resources: - - group: "" # core - - group: "admissionregistration.k8s.io" - - group: "apiextensions.k8s.io" - - group: "apiregistration.k8s.io" - - group: "apps" - - group: "authentication.k8s.io" - - group: "authorization.k8s.io" - - group: "autoscaling" - - group: "batch" - - group: "certificates.k8s.io" - - group: "extensions" - - group: "metrics.k8s.io" - - group: "networking.k8s.io" - - group: "policy" - - group: "rbac.authorization.k8s.io" - - group: "settings.k8s.io" - - group: "storage.k8s.io" - omitStages: - - "RequestReceived" - # Default level for known APIs - - level: RequestResponse - resources: - - group: "" # core - - group: "admissionregistration.k8s.io" - - group: "apiextensions.k8s.io" - - group: "apiregistration.k8s.io" - - group: "apps" - - group: "authentication.k8s.io" - - group: "authorization.k8s.io" - - group: "autoscaling" - - group: "batch" - - group: "certificates.k8s.io" - - group: "extensions" - - group: "metrics.k8s.io" - - group: "networking.k8s.io" - - group: "policy" - - group: "rbac.authorization.k8s.io" - - group: "settings.k8s.io" - - group: "storage.k8s.io" - omitStages: - - "RequestReceived" - # Default level for all other requests. - - level: Metadata - omitStages: - - "RequestReceived" - pack: - palette: - config: - oidc: - identityProvider: palette - ``` - -
- -### Configure OIDC Identity Provider - -The OIDC IDP feature offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured. In this scenario, you can leverage Palette as an IDP without having to configure a third-party IDP. We also support the ability to take advantage of other OIDC providers by making it possible for you to configure OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. - -When you add the PXK-E pack to a cluster profile, Palette displays the OIDC IDP options listed below. - -All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - -You can create a role binding that maps individual users or groups assigned within the OIDC provider's configuration to a role. To learn more, review [Use RBAC with OIDC](/integrations/kubernetes-edge#userbacwithoidc). - -
- -- **None**: This is the default setting and there is nothing to configure. This setting displays in the YAML file as `noauth`. - - -- **Custom**: This setting allows you to specify a third-party OIDC provider by configuring OIDC statements in the Kubeadm configuration file as described in [Configure Custom OIDC](/integrations/kubernetes-edge#configurecustomoidc). This setting displays in the YAML file as `none`. - - -- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. - - -- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](/user-management/saml-sso) guide. - - - -If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. - - - -### Configure Custom OIDC - -Follow these steps to configure a third-party OIDC IDP. - -
- -1. Add the following OIDC parameters to the `apiServer.extraArgs` section of your Kubernetes YAML file when creating a cluster profile. - - -```yaml -cluster: - config: - clusterConfiguration: - apiServer: - extraArgs: - oidc-issuer-url: "provider URL" - oidc-client-id: "client-id" - oidc-groups-claim: "groups" - oidc-username-claim: "email" -``` - -2. Add the following `kubeadmconfig.clientConfig` section that contains OIDC parameters to your Kubernetes YAML file. - - -```yaml -kubeadmconfig: - clientConfig: - oidc-issuer-url: "" - oidc-client-id: "" - oidc-client-secret: "" - oidc-extra-scope: profile,email,openid -``` - -3. Provide third-party OIDC IDP details. - -### Use RBAC with OIDC - -You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - -Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. - -In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. - -![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) - - -
- - - - - -## Prerequisites - -- A minimum of 2 CPU and 4GB Memory. - - -## Parameters - -| Parameter | Description | -|----------------------------------------------------------|----------------------------------------------------| -| `cluster.config.clusterConfiguration.apiServer.extraArgs` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port.| -| `cluster.config.clusterConfiguration.apiServer.extraVolumes` | This parameter describes extra volumes for the Kubernetes API server, such as `audit-log` and `audit-policy`. | -| `cluster.config.clusterConfiguration.controllerManager.extraArgs` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | -| `cluster.config.clusterConfiguration.etcd.local.dataDir` | This parameter specifies the data directory for etcd, the distributed key-value store that Kubernetes uses to persist cluster state. | -| `cluster.config.clusterConfiguration.networking.podSubnet` | The IP subnet range to assign to pods. Default: 192.168.0.0/16 | -| `cluster.config.clusterConfiguration.networking.serviceSubnet` | The IP subnet range to assign to services. Default: 192.169.0.0/16 | -| `cluster.config.clusterConfiguration.scheduler.extraArgs` | This parameter contains extra arguments for the Kubernetes scheduler, such as disabling profiling. | -| `cluster.config.initConfiguration.nodeRegistration.kubeletExtraArgs` | This parameter contains extra arguments for kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | -| ``pack.palette.config.oidc.identityProvider`` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK-E pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](/integrations/kubernetes-edge#configureoidcidentityprovider). | - -You can add cloud-init stages exposed by [Kairos](https://kairos.io/docs/architecture/cloud-init/), an open-source project. For more information, check out the [Cloud Init Stages](/clusters/edge/edge-configuration/cloud-init) reference. - -You can also use pack settings described in the [Palette eXtended Kubernetes](/integrations/kubernetes) guide. - - -## Usage - -The Kubeadm configuration file is where you can do the following: - -
- -- Manually configure a third-party OIDC IDP. For more information, check out [Configure Custom OIDC](/integrations/kubernetes-edge#configurecustomoidc). - - -- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](/integrations/frp) guide. - - -#### Configuration Changes - -The PXK-E Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. - -
- -```yaml -palette: - config: - dashboard: - identityProvider: -``` - -
- -#### Example Kubeadm Configuration File - -```yaml -cluster: - config: | - clusterConfiguration: - apiServer: - extraArgs: - advertise-address: "0.0.0.0" - anonymous-auth: "true" - audit-log-maxage: "30" - audit-log-maxbackup: "10" - audit-log-maxsize: "100" - audit-log-path: /var/log/apiserver/audit.log - audit-policy-file: /etc/kubernetes/audit-policy.yaml - authorization-mode: RBAC,Node - default-not-ready-toleration-seconds: "60" - default-unreachable-toleration-seconds: "60" - disable-admission-plugins: AlwaysAdmit - enable-admission-plugins: AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction - profiling: "false" - secure-port: "6443" - tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - extraVolumes: - - hostPath: /var/log/apiserver - mountPath: /var/log/apiserver - name: audit-log - pathType: DirectoryOrCreate - - hostPath: /etc/kubernetes/audit-policy.yaml - mountPath: /etc/kubernetes/audit-policy.yaml - name: audit-policy - pathType: File - readOnly: true - timeoutForControlPlane: 10m0s - controllerManager: - extraArgs: - feature-gates: RotateKubeletServerCertificate=true - pod-eviction-timeout: 1m0s - profiling: "false" - terminated-pod-gc-threshold: "25" - use-service-account-credentials: "true" - dns: {} - kubernetesVersion: v1.24.6 - etcd: - local: - dataDir: "/etc/kubernetes/etcd" - extraArgs: - listen-client-urls: "https://0.0.0.0:2379" - networking: - podSubnet: 192.168.0.0/16 - serviceSubnet: 192.169.0.0/16 - scheduler: - extraArgs: - profiling: "false" - initConfiguration: - localAPIEndpoint: {} - nodeRegistration: - kubeletExtraArgs: - event-qps: "0" - feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: "true" - read-only-port: "0" - tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - joinConfiguration: - discovery: {} - nodeRegistration: - kubeletExtraArgs: - event-qps: "0" - feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: "true" - read-only-port: "0" - tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - -stages: - initramfs: - - sysctl: - vm.overcommit_memory: 1 - kernel.panic: 10 - kernel.panic_on_oops: 1 - commands: - - "ln -s /etc/kubernetes/admin.conf /run/kubeconfig" - files: - - path: /etc/hosts - permission: "0644" - content: | - 127.0.0.1 localhost - - path: "/etc/kubernetes/audit-policy.yaml" - owner_string: "root" - permission: 0600 - content: | - apiVersion: audit.k8s.io/v1 - kind: Policy - rules: - - level: None - users: ["system:kube-proxy"] - verbs: ["watch"] - resources: - - group: "" # core - resources: ["endpoints", "services", "services/status"] - - level: None - users: ["system:unsecured"] - namespaces: ["kube-system"] - verbs: ["get"] - resources: - - group: "" # core - resources: ["configmaps"] 
- - level: None - users: ["kubelet"] # legacy kubelet identity - verbs: ["get"] - resources: - - group: "" # core - resources: ["nodes", "nodes/status"] - - level: None - userGroups: ["system:nodes"] - verbs: ["get"] - resources: - - group: "" # core - resources: ["nodes", "nodes/status"] - - level: None - users: - - system:kube-controller-manager - - system:kube-scheduler - - system:serviceaccount:kube-system:endpoint-controller - verbs: ["get", "update"] - namespaces: ["kube-system"] - resources: - - group: "" # core - resources: ["endpoints"] - - level: None - users: ["system:apiserver"] - verbs: ["get"] - resources: - - group: "" # core - resources: ["namespaces", "namespaces/status", "namespaces/finalize"] - - level: None - users: ["cluster-autoscaler"] - verbs: ["get", "update"] - namespaces: ["kube-system"] - resources: - - group: "" # core - resources: ["configmaps", "endpoints"] - # Don't log HPA fetching metrics. - - level: None - users: - - system:kube-controller-manager - verbs: ["get", "list"] - resources: - - group: "metrics.k8s.io" - # Don't log these read-only URLs. - - level: None - nonResourceURLs: - - /healthz* - - /version - - /swagger* - # Don't log events requests. - - level: None - resources: - - group: "" # core - resources: ["events"] - # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes - - level: Request - users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] - verbs: ["update","patch"] - resources: - - group: "" # core - resources: ["nodes/status", "pods/status"] - omitStages: - - "RequestReceived" - - level: Request - userGroups: ["system:nodes"] - verbs: ["update","patch"] - resources: - - group: "" # core - resources: ["nodes/status", "pods/status"] - omitStages: - - "RequestReceived" - # deletecollection calls can be large, don't log responses for expected namespace deletions - - level: Request - users: ["system:serviceaccount:kube-system:namespace-controller"] - verbs: ["deletecollection"] - omitStages: - - "RequestReceived" - # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, - # so only log at the Metadata level. - - level: Metadata - resources: - - group: "" # core - resources: ["secrets", "configmaps"] - - group: authentication.k8s.io - resources: ["tokenreviews"] - omitStages: - - "RequestReceived" - # Get repsonses can be large; skip them. 
- - level: Request - verbs: ["get", "list", "watch"] - resources: - - group: "" # core - - group: "admissionregistration.k8s.io" - - group: "apiextensions.k8s.io" - - group: "apiregistration.k8s.io" - - group: "apps" - - group: "authentication.k8s.io" - - group: "authorization.k8s.io" - - group: "autoscaling" - - group: "batch" - - group: "certificates.k8s.io" - - group: "extensions" - - group: "metrics.k8s.io" - - group: "networking.k8s.io" - - group: "policy" - - group: "rbac.authorization.k8s.io" - - group: "settings.k8s.io" - - group: "storage.k8s.io" - omitStages: - - "RequestReceived" - # Default level for known APIs - - level: RequestResponse - resources: - - group: "" # core - - group: "admissionregistration.k8s.io" - - group: "apiextensions.k8s.io" - - group: "apiregistration.k8s.io" - - group: "apps" - - group: "authentication.k8s.io" - - group: "authorization.k8s.io" - - group: "autoscaling" - - group: "batch" - - group: "certificates.k8s.io" - - group: "extensions" - - group: "metrics.k8s.io" - - group: "networking.k8s.io" - - group: "policy" - - group: "rbac.authorization.k8s.io" - - group: "settings.k8s.io" - - group: "storage.k8s.io" - omitStages: - - "RequestReceived" - # Default level for all other requests. - - level: Metadata - omitStages: - - "RequestReceived" - ``` -
- -### Configure OIDC Identity Provider - -The OIDC IDP feature offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured. In this scenario, you can leverage Palette as an IDP without having to configure a third-party IDP. We also support the ability to take advantage of other OIDC providers by making it possible for you to configure OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. - -When you add the PXK-E pack to a cluster profile, Palette displays the OIDC IDP options listed below. - -All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - -You can create a role binding that maps individual users or groups assigned within the OIDC provider's configuration to a role. To learn more, review [Use RBAC with OIDC](/integrations/kubernetes-edge#userbacwithoidc). - -
- -- **None**: This is the default setting and there is nothing to configure. This setting displays in the YAML file as `noauth`. - - -- **Custom**: This setting allows you to specify a third-party OIDC provider by configuring OIDC statements in the Kubeadm configuration file as described in [Configure Custom OIDC](/integrations/kubernetes-edge#configurecustomoidc). This setting displays in the YAML file as `none`. - - -- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. - - -- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](/user-management/saml-sso) guide. - - - -If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. - - - - -### Configure Custom OIDC - -Follow these steps to configure a third-party OIDC IDP. - -
- -1. Add the following OIDC parameters to the `apiServer.extraArgs` section of your Kubernetes YAML file when creating a cluster profile. - - -```yaml -cluster: - config: - clusterConfiguration: - apiServer: - extraArgs: - oidc-issuer-url: "provider URL" - oidc-client-id: "client-id" - oidc-groups-claim: "groups" - oidc-username-claim: "email" -``` - -2. Add the following `kubeadmconfig.clientConfig` section that contains OIDC parameters to your Kubernetes YAML file. - - -```yaml -kubeadmconfig: - clientConfig: - oidc-issuer-url: "" - oidc-client-id: "" - oidc-client-secret: "" - oidc-extra-scope: profile,email,openid -``` - -3. Provide third-party OIDC IDP details. - - -### Use RBAC with OIDC - -You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - - -Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. - -In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. - -![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) - - -
- - - - - - - -All versions less than v1.23.x are considered deprecated. Upgrade to a newer version to take advantage of new features. - - - - - -
- - -# Terraform - -You can reference Kubernetes in Terraform with the following code snippet. - -
- -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "edge-k8s" { - name = "edge-k8s" - version = "1.26.4" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - -# Resources - -- [Kubernetes](https://kubernetes.io/) - - - -- [Kubernetes Documentation](https://kubernetes.io/docs/concepts/overview/) - - - -- [Image Swap with Palette](/clusters/cluster-management/image-swap) - diff --git a/content/docs/06-integrations/00-kubernetes-generic.md b/content/docs/06-integrations/00-kubernetes-generic.md deleted file mode 100644 index 36880a62d3..0000000000 --- a/content/docs/06-integrations/00-kubernetes-generic.md +++ /dev/null @@ -1,1028 +0,0 @@ ---- -title: "Kubernetes" -metaTitle: "Kubernetes" -metaDescription: "Learn about the Kubernetes pack and how you can use it with your host clusters." -hiddenFromNav: true -type: "integration" -category: ["kubernetes", 'amd64'] -logoUrl: "https://registry.spectrocloud.com/v1/k8s-dashboard/blobs/sha256:2de5d88b2573af42d4cc269dff75744c4174ce47cbbeed5445e51a2edd8b7429?type=image/png" ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Kubernetes - -The Kubernetes pack supports several cloud and data center infrastructure providers. This pack defines the default properties we use to deploy Kubernetes clusters and enables most of the Kubernetes hardening standards that the Center for Internet Security (CIS) recommends. - -We also support managed Kubernetes distributions for Elastic Kubernetes Service (EKS), Azure Kubernetes Service (AKS), Google Kubernetes Engine (GKE), and Tencent Kubernetes Engine (TKE). - -Review the [Maintenance Policy](/integrations/maintenance-policy) to learn about pack update and deprecation schedules. - -
- -# Versions Supported - - - - - - - -## Prerequisites - -- A minimum of 4 CPU and 4GB Memory. - - -- Users or groups mapped to a Kubernetes RBAC role. - - -- Operating System (OS) dependencies as listed in the table. - -| OS Distribution | OS Version | Supports Kubernetes 1.27.x | -|---------------|------------|----------------------------| -| CentOS | 7.7 | ❌ | -| Ubuntu | 22.04 | ✅ | -| Ubuntu | 20.04 | ❌ | -| Ubuntu | 18.04 | ❌ | - - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `pack.palette.config.oidcidentityProvider`| OIDC identity provider configuration. | -| ``pack.podCIDR`` | The CIDR range for Pods in cluster. This should match the networking layer property. Default: `192.168.0.0/16`| -| ``pack.serviceClusterIpRange`` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| -| ``kubeadmconfig.apiServer.extraArgs`` | A list of additional apiServer flags you can set.| -| ``kubeadmconfig.apiServer.extraVolumes`` | A list of additional volumes to mount on apiServer.| -| ``kubeadmconfig.controllerManager.extraArgs`` | A list of additional ControllerManager flags to set.| -| ``kubeadmconfig.scheduler.extraArgs`` | A list of additional Kube scheduler flags to set.| -| ``kubeadmconfig.kubeletExtraArgs`` | A list of kubelet arguments to set and copy to the nodes.| -| ``kubeadmconfig.files`` | A list of additional files to copy to the nodes.| -| ``kubeadmconfig.preKubeadmCommands`` | A list of additional commands to invoke **before** running kubeadm commands.| -| ``kubeadmconfig.postKubeadmCommands`` | A list of additional commands to invoke **after** running kubeadm commands.| -| ``pack.serviceDomain`` | The DNS name for the service domain in the cluster. Default: ``cluster.local``.| - - -## Usage - -The Kubeadm configuration file is where you can do the following: - -
- -- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. - - As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](/clusters) guide and [Cluster Deployment Errors](https://docs.spectrocloud.com/troubleshooting/cluster-deployment). - - -- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](/integrations/frp) guide. - - -#### Configuration Changes - -The Kubeadm config is updated with hardening improvements that do the following: - -
- -- Meet CIS standards for operating systems (OS). - - -- Enable a Kubernetes audit policy in the pack. The audit policy is hidden, and you cannot customize the default audit policy. If you want to apply your custom audit policy, refer to the [Enable Audit Logging](/audit-logs/kube-api-audit-logging) guide to learn how to create your custom audit policy by adjusting API server flags. - - -- Replace a deprecated PodSecurityPolicy (PSP) with one that offers three built-in policy profiles for broad security coverage: - -
- - - **Privileged**: An unrestricted policy that provides wide permission levels and allows for known privilege escalations. - -
- - - **Baseline**: A policy that offers minimal restrictions and prevents known privilege escalations. As shown in the example below, you can override the default cluster-wide policy to set baseline enforcement by enabling the `PodSecurity` Admission plugin in the `enable-admission-plugins` section of the YAML file. You can then add a custom Admission configuration and set the `admission-control-config-file` flag to the custom Admission. - -
- - ```yaml - kubeadmconfig: - apiServer: - extraArgs: - secure-port: "6443" - anonymous-auth: "true" - profiling: "false" - disable-admission-plugins: "AlwaysAdmit" - default-not-ready-toleration-seconds: "60" - default-unreachable-toleration-seconds: "60" - enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" - admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" - audit-log-path: /var/log/apiserver/audit.log - audit-policy-file: /etc/kubernetes/audit-policy.yaml - ``` - - - **Restricted**: A heavily restricted policy that follows Pod hardening best practices. This policy is set to warn and audit and identifies Pods that require privileged access. - -
- - You can enforce these policies at the cluster level or the Namespace level. For workloads that require privileged access, you can relax `PodSecurity` enforcement by adding these labels in the Namespace: - -
- - ```yaml - pod-security.kubernetes.io/enforce: privileged - pod-security.kubernetes.io/enforce-version: v1.26 - ``` - - -
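-
-  A minimal sketch of a Namespace manifest that carries these labels is shown below. The namespace name `privileged-workloads` is a placeholder used only for illustration.
-
-  ```yaml
-  apiVersion: v1
-  kind: Namespace
-  metadata:
-    name: privileged-workloads
-    labels:
-      # Relax Pod Security admission for workloads in this namespace only.
-      pod-security.kubernetes.io/enforce: privileged
-      pod-security.kubernetes.io/enforce-version: v1.26
-  ```
-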
- -#### Kubeadm Configuration File - -The default pack YAML contains minimal configurations offered by the managed provider. - - -### Configure OIDC Identity Provider - -You can configure an OpenID Connect (OIDC) identity provider to authenticate users and groups in your cluster. OIDC is an authentication layer on top of OAuth 2.0, an authorization framework that allows users to authenticate to a cluster without using a password. - -OIDC requires a *RoleBinding* for the users or groups you want to provide cluster access. You must create a RoleBinding to a Kubernetes role that is available in the cluster. The Kubernetes role can be a custom role you created or a [default Kubernetes role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles), such as the `cluster-admin` role. To learn how to create a RoleBinding through Palette, refer to [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - -
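-
-As a reference, a minimal sketch of such a RoleBinding is shown below. The user name, binding name, and namespace are placeholders; the subject must match the username claim, such as an email address, returned by your OIDC provider.
-
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: oidc-user-admin
-  namespace: default
-subjects:
-  # The user name must match the OIDC username claim, for example an email address.
-  - kind: User
-    name: user@example.com
-    apiGroup: rbac.authorization.k8s.io
-roleRef:
-  # Bind to the default user-facing admin ClusterRole within the namespace.
-  kind: ClusterRole
-  name: admin
-  apiGroup: rbac.authorization.k8s.io
-```
-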
- -#### Configure Custom OIDC - -The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory). - -
- - - - - - -Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. - -
- - -1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. Replace the `identityProvider` value with your OIDC provider name. - -
- - ```yaml - pack: - palette: - config: - oidc: - identityProvider: yourIdentityProviderNameHere - ``` - -2. Add the following `kubeadmconfig` parameters. Replace the values with your OIDC provider values. - -
- - ```yaml - kubeadmconfig: - apiServer: - extraArgs: - oidc-issuer-url: "provider URL" - oidc-client-id: "client-id" - oidc-groups-claim: "groups" - oidc-username-claim: "email" - ``` - -3. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. - -
- - ```yaml - kubeadmconfig: - clientConfig: - oidc-issuer-url: "" - oidc-client-id: "" - oidc-client-secret: "" - oidc-extra-scope: profile,email,openid - ``` - - -
- - - - -Follow these steps to configure OIDC for managed EKS clusters. - -
- -1. In the Kubernetes pack, uncomment the lines in the `oidcIdentityProvider` parameter section of the Kubernetes pack, and enter your third-party provider details. - -```yaml -oidcIdentityProvider: - identityProviderConfigName: 'Spectro-docs' - issuerUrl: 'issuer-url' - clientId: 'user-client-id-from-Palette' - usernameClaim: "email" - usernamePrefix: "-" - groupsClaim: "groups" - groupsPrefix: "" - requiredClaims: -``` - -2. Under the `clientConfig` parameter section of Kubernetes pack, uncomment the `oidc-` configuration lines. - -```yaml -clientConfig: - oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" - oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" - oidc-client-secret: yourSecretKeyHere - oidc-extra-scope: profile,email -``` - -3. Provide third-party OIDC IDP details. - -
-
- - -
- - - - -## Prerequisites - -- A minimum of 4 CPU and 4GB Memory. - - -- Users or groups mapped to a Kubernetes RBAC role. - - -- Operating System (OS) dependencies as listed in the table. - -| OS Distribution | OS Version | Supports Kubernetes 1.26.x | -|---------------|------------|----------------------------| -| CentOS | 7.7 | ✅ | -| Ubuntu | 22.04 | ✅ | -| Ubuntu | 20.04 | ❌ | -| Ubuntu | 18.04 | ❌ | - - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `pack.palette.config.oidcidentityProvider`| OIDC identity provider configuration. | -| ``pack.podCIDR`` | The CIDR range for Pods in cluster. This should match the networking layer property. Default: `192.168.0.0/16`| -| ``pack.serviceClusterIpRange`` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| -| ``kubeadmconfig.apiServer.extraArgs`` | A list of additional apiServer flags you can set.| -| ``kubeadmconfig.apiServer.extraVolumes`` | A list of additional volumes to mount on apiServer.| -| ``kubeadmconfig.controllerManager.extraArgs`` | A list of additional ControllerManager flags to set.| -| ``kubeadmconfig.scheduler.extraArgs`` | A list of additional Kube scheduler flags to set.| -| ``kubeadmconfig.kubeletExtraArgs`` | A list of kubelet arguments to set and copy to the nodes.| -| ``kubeadmconfig.files`` | A list of additional files to copy to the nodes.| -| ``kubeadmconfig.preKubeadmCommands`` | A list of additional commands to invoke **before** running kubeadm commands.| -| ``kubeadmconfig.postKubeadmCommands`` | A list of additional commands to invoke **after** running kubeadm commands.| -| ``pack.serviceDomain`` | The DNS name for the service domain in the cluster. Default: ``cluster.local``.| - - -## Usage - -The Kubeadm configuration file is where you can do the following: - -
- -- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. - - As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](/clusters) guide and [Cluster Deployment Errors](https://docs.spectrocloud.com/troubleshooting/cluster-deployment). - - -- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](/integrations/frp) guide. - - -#### Configuration Changes - -The Kubeadm config is updated with hardening improvements that do the following: - -
- -- Meet CIS standards for operating systems (OS). - - -- Enable a Kubernetes audit policy in the pack. The audit policy is hidden, and you cannot customize the default audit policy. If you want to apply your custom audit policy, refer to the [Enable Audit Logging](/audit-logs/kube-api-audit-logging) guide to learn how to create your custom audit policy by adjusting API server flags. - - -- Replace a deprecated PodSecurityPolicy (PSP) with one that offers three built-in policy profiles for broad security coverage: - -
- - - **Privileged**: An unrestricted policy that provides wide permission levels and allows for known privilege escalations. - -
- - - **Baseline**: A policy that offers minimal restrictions and prevents known privilege escalations. As shown in the example below, you can override the default cluster-wide policy to set baseline enforcement by enabling the `PodSecurity` Admission plugin in the `enable-admission-plugins` section of the YAML file. You can then add a custom Admission configuration and set the `admission-control-config-file` flag to the custom Admission. - -
- - ```yaml - kubeadmconfig: - apiServer: - extraArgs: - secure-port: "6443" - anonymous-auth: "true" - profiling: "false" - disable-admission-plugins: "AlwaysAdmit" - default-not-ready-toleration-seconds: "60" - default-unreachable-toleration-seconds: "60" - enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" - admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" - audit-log-path: /var/log/apiserver/audit.log - audit-policy-file: /etc/kubernetes/audit-policy.yaml - ``` - - - **Restricted**: A heavily restricted policy that follows Pod hardening best practices. This policy is set to warn and audit and identifies Pods that require privileged access. - -
- - You can enforce these policies at the cluster level or the Namespace level. For workloads that require privileged access, you can relax `PodSecurity` enforcement by adding these labels in the Namespace: - -
- - ```yaml - pod-security.kubernetes.io/enforce: privileged - pod-security.kubernetes.io/enforce-version: v1.26 - ``` - - -
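-
-The hardening changes above note that the default audit policy is hidden and that a custom audit policy is applied through API server flags. A minimal sketch of a custom audit policy file is shown below; the rules are illustrative only, and the [Enable Audit Logging](/audit-logs/kube-api-audit-logging) guide describes the flags needed to mount and reference the file.
-
-```yaml
-apiVersion: audit.k8s.io/v1
-kind: Policy
-omitStages:
-  - "RequestReceived"
-rules:
-  # Do not log requests made by the kube-proxy component.
-  - level: None
-    users: ["system:kube-proxy"]
-  # Record Secret and ConfigMap access at the Metadata level to avoid storing sensitive payloads.
-  - level: Metadata
-    resources:
-      - group: ""
-        resources: ["secrets", "configmaps"]
-  # Log everything else at the Request level.
-  - level: Request
-```
-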
- -#### Kubeadm Configuration File - -The default pack YAML contains minimal configurations offered by the managed provider. - - -### Configure OIDC Identity Provider - -You can configure an OpenID Connect (OIDC) identity provider to authenticate users and groups in your cluster. OIDC is an authentication layer on top of OAuth 2.0, an authorization framework that allows users to authenticate to a cluster without using a password. - -OIDC requires a *RoleBinding* for the users or groups you want to provide cluster access. You must create a RoleBinding to a Kubernetes role that is available in the cluster. The Kubernetes role can be a custom role you created or a [default Kubernetes role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles), such as the `cluster-admin` role. To learn how to create a RoleBinding through Palette, refer to [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - -
- -#### Configure Custom OIDC - -The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory). - -
- - - - - - -Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. - -
- - -1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. Replace the `identityProvider` value with your OIDC provider name. - -
- - ```yaml - pack: - palette: - config: - oidc: - identityProvider: yourIdentityProviderNameHere - ``` - -2. Add the following `kubeadmconfig` parameters. Replace the values with your OIDC provider values. - -
- - ```yaml - kubeadmconfig: - apiServer: - extraArgs: - oidc-issuer-url: "provider URL" - oidc-client-id: "client-id" - oidc-groups-claim: "groups" - oidc-username-claim: "email" - ``` - -3. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. - -
- - ```yaml - kubeadmconfig: - clientConfig: - oidc-issuer-url: "" - oidc-client-id: "" - oidc-client-secret: "" - oidc-extra-scope: profile,email,openid - ``` - - -
- - - - -Follow these steps to configure OIDC for managed EKS clusters. - -
- -1. In the Kubernetes pack, uncomment the lines in the `oidcIdentityProvider` parameter section of the Kubernetes pack, and enter your third-party provider details. - -```yaml -oidcIdentityProvider: - identityProviderConfigName: 'Spectro-docs' - issuerUrl: 'issuer-url' - clientId: 'user-client-id-from-Palette' - usernameClaim: "email" - usernamePrefix: "-" - groupsClaim: "groups" - groupsPrefix: "" - requiredClaims: -``` - -2. Under the `clientConfig` parameter section of Kubernetes pack, uncomment the `oidc-` configuration lines. - -```yaml -clientConfig: - oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" - oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" - oidc-client-secret: yourSecretKeyHere - oidc-extra-scope: profile,email -``` - -3. Provide third-party OIDC IDP details. - -
-
- - -
- - - - - -## Prerequisites - -- A minimum of 4 CPU and 4GB Memory. - - -- Operating System (OS) dependencies as listed in the table. - -| OS Distribution | OS Version | Supports Kubernetes 1.25.x | -|---------------|------------|----------------------------| -| CentOS | 7.7 | ✅ | -| Ubuntu | 22.04 | ✅ | -| Ubuntu | 20.04 | ❌ | -| Ubuntu | 18.04 | ❌ | - - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `pack.palette.config.oidcidentityProvider`| OIDC identity provider configuration. | -| ``pack.podCIDR`` | The CIDR range for Pods in cluster. This should match the networking layer property. Default: `192.168.0.0/16`| -| ``pack.serviceClusterIpRange`` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| -| ``kubeadmconfig.apiServer.extraArgs`` | A list of additional apiServer flags you can set.| -| ``kubeadmconfig.apiServer.extraVolumes`` | A list of additional volumes to mount on apiServer.| -| ``kubeadmconfig.controllerManager.extraArgs`` | A list of additional ControllerManager flags to set.| -| ``kubeadmconfig.scheduler.extraArgs`` | A list of additional Kube scheduler flags to set.| -| ``kubeadmconfig.kubeletExtraArgs`` | A list of kubelet arguments to set and copy to the nodes.| -| ``kubeadmconfig.files`` | A list of additional files to copy to the nodes.| -| ``kubeadmconfig.preKubeadmCommands`` | A list of additional commands to invoke **before** running kubeadm commands.| -| ``kubeadmconfig.postKubeadmCommands`` | A list of additional commands to invoke **after** running kubeadm commands.| -| ``pack.serviceDomain`` | The DNS name for the service domain in the cluster. Default: ``cluster.local``.| - -## Usage - -The Kubeadm configuration file is where you can do the following: - -
- -- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. - - As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](/clusters) guide and [Cluster Deployment Errors](https://docs.spectrocloud.com/troubleshooting/cluster-deployment). - - -- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](/integrations/frp) guide. - - -#### Configuration Changes - - -The Kubeadm config is updated with hardening improvements that do the following: - -
- -- Meet CIS standards for operating systems (OS). - - -- Enable a Kubernetes audit policy in the pack that you can customize by adjusting API server flags. - - -- Replace a deprecated PodSecurityPolicy (PSP) with one that offers three built-in policy profiles for broad security coverage: - -
- - - **Privileged**: An unrestricted policy that provides wide permission levels and allows for known privilege escalations. - -
- - - **Baseline**: A policy that offers minimal restrictions and prevents known privilege escalations. As shown in the example below, you can override the default cluster-wide policy to set baseline enforcement by enabling the `PodSecurity` Admission plugin in the `enable-admission-plugins` section of the YAML file. You can then add a custom Admission configuration and set the `admission-control-config-file` flag to the custom Admission. - -
- - ```yaml - kubeadmconfig: - apiServer: - extraArgs: - secure-port: "6443" - anonymous-auth: "true" - profiling: "false" - disable-admission-plugins: "AlwaysAdmit" - default-not-ready-toleration-seconds: "60" - default-unreachable-toleration-seconds: "60" - enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" - admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" - audit-log-path: /var/log/apiserver/audit.log - audit-policy-file: /etc/kubernetes/audit-policy.yaml - ``` - - - **Restricted**: A heavily restricted policy that follows Pod hardening best practices. This policy is set to warn and audit and identifies Pods that require privileged access. - -
- - You can enforce these policies at the cluster level or the Namespace level. For workloads that require privileged access, you can relax `PodSecurity` enforcement by adding these labels in the Namespace: - -
- - ```yaml - pod-security.kubernetes.io/enforce: privileged - pod-security.kubernetes.io/enforce-version: v1.25 - ``` - -
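-
-The **Baseline** profile description above mentions setting the `admission-control-config-file` flag to a custom Admission configuration. A minimal sketch of such a file is shown below; the profile levels and exempted namespaces are examples you would adjust for your environment.
-
-```yaml
-apiVersion: apiserver.config.k8s.io/v1
-kind: AdmissionConfiguration
-plugins:
-  - name: PodSecurity
-    configuration:
-      apiVersion: pod-security.admission.config.k8s.io/v1
-      kind: PodSecurityConfiguration
-      # Cluster-wide defaults applied to namespaces that do not set their own labels.
-      defaults:
-        enforce: "baseline"
-        enforce-version: "v1.25"
-        warn: "restricted"
-        warn-version: "v1.25"
-        audit: "restricted"
-        audit-version: "v1.25"
-      exemptions:
-        usernames: []
-        runtimeClasses: []
-        # Namespaces excluded from Pod Security admission, such as system namespaces.
-        namespaces: [kube-system]
-```
-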
- -
- -#### Kubeadm Configuration File - -The default pack YAML contains minimal configurations offered by the managed provider. - - -### Configure OIDC Identity Provider - -You can configure an OpenID Connect (OIDC) identity provider to authenticate users and groups in your cluster. OIDC is an authentication layer on top of OAuth 2.0, an authorization framework that allows users to authenticate to a cluster without using a password. - -OIDC requires a *RoleBinding* for the users or groups you want to provide cluster access. You must create a RoleBinding to a Kubernetes role that is available in the cluster. The Kubernetes role can be a custom role you created or a [default Kubernetes role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles), such as the `cluster-admin` role. To learn how to create a RoleBinding through Palette, refer to [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - -
- -#### Configure Custom OIDC - -The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory). - -
- - - - - - -Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. - -
- -1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. Replace the `identityProvider` value with your OIDC provider name. - -
- - ```yaml - pack: - palette: - config: - oidc: - identityProvider: palette - ``` - - -2. Add the following `kubeadmconfig` parameters. Replace the values with your OIDC provider values. - -
- - ```yaml - kubeadmconfig: - apiServer: - extraArgs: - oidc-issuer-url: "provider URL" - oidc-client-id: "client-id" - oidc-groups-claim: "groups" - oidc-username-claim: "email" - ``` - - -3. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. - - -```yaml -kubeadmconfig: - clientConfig: - oidc-issuer-url: "" - oidc-client-id: "" - oidc-client-secret: "" - oidc-extra-scope: profile,email,openid -``` - - -
- - - - -Follow these steps to configure OIDC for managed EKS clusters. - -
- -1. In the Kubernetes pack, uncomment the lines in the `oidcIdentityProvider` parameter section of the Kubernetes pack, and enter your third-party provider details. - -
- - ```yaml - oidcIdentityProvider: - identityProviderConfigName: 'Spectro-docs' - issuerUrl: 'issuer-url' - clientId: 'user-client-id-from-Palette' - usernameClaim: "email" - usernamePrefix: "-" - groupsClaim: "groups" - groupsPrefix: "" - requiredClaims: - ``` - -2. Under the `clientConfig` parameter section of Kubernetes pack, uncomment the `oidc-` configuration lines. - -
- - ```yaml - clientConfig: - oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" - oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" - oidc-client-secret: yourSecretKeyHere - oidc-extra-scope: profile,email - ``` - -3. Provide third-party OIDC IDP details. - -
-
- -
- - - - - -## Prerequisites - -- A minimum of 4 CPU and 4GB Memory. - - -- Operating System (OS) dependencies as listed in the table. - -| OS Distribution | OS Version | Supports Kubernetes 1.24.x | -|---------------|------------|----------------------------| -| CentOS | 7.7 | ✅ | -| Ubuntu | 22.04 | ❌ | -| Ubuntu | 20.04 | ✅ | -| Ubuntu | 18.04 | ❌ | - - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `pack.palette.config.oidcidentityProvider`| OIDC identity provider configuration. | -| `pack.podCIDR` | The CIDR range for Pods in cluster. This should match the networking layer property. Default: `192.168.0.0/16`| -| `pack.serviceClusterIpRange` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| -| `kubeadmconfig.apiServer.extraArgs` | A list of additional apiServer flags you can set.| -| `kubeadmconfig.apiServer.extraVolumes` | A list of additional volumes to mount on apiServer.| -| `kubeadmconfig.controllerManager.extraArgs` | A list of additional ControllerManager flags to set.| -| `kubeadmconfig.scheduler.extraArgs` | A list of additional Kube scheduler flags to set.| -| `kubeadmconfig.kubeletExtraArgs` | A list of kubelet arguments to set and copy to the nodes.| -| `kubeadmconfig.files` | A list of additional files to copy to the nodes. | -| `kubeadmconfig.preKubeadmCommands` | A list of additional commands to invoke **before** running kubeadm commands.| -| `kubeadmconfig.postKubeadmCommands` | A list of additional commands to invoke **after** running kubeadm commands.| -| `pack.serviceDomain` | The DNS name for the service domain in the cluster. Default: ``cluster.local``.| - - -## Usage - -The Kubeadm configuration file is where you can do the following: - -
- -- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. - - As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](/clusters) guide and [Cluster Deployment Errors](https://docs.spectrocloud.com/troubleshooting/cluster-deployment). - - -- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](/integrations/frp) guide. - - -#### Configuration Changes - -The Kubeadm config is updated with hardening improvements that do the following: - -
- -- Meet CIS standards for operating systems (OS). - - -- Enable a Kubernetes audit policy in the pack that you can customize by adjusting API server flags. - - -- Replace a deprecated PodSecurityPolicy (PSP) with one that offers three built-in policy profiles for broad security coverage: - -
- - - **Privileged**: An unrestricted policy that provides wide permission levels and allows for known privilege escalations. - -
- - - **Baseline**: A policy that offers minimal restrictions and prevents known privilege escalations. As shown in the example below, you can override the default cluster-wide policy to set baseline enforcement by enabling the `PodSecurity` Admission plugin in the `enable-admission-plugins` section of the YAML file. You can then add a custom Admission configuration and set the `admission-control-config-file` flag to the custom Admission. - -
- - ```yaml - kubeadmconfig: - apiServer: - extraArgs: - secure-port: "6443" - anonymous-auth: "true" - profiling: "false" - disable-admission-plugins: "AlwaysAdmit" - default-not-ready-toleration-seconds: "60" - default-unreachable-toleration-seconds: "60" - enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" - admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" - audit-log-path: /var/log/apiserver/audit.log - audit-policy-file: /etc/kubernetes/audit-policy.yaml - ``` - - - **Restricted**: A heavily restricted policy that follows Pod hardening best practices. This policy is set to warn and audit and identifies Pods that require privileged access. - -
- - You can enforce these policies at the cluster level or the Namespace level. For workloads that require privileged access, you can relax `PodSecurity` enforcement by adding these labels in the Namespace: - -
- - ```yaml - pod-security.kubernetes.io/enforce: privileged - pod-security.kubernetes.io/enforce-version: v1.24 - ``` - -
- -
- -#### Kubeadm Configuration File - -The default pack YAML contains minimal configurations offered by the managed provider. - - -### Configure OIDC Identity Provider - -You can configure an OpenID Connect (OIDC) identity provider to authenticate users and groups in your cluster. OIDC is an authentication layer on top of OAuth 2.0, an authorization framework that allows users to authenticate to a cluster without using a password. - -OIDC requires a *RoleBinding* for the users or groups you want to provide cluster access. You must create a RoleBinding to a Kubernetes role that is available in the cluster. The Kubernetes role can be a custom role you created or a [default Kubernetes role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles), such as the `cluster-admin` role. To learn how to create a RoleBinding through Palette, refer to [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - -
- -#### Configure Custom OIDC - -The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory). - -
- - - - - - -Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. - -
- - -1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. Replace the `identityProvider` value with your OIDC provider name. - -
- - ```yaml - pack: - palette: - config: - oidc: - identityProvider: palette - ``` - - -2. Add the following `kubeadmconfig` parameters. Replace the values with your OIDC provider values. - -
- - ```yaml - kubeadmconfig: - apiServer: - extraArgs: - oidc-issuer-url: "provider URL" - oidc-client-id: "client-id" - oidc-groups-claim: "groups" - oidc-username-claim: "email" - ``` - -3. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. - -
- - ```yaml - kubeadmconfig: - clientConfig: - oidc-issuer-url: "" - oidc-client-id: "" - oidc-client-secret: "" - oidc-extra-scope: profile,email,openid - ``` - - -
- - - - -Follow these steps to configure OIDC for managed EKS clusters. - -
- -1. In the Kubernetes pack, uncomment the lines in the `oidcIdentityProvider` parameter section of the Kubernetes pack, and enter your third-party provider details. - -
- - ```yaml - oidcIdentityProvider: - identityProviderConfigName: 'Spectro-docs' - issuerUrl: 'issuer-url' - clientId: 'user-client-id-from-Palette' - usernameClaim: "email" - usernamePrefix: "-" - groupsClaim: "groups" - groupsPrefix: "" - requiredClaims: - ``` - -2. Under the `clientConfig` parameter section of Kubernetes pack, uncomment the `oidc-` configuration lines. - -
- - ```yaml - clientConfig: - oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" - oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" - oidc-client-secret: yourSecretKeyHere - oidc-extra-scope: profile,email - ``` - -3. Provide third-party OIDC IDP details. - -
-
- - -
- - - - - -All versions less than v1.23.x are considered deprecated. Upgrade to a newer version to take advantage of new features. - - - -
- -
-
- - -# Terraform - - -You can reference Kubernetes in Terraform with the following code snippet. - -
- - - - - - -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "k8s" { - name = "kubernetes-aks" - version = "1.26" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - - - - - - - -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "k8s" { - name = "kubernetes-eks" - version = "1.24" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - - - - - - - -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "k8s" { - name = "kubernetes-gke" - version = "1.25.8" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - - - - - - - -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "k8s" { - name = "kubernetes-gke" - version = "1.24.4" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - - - - -# Resources - -- [Kubernetes](https://kubernetes.io/) - - - -- [Kubernetes Documentation](https://kubernetes.io/docs/concepts/overview/) - - - -- [Image Swap with Palette](/clusters/cluster-management/image-swap) - diff --git a/content/docs/06-integrations/00-kubernetes.md b/content/docs/06-integrations/00-kubernetes.md deleted file mode 100644 index 7684409e60..0000000000 --- a/content/docs/06-integrations/00-kubernetes.md +++ /dev/null @@ -1,1299 +0,0 @@ ---- -title: "Palette eXtended Kubernetes" -metaTitle: "Palette eXtended Kubernetes" -metaDescription: "Learn about the Palette eXtended Kubernetes pack and how you can use it with your host clusters." -hiddenFromNav: true -type: "integration" -category: ["kubernetes", 'amd64', 'fips'] -logoUrl: "https://registry.spectrocloud.com/v1/k8s-dashboard/blobs/sha256:2de5d88b2573af42d4cc269dff75744c4174ce47cbbeed5445e51a2edd8b7429?type=image/png" ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Palette eXtended Kubernetes - -The Palette eXtended Kubernetes (PXK) pack supports several [cloud and data center infrastructure providers](/clusters). This pack defines the default properties we use to deploy Kubernetes clusters and enables most of the Kubernetes hardening standards that the Center for Internet Security (CIS) recommends. - -We also support managed Kubernetes distributions for Elastic Kubernetes Service (EKS), Azure Kubernetes Service (AKS), Google Kubernetes Engine (GKE), and Tencent Kubernetes Engine (TKE). - -We offer PXK as a core pack in Palette. - -Review our [Maintenance Policy](/integrations/maintenance-policy) to learn about pack update and deprecation schedules. - -## What is PXK? - -Palette eXtended Kubernetes (PXK) is a customized version of the open-source Cloud Native Computing Foundation (CNCF) distribution of Kubernetes. This Kubernetes version can be deployed through Palette to all major infrastructure providers, public cloud providers, and private data center providers. This is the default distribution when deploying a Kubernetes cluster through Palette. 
You can also choose other Kubernetes distributions, such as MicroK8s, Konvoy, and more, if you prefer a different distribution.
-
-PXK differs from the upstream open-source Kubernetes version primarily in the carefully reviewed and applied hardening of the operating system (OS) and Kubernetes. The hardening ranges from removing unused kernel modules to using an OS configuration that follows industry best practices. Our custom Kubernetes configuration addresses common Kubernetes deployment security pitfalls.
-
-A benefit of Palette when used with PXK is the ability to apply different flavors of container storage interface (CSI) and container network interface (CNI) plugins. Other open-source Kubernetes distributions, such as MicroK8s, RKE2, and K3s, ship with a default CSI and CNI, and enabling a different interface requires additional effort on your part. PXK lets you select other interface plugins out of the box without any additional overhead or complexity.
-
-There are no changes to the Kubernetes source code, and we follow the same versioning schema as the upstream open-source Kubernetes distribution.
-
- - - - -We also offer Palette eXtended Kubernetes Edge (PXK-E) for Edge deployments. Refer to the [PXK-E glossary definition](/glossary-all#paletteextendedkubernetesedge(pxk-e)) to learn more about PXK-E. - - - -# Versions Supported - - - - - - -## Prerequisites - -- A minimum of 4 CPU and 4GB Memory. - -- Operating System (OS) dependencies as listed in the table. - -| OS Distribution | OS Version | Supports Kubernetes 1.27.x | -|---------------|------------|----------------------------| -| CentOS | 7.7 | ❌ | -| Ubuntu | 22.04 | ✅ | -| Ubuntu | 20.04 | ❌ | -| Ubuntu | 18.04 | ❌ | - - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `pack.palette.config.dashboard.identityProvider`| OIDC identity provider configuration. | -| `pack.podCIDR` | The CIDR range for Pods in the cluster. This should match the networking layer property. Default: `192.168.0.0/16`| -| `pack.serviceClusterIpRange` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| -| ``pack.palette.config.dashboard.identityProvider`` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](/integrations/kubernetes#configureoidcidentityprovider). | -| `kubeadmconfig.apiServer.extraArgs` | A list of additional apiServer flags you can set.| -| `kubeadmconfig.apiServer.extraVolumes` | A list of additional volumes to mount on the apiServer.| -| `kubeadmconfig.controllerManager.extraArgs` | A list of additional ControllerManager flags to set.| -| `kubeadmconfig.scheduler.extraArgs` | A list of additional Kube scheduler flags to set.| -| `kubeadmconfig.kubeletExtraArgs` | A list of kubelet arguments to set and copy to the nodes.| -| `kubeadmconfig.files` | A list of additional files to copy to the nodes.| -| `kubeadmconfig.preKubeadmCommands` | A list of additional commands to invoke **before** running kubeadm commands.| -| `kubeadmconfig.postKubeadmCommands` | A list of additional commands to invoke **after** running kubeadm commands.| -| `kubeadmconfig.clientConfig` | Settings to manually configure OIDC-based authentication when you choose a third-party (Custom) IDP. Refer to [Configure Custom OIDC](/integrations/kubernetes#configurecustomoidc). | -| `pack.serviceDomain` | The DNS name for the service domain in the cluster. Default: `cluster.local`.| - - -## Usage - -The Kubeadm configuration file is where you can do the following: - -
- -- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. - - As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](/clusters) guide and [Cluster Deployment Errors](https://docs.spectrocloud.com/troubleshooting/cluster-deployment). - - -- Manually configure a third-party OIDC IDP. For more information, check out [Configure Custom OIDC](/integrations/kubernetes#configurecustomoidc). - - -- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](/integrations/frp) guide. - - -#### Configuration Changes - -The PXK Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. - -
- -```yaml -palette: - config: - dashboard: - identityProvider: -``` - -
- -#### Example Kubeadm Configuration File - -```yaml -pack: - k8sHardening: True - podCIDR: "192.168.0.0/16" - serviceClusterIpRange: "10.96.0.0/12" - palette: - config: - dashboard: - identityProvider: palette -kubeadmconfig: - apiServer: - extraArgs: - secure-port: "6443" - anonymous-auth: "true" - profiling: "false" - disable-admission-plugins: "AlwaysAdmit" - default-not-ready-toleration-seconds: "60" - default-unreachable-toleration-seconds: "60" - enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" - admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" - audit-log-path: /var/log/apiserver/audit.log - audit-policy-file: /etc/kubernetes/audit-policy.yaml - audit-log-maxage: "30" - audit-log-maxbackup: "10" - audit-log-maxsize: "100" - authorization-mode: RBAC,Node - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extraVolumes: - - name: audit-log - hostPath: /var/log/apiserver - mountPath: /var/log/apiserver - pathType: DirectoryOrCreate - - name: audit-policy - hostPath: /etc/kubernetes/audit-policy.yaml - mountPath: /etc/kubernetes/audit-policy.yaml - readOnly: true - pathType: File - - name: pod-security-standard - hostPath: /etc/kubernetes/pod-security-standard.yaml - mountPath: /etc/kubernetes/pod-security-standard.yaml - readOnly: true - pathType: File - controllerManager: - extraArgs: - profiling: "false" - terminated-pod-gc-threshold: "25" - pod-eviction-timeout: "1m0s" - use-service-account-credentials: "true" - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - extraArgs: - profiling: "false" - kubeletExtraArgs: - read-only-port: "0" - event-qps: "0" - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - files: - - path: hardening/audit-policy.yaml - targetPath: /etc/kubernetes/audit-policy.yaml - targetOwner: "root:root" - targetPermissions: "0600" - - path: hardening/90-kubelet.conf - targetPath: /etc/sysctl.d/90-kubelet.conf - targetOwner: "root:root" - targetPermissions: "0600" - - targetPath: /etc/kubernetes/pod-security-standard.yaml - targetOwner: "root:root" - targetPermissions: "0600" - content: | - apiVersion: apiserver.config.k8s.io/v1 - kind: AdmissionConfiguration - plugins: - - name: PodSecurity - configuration: - apiVersion: pod-security.admission.config.k8s.io/v1 - kind: PodSecurityConfiguration - defaults: - enforce: "baseline" - enforce-version: "v1.26" - audit: "baseline" - audit-version: "v1.26" - warn: "restricted" - warn-version: "v1.26" - audit: "restricted" - audit-version: "v1.26" - exemptions: - # Array of authenticated usernames to exempt. - usernames: [] - # Array of runtime class names to exempt. - runtimeClasses: [] - # Array of namespaces to exempt. 
- namespaces: [kube-system] - - preKubeadmCommands: - - 'echo "====> Applying kernel parameters for Kubelet"' - - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' - postKubeadmCommands: - - 'echo "List of post kubeadm commands to be executed"' - - # Client configuration to add OIDC based authentication flags in kubeconfig - #clientConfig: - #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}" - #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}" - #oidc-client-secret: yourSecretClientSecretGoesHere - #oidc-extra-scope: profile,email -``` - -
-
-### Configure OIDC Identity Provider
-
-Platforms that use PXK can use the OIDC IDP feature, which offers the convenience of managing OIDC at the Kubernetes layer. The feature is particularly useful for environments that do not have their own IDP configured; in this scenario, you can use Palette as an IDP without configuring a third-party IDP. You can also configure OIDC at the tenant level to use another OIDC provider. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can add the OIDC configuration to your cluster profile.
-
-When you add the PXK pack to a cluster profile, Palette displays the OIDC IDP options listed below.
-
-All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings).
-
- -- **None**: This setting does not require OIDC configuration for the cluster. It displays in the YAML file as `noauth`. - -
- - - - We do not recommend choosing **None** in a production environment, as it may disable authentication for add-ons that rely on OIDC. - - - -- **Custom**: This is the default setting and does not require OIDC configuration. However, if desired, it allows you to specify a third-party OIDC provider by configuring OIDC statements in the YAML file as described in [Configure Custom OIDC](/integrations/kubernetes#configurecustomoidc). This setting displays in the YAML file as `none`. - - -- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. - - -- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](/user-management/saml-sso) guide. - - - -If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. - - - - -### Configure Custom OIDC - -The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory). - -
- - - - - - -Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. - -
- -1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. - - -```yaml -kubeadmconfig: - apiServer: - extraArgs: - oidc-issuer-url: "provider URL" - oidc-client-id: "client-id" - oidc-groups-claim: "groups" - oidc-username-claim: "email" -``` - -2. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. - - -```yaml -kubeadmconfig: - clientConfig: - oidc-issuer-url: "" - oidc-client-id: "" - oidc-client-secret: "" - oidc-extra-scope: profile,email,openid -``` - - -
- - - - -Follow these steps to configure OIDC for managed EKS clusters. - -
-
-1. In the Kubernetes pack, uncomment the lines in the `oidcIdentityProvider` parameter section and enter your third-party provider details.
-
-```yaml
-oidcIdentityProvider:
-  identityProviderConfigName: 'Spectro-docs'
-  issuerUrl: 'issuer-url'
-  clientId: 'user-client-id-from-Palette'
-  usernameClaim: "email"
-  usernamePrefix: "-"
-  groupsClaim: "groups"
-  groupsPrefix: ""
-  requiredClaims:
-```
-
-2. Under the `clientConfig` parameter section of the Kubernetes pack, uncomment the `oidc-` configuration lines.
-
-```yaml
-clientConfig:
-  oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}"
-  oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}"
-  oidc-client-secret: yourSecretKeyHere
-  oidc-extra-scope: profile,email
-```
-
-3. Provide third-party OIDC IDP details.
-
- -
-
-
-### Use RBAC with OIDC
-
-You can create a role binding that uses individual users as the subject, or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings).
-
-Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group.
-
-In this example, Palette is used as the IDP, and all users in the `dev-east-2` group would inherit the `cluster-admin` role. A manifest sketch for this binding follows the screenshot.
-
-![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png)
-
-
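-
-A minimal sketch of the binding described above is shown below. Depending on whether you scope the permissions to a single namespace or the whole cluster, this can be a RoleBinding or a ClusterRoleBinding; the sketch uses a ClusterRoleBinding, and the binding name is a placeholder.
-
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: dev-east-2-cluster-admin
-subjects:
-  # The group name must match the group assigned in the OIDC provider's configuration.
-  - kind: Group
-    name: dev-east-2
-    apiGroup: rbac.authorization.k8s.io
-roleRef:
-  kind: ClusterRole
-  name: cluster-admin
-  apiGroup: rbac.authorization.k8s.io
-```
-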
- - - -## Prerequisites - -- A minimum of 4 CPU and 4GB Memory. - -- Operating System (OS) dependencies as listed in the table. - -| OS Distribution | OS Version | Supports Kubernetes 1.26.x | -|---------------|------------|----------------------------| -| CentOS | 7.7 | ✅ | -| Ubuntu | 22.04 | ✅ | -| Ubuntu | 20.04 | ❌ | -| Ubuntu | 18.04 | ❌ | - - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| `pack.palette.config.dashboard.identityProvider`| OIDC identity provider configuration. | -| `pack.podCIDR` | The CIDR range for Pods in the cluster. This should match the networking layer property. Default: `192.168.0.0/16`| -| `pack.serviceClusterIpRange` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| -| ``pack.palette.config.dashboard.identityProvider`` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](/integrations/kubernetes#configureoidcidentityprovider). | -| `kubeadmconfig.apiServer.extraArgs` | A list of additional apiServer flags you can set.| -| `kubeadmconfig.apiServer.extraVolumes` | A list of additional volumes to mount on the apiServer.| -| `kubeadmconfig.controllerManager.extraArgs` | A list of additional ControllerManager flags to set.| -| `kubeadmconfig.scheduler.extraArgs` | A list of additional Kube scheduler flags to set.| -| `kubeadmconfig.kubeletExtraArgs` | A list of kubelet arguments to set and copy to the nodes.| -| `kubeadmconfig.files` | A list of additional files to copy to the nodes.| -| `kubeadmconfig.preKubeadmCommands` | A list of additional commands to invoke **before** running kubeadm commands.| -| `kubeadmconfig.postKubeadmCommands` | A list of additional commands to invoke **after** running kubeadm commands.| -| `kubeadmconfig.clientConfig` | Settings to manually configure OIDC-based authentication when you choose a third-party (Custom) IDP. Refer to [Configure Custom OIDC](/integrations/kubernetes#configurecustomoidc). | -| `pack.serviceDomain` | The DNS name for the service domain in the cluster. Default: `cluster.local`.| - - -## Usage - -The Kubeadm configuration file is where you can do the following: - -
- -- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. - - As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](/clusters) guide and [Cluster Deployment Errors](https://docs.spectrocloud.com/troubleshooting/cluster-deployment). - - -- Manually configure a third-party OIDC IDP. For more information, check out [Configure Custom OIDC](/integrations/kubernetes#configurecustomoidc). - - -- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](/integrations/frp) guide. - - -#### Configuration Changes - -The PXK Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. - -
- -```yaml -palette: - config: - dashboard: - identityProvider: -``` - -
- -#### Example Kubeadm Configuration File - -```yaml -pack: - k8sHardening: True - podCIDR: "192.168.0.0/16" - serviceClusterIpRange: "10.96.0.0/12" - palette: - config: - dashboard: - identityProvider: palette -kubeadmconfig: - apiServer: - extraArgs: - secure-port: "6443" - anonymous-auth: "true" - profiling: "false" - disable-admission-plugins: "AlwaysAdmit" - default-not-ready-toleration-seconds: "60" - default-unreachable-toleration-seconds: "60" - enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" - admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" - audit-log-path: /var/log/apiserver/audit.log - audit-policy-file: /etc/kubernetes/audit-policy.yaml - audit-log-maxage: "30" - audit-log-maxbackup: "10" - audit-log-maxsize: "100" - authorization-mode: RBAC,Node - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extraVolumes: - - name: audit-log - hostPath: /var/log/apiserver - mountPath: /var/log/apiserver - pathType: DirectoryOrCreate - - name: audit-policy - hostPath: /etc/kubernetes/audit-policy.yaml - mountPath: /etc/kubernetes/audit-policy.yaml - readOnly: true - pathType: File - - name: pod-security-standard - hostPath: /etc/kubernetes/pod-security-standard.yaml - mountPath: /etc/kubernetes/pod-security-standard.yaml - readOnly: true - pathType: File - controllerManager: - extraArgs: - profiling: "false" - terminated-pod-gc-threshold: "25" - pod-eviction-timeout: "1m0s" - use-service-account-credentials: "true" - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - extraArgs: - profiling: "false" - kubeletExtraArgs: - read-only-port: "0" - event-qps: "0" - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - files: - - path: hardening/audit-policy.yaml - targetPath: /etc/kubernetes/audit-policy.yaml - targetOwner: "root:root" - targetPermissions: "0600" - - path: hardening/90-kubelet.conf - targetPath: /etc/sysctl.d/90-kubelet.conf - targetOwner: "root:root" - targetPermissions: "0600" - - targetPath: /etc/kubernetes/pod-security-standard.yaml - targetOwner: "root:root" - targetPermissions: "0600" - content: | - apiVersion: apiserver.config.k8s.io/v1 - kind: AdmissionConfiguration - plugins: - - name: PodSecurity - configuration: - apiVersion: pod-security.admission.config.k8s.io/v1 - kind: PodSecurityConfiguration - defaults: - enforce: "baseline" - enforce-version: "v1.26" - audit: "baseline" - audit-version: "v1.26" - warn: "restricted" - warn-version: "v1.26" - audit: "restricted" - audit-version: "v1.26" - exemptions: - # Array of authenticated usernames to exempt. - usernames: [] - # Array of runtime class names to exempt. - runtimeClasses: [] - # Array of namespaces to exempt. 
- namespaces: [kube-system] - - preKubeadmCommands: - - 'echo "====> Applying kernel parameters for Kubelet"' - - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' - postKubeadmCommands: - - 'echo "List of post kubeadm commands to be executed"' - - # Client configuration to add OIDC based authentication flags in kubeconfig - #clientConfig: - #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}" - #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}" - #oidc-client-secret: yourSecretClientSecretGoesHere - #oidc-extra-scope: profile,email -``` - -
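For reference, when the `clientConfig` section above is populated for a custom IDP, the OIDC flags end up in the kubeconfig used by `kubectl`. The snippet below is only a rough sketch of what such a user entry can look like using the legacy `kubectl` OIDC auth-provider format; the issuer URL and client ID are hypothetical placeholders, and the kubeconfig Palette actually generates may differ (newer `kubectl` versions use an exec credential plugin instead).

```yaml
# Illustrative only: a kubeconfig user entry wired up for OIDC.
# The issuer URL and client ID below are placeholders, not real values.
users:
  - name: oidc-user
    user:
      auth-provider:
        name: oidc
        config:
          idp-issuer-url: https://dev-123456.okta.com/oauth2/default # hypothetical issuer
          client-id: 0oa1example                                     # hypothetical client ID
          client-secret: yourSecretClientSecretGoesHere
          refresh-token: <refresh-token-from-your-idp>
```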
- -### Configure OIDC Identity Provider - -Platforms that use PXK can use the OIDC IDP feature, which offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured, because you can use Palette as the IDP without setting up a third-party provider. You can also use other OIDC providers by configuring OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. - -When you add the PXK pack to a cluster profile, Palette displays the OIDC IDP options listed below. - -All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). -
- -- **None**: This setting does not require OIDC configuration for the cluster. It displays in the YAML file as `noauth`. - -
- - - - We do not recommend choosing **None** in a production environment, as it may disable authentication for add-ons that rely on OIDC. - - - -- **Custom**: This is the default setting and does not require OIDC configuration. However, if desired, it allows you to specify a third-party OIDC provider by configuring OIDC statements in the YAML file as described in [Configure Custom OIDC](/integrations/kubernetes#configurecustomoidc). This setting displays in the YAML file as `none`. - - -- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. - - -- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](/user-management/saml-sso) guide. - - - -If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. - - - - -### Configure Custom OIDC - -The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory). - -
- - - - - - -Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. - -
- -1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. - - -```yaml -kubeadmconfig: - apiServer: - extraArgs: - oidc-issuer-url: "provider URL" - oidc-client-id: "client-id" - oidc-groups-claim: "groups" - oidc-username-claim: "email" -``` - -2. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. - - -```yaml -kubeadmconfig: - clientConfig: - oidc-issuer-url: "" - oidc-client-id: "" - oidc-client-secret: "" - oidc-extra-scope: profile,email,openid -``` - - -
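As an illustration, here is what the two sections above might look like once filled in for a hypothetical Okta tenant. The issuer URL and client ID are placeholders; substitute the values issued by your own provider.

```yaml
kubeadmconfig:
  apiServer:
    extraArgs:
      oidc-issuer-url: "https://dev-123456.okta.com/oauth2/default" # hypothetical issuer URL
      oidc-client-id: "0oa1example"                                 # hypothetical client ID
      oidc-groups-claim: "groups"
      oidc-username-claim: "email"
  clientConfig:
    oidc-issuer-url: "https://dev-123456.okta.com/oauth2/default"
    oidc-client-id: "0oa1example"
    oidc-client-secret: yourSecretClientSecretGoesHere
    oidc-extra-scope: profile,email,openid
```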
- - - - -Follow these steps to configure OIDC for managed EKS clusters. - -
- -1. In the Kubernetes pack YAML, uncomment the lines in the `oidcIdentityProvider` parameter section and enter your third-party provider details. - -```yaml -oidcIdentityProvider: - identityProviderConfigName: 'Spectro-docs' - issuerUrl: 'issuer-url' - clientId: 'user-client-id-from-Palette' - usernameClaim: "email" - usernamePrefix: "-" - groupsClaim: "groups" - groupsPrefix: "" - requiredClaims: -``` - -2. Under the `clientConfig` parameter section of the Kubernetes pack, uncomment the `oidc-` configuration lines. - -```yaml -clientConfig: - oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" - oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" - oidc-client-secret: yourSecretClientSecretGoesHere - oidc-extra-scope: profile,email -``` - -3. Provide third-party OIDC IDP details. -
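For example, a filled-in `oidcIdentityProvider` block for a hypothetical Okta tenant could look like the following. All provider-specific values are placeholders; note that `groupsPrefix` controls how IDP group names surface as Kubernetes RBAC subjects.

```yaml
oidcIdentityProvider:
  identityProviderConfigName: 'okta-example'                  # any descriptive name
  issuerUrl: 'https://dev-123456.okta.com/oauth2/default'     # hypothetical issuer URL
  clientId: '0oa1example'                                     # hypothetical client ID
  usernameClaim: "email"
  usernamePrefix: "-"
  groupsClaim: "groups"
  groupsPrefix: "oidc:"   # a group named dev-east-2 becomes the RBAC subject oidc:dev-east-2
  requiredClaims:
```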
- -
- - -### Use RBAC with OIDC - -You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - -Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. - -In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. - -![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) - - -
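A minimal sketch of the binding described above, granting the `dev-east-2` group the `cluster-admin` role, is shown below. The binding name is arbitrary.

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dev-east-2-cluster-admin   # arbitrary binding name
subjects:
  - kind: Group
    name: dev-east-2               # group name as assigned in the OIDC provider
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
```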
- - - - - -## Prerequisites - -- A minimum of 4 CPU and 4GB Memory. - -- Operating System (OS) dependencies as listed in the table. - -| OS Distribution | OS Version | Supports Kubernetes 1.25.x | -|---------------|------------|----------------------------| -| CentOS | 7.7 | ✅ | -| Ubuntu | 22.04 | ✅ | -| Ubuntu | 20.04 | ❌ | -| Ubuntu | 18.04 | ❌ | - - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| ``pack.podCIDR`` | The CIDR range for Pods in the cluster. This should match the networking layer property. Default: `192.168.0.0/16`| -| ``pack.serviceClusterIpRange`` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| -| ``pack.palette.config.dashboard.identityProvider`` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](/integrations/kubernetes#configureoidcidentityprovider). | -| ``kubeadmconfig.apiServer.extraArgs`` | A list of additional apiServer flags you can set.| -| ``kubeadmconfig.apiServer.extraVolumes`` | A list of additional volumes to mount on the apiServer.| -| ``kubeadmconfig.controllerManager.extraArgs`` | A list of additional ControllerManager flags to set.| -| ``kubeadmconfig.scheduler.extraArgs`` | A list of additional Kube scheduler flags to set.| -| ``kubeadmconfig.kubeletExtraArgs`` | A list of kubelet arguments to set and copy to the nodes.| -| ``kubeadmconfig.files`` | A list of additional files to copy to the nodes.| -| ``kubeadmconfig.preKubeadmCommands`` | A list of additional commands to invoke **before** running kubeadm commands.| -| ``kubeadmconfig.postKubeadmCommands`` | A list of additional commands to invoke **after** running kubeadm commands.| -| ``kubeadmconfig.clientConfig`` | Settings to manually configure OIDC-based authentication when you choose a third-party (Custom) IDP. Refer to [Configure Custom OIDC](/integrations/kubernetes#configurecustomoidc). | -| ``pack.serviceDomain`` | The DNS name for the service domain in the cluster. Default: ``cluster.local``.| - -## Usage - -The Kubeadm configuration file is where you can do the following: - -
- -- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. - - As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](/clusters) guide and [Cluster Deployment Errors](https://docs.spectrocloud.com/troubleshooting/cluster-deployment). - - -- Manually configure a third-party OIDC IDP. For more information, check out [Configure Custom OIDC](/integrations/kubernetes#configurecustomoidc). - - -- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](/integrations/frp) guide. - - -#### Configuration Changes - -The PXK Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. - -
- -```yaml -palette: - config: - dashboard: - identityProvider: -``` - -
- -#### Example Kubeadm Configuration File - -```yaml -pack: - k8sHardening: True - podCIDR: "192.168.0.0/16" - serviceClusterIpRange: "10.96.0.0/12" - palette: - config: - dashboard: - identityProvider: palette -kubeadmconfig: - apiServer: - extraArgs: - secure-port: "6443" - anonymous-auth: "true" - profiling: "false" - disable-admission-plugins: "AlwaysAdmit" - default-not-ready-toleration-seconds: "60" - default-unreachable-toleration-seconds: "60" - enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" - admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" - audit-log-path: /var/log/apiserver/audit.log - audit-policy-file: /etc/kubernetes/audit-policy.yaml - audit-log-maxage: "30" - audit-log-maxbackup: "10" - audit-log-maxsize: "100" - authorization-mode: RBAC,Node - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extraVolumes: - - name: audit-log - hostPath: /var/log/apiserver - mountPath: /var/log/apiserver - pathType: DirectoryOrCreate - - name: audit-policy - hostPath: /etc/kubernetes/audit-policy.yaml - mountPath: /etc/kubernetes/audit-policy.yaml - readOnly: true - pathType: File - - name: pod-security-standard - hostPath: /etc/kubernetes/pod-security-standard.yaml - mountPath: /etc/kubernetes/pod-security-standard.yaml - readOnly: true - pathType: File - controllerManager: - extraArgs: - profiling: "false" - terminated-pod-gc-threshold: "25" - pod-eviction-timeout: "1m0s" - use-service-account-credentials: "true" - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - extraArgs: - profiling: "false" - kubeletExtraArgs: - read-only-port: "0" - event-qps: "0" - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - files: - - path: hardening/audit-policy.yaml - targetPath: /etc/kubernetes/audit-policy.yaml - targetOwner: "root:root" - targetPermissions: "0600" - - path: hardening/90-kubelet.conf - targetPath: /etc/sysctl.d/90-kubelet.conf - targetOwner: "root:root" - targetPermissions: "0600" - - targetPath: /etc/kubernetes/pod-security-standard.yaml - targetOwner: "root:root" - targetPermissions: "0600" - content: | - apiVersion: apiserver.config.k8s.io/v1 - kind: AdmissionConfiguration - plugins: - - name: PodSecurity - configuration: - apiVersion: pod-security.admission.config.k8s.io/v1 - kind: PodSecurityConfiguration - defaults: - enforce: "baseline" - enforce-version: "v1.25" - audit: "baseline" - audit-version: "v1.25" - warn: "restricted" - warn-version: "v1.25" - audit: "restricted" - audit-version: "v1.25" - exemptions: - # Array of authenticated usernames to exempt. - usernames: [] - # Array of runtime class names to exempt. - runtimeClasses: [] - # Array of namespaces to exempt. 
- namespaces: [kube-system] - - preKubeadmCommands: - - 'echo "====> Applying kernel parameters for Kubelet"' - - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' - - # Client configuration to add OIDC based authentication flags in kubeconfig - #clientConfig: - #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}" - #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}" - #oidc-client-secret: yourSecretClientSecretGoesHere - #oidc-extra-scope: profile,email - ``` - -
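The configuration above mounts an audit policy at `/etc/kubernetes/audit-policy.yaml`. The pack ships its own hardened `hardening/audit-policy.yaml`, so the snippet below is only a minimal illustration of the audit policy format, not the pack's actual policy.

```yaml
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
  # Skip read-only requests to keep log volume down (illustrative choice only).
  - level: None
    verbs: ["get", "list", "watch"]
  # Record request metadata for everything else.
  - level: Metadata
```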
- -### Configure OIDC Identity Provider - -Platforms that use PXK can use the OIDC IDP feature, which offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured, because you can use Palette as the IDP without setting up a third-party provider. You can also use other OIDC providers by configuring OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. - -When you add the PXK pack to a cluster profile, Palette displays the OIDC IDP options listed below. - -All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). -
- -- **None**: This is the default setting and there is nothing to configure. This setting displays in the YAML file as `noauth`. - -
- - - - We do not recommend choosing **None** in a production environment, as it may disable authentication for add-ons that rely on OIDC. - - - -- **Custom**: This setting allows you to specify a third-party OIDC provider by configuring OIDC statements in the Kubeadm configuration file as described in [Configure Custom OIDC](/integrations/kubernetes#configurecustomoidc). This setting displays in the YAML file as `none`. - - -- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. - - -- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](/user-management/saml-sso) guide. - - - -If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. - - - -### Configure Custom OIDC - -The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory). - -
- - - - - - -Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. - -
- -1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. - - -```yaml -kubeadmconfig: - apiServer: - extraArgs: - oidc-issuer-url: "provider URL" - oidc-client-id: "client-id" - oidc-groups-claim: "groups" - oidc-username-claim: "email" -``` - -2. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. - - -```yaml -kubeadmconfig: - clientConfig: - oidc-issuer-url: "" - oidc-client-id: "" - oidc-client-secret: "" - oidc-extra-scope: profile,email,openid -``` - -3. Provide third-party OIDC IDP details. Refer to the [SAML & SSO Setup](/user-management/saml-sso) for guidance on configuring a third party IDP with Palette. - -
- - - - -Follow these steps to configure OIDC for managed EKS clusters. - -
- -1. In the Kubernetes pack YAML, uncomment the lines in the `oidcIdentityProvider` parameter section and enter your third-party provider details. - -```yaml -oidcIdentityProvider: - identityProviderConfigName: 'Spectro-docs' - issuerUrl: 'issuer-url' - clientId: 'user-client-id-from-Palette' - usernameClaim: "email" - usernamePrefix: "-" - groupsClaim: "groups" - groupsPrefix: "" - requiredClaims: -``` - -2. Under the `clientConfig` parameter section of the Kubernetes pack, uncomment the `oidc-` configuration lines. - -```yaml -clientConfig: - oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" - oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" - oidc-client-secret: yourSecretClientSecretGoesHere - oidc-extra-scope: profile,email -``` -
- -
- -### Use RBAC with OIDC - -You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - -Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. - -In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. - -![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) - - -
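Besides group subjects, you can also bind a single user. Because the examples above use an email-based username claim, the user name in the binding is the email address presented by the IDP. The user, namespace, and binding name below are hypothetical.

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: jane-view                  # hypothetical binding name
  namespace: team-apps             # hypothetical namespace
subjects:
  - kind: User
    name: jane@example.com         # hypothetical user; matches the email username claim
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: view                       # built-in read-only role
  apiGroup: rbac.authorization.k8s.io
```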
- - - - - -## Prerequisites - -- A minimum of 4 CPU and 4GB Memory. - -- Operating System (OS) dependencies as listed in the table. - -| OS Distribution | OS Version | Supports Kubernetes 1.24.x | -|---------------|------------|----------------------------| -| CentOS | 7.7 | ✅ | -| Ubuntu | 22.04 | ❌ | -| Ubuntu | 20.04 | ✅ | -| Ubuntu | 18.04 | ❌ | - - -## Parameters - -| Parameter | Description | -|-----------|-------------| -| ``pack.podCIDR`` | The CIDR range for Pods in cluster. This should match the networking layer property. Default: `192.168.0.0/16`| -| ``pack.serviceClusterIpRange`` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| -| ``kubeadmconfig.apiServer.extraArgs`` | A list of additional apiServer flags you can set.| -| ``kubeadmconfig.apiServer.extraVolumes`` | A list of additional volumes to mount on apiServer.| -| ``kubeadmconfig.controllerManager.extraArgs`` | A list of additional ControllerManager flags to set.| -| ``kubeadmconfig.scheduler.extraArgs`` | A list of additional Kube scheduler flags to set.| -| ``kubeadmconfig.kubeletExtraArgs`` | A list of kubelet arguments to set and copy to the nodes.| -| ``kubeadmconfig.files`` | A list of additional files to copy to the nodes. | -| ``kubeadmconfig.preKubeadmCommands`` | A list of additional commands to invoke **before** running kubeadm commands.| -| ``kubeadmconfig.postKubeadmCommands`` | A list of additional commands to invoke **after** running kubeadm commands.| -| ``pack.serviceDomain`` | The DNS name for the service domain in the cluster. Default: ``cluster.local``.| - - -## Usage - -The Kubeadm configuration file is where you can do the following: - -
- -- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. - - As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](/clusters) guide and [Cluster Deployment Errors](https://docs.spectrocloud.com/troubleshooting/cluster-deployment). - - -- Manually configure a third-party OIDC IDP. For more information, check out [Configure Custom OIDC](/integrations/kubernetes#configurecustomoidc). - - -- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](/integrations/frp) guide. - - -#### Configuration Changes - -The PXK Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. - -
- -```yaml -palette: - config: - dashboard: - identityProvider: -``` - -
- -#### Example Kubeadm Configuration File - -```yaml -pack: - k8sHardening: True - podCIDR: "192.168.0.0/16" - serviceClusterIpRange: "10.96.0.0/12" - palette: - config: - dashboard: - identityProvider: noauth -kubeadmconfig: - apiServer: - extraArgs: - secure-port: "6443" - anonymous-auth: "true" - profiling: "false" - disable-admission-plugins: "AlwaysAdmit" - default-not-ready-toleration-seconds: "60" - default-unreachable-toleration-seconds: "60" - enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurityPolicy" - audit-log-path: /var/log/apiserver/audit.log - audit-policy-file: /etc/kubernetes/audit-policy.yaml - audit-log-maxage: "30" - audit-log-maxbackup: "10" - audit-log-maxsize: "100" - authorization-mode: RBAC,Node - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extraVolumes: - - name: audit-log - hostPath: /var/log/apiserver - mountPath: /var/log/apiserver - pathType: DirectoryOrCreate - - name: audit-policy - hostPath: /etc/kubernetes/audit-policy.yaml - mountPath: /etc/kubernetes/audit-policy.yaml - readOnly: true - pathType: File - controllerManager: - extraArgs: - profiling: "false" - terminated-pod-gc-threshold: "25" - pod-eviction-timeout: "1m0s" - use-service-account-credentials: "true" - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - extraArgs: - profiling: "false" - kubeletExtraArgs: - read-only-port: "0" - event-qps: "0" - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - files: - - path: hardening/audit-policy.yaml - targetPath: /etc/kubernetes/audit-policy.yaml - targetOwner: "root:root" - targetPermissions: "0600" - - path: hardening/privileged-psp.yaml - targetPath: /etc/kubernetes/hardening/privileged-psp.yaml - targetOwner: "root:root" - targetPermissions: "0600" - - path: hardening/90-kubelet.conf - targetPath: /etc/sysctl.d/90-kubelet.conf - targetOwner: "root:root" - targetPermissions: "0600" - preKubeadmCommands: - - 'echo "====> Applying kernel parameters for Kubelet"' - - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' - postKubeadmCommands: - - 'export KUBECONFIG=/etc/kubernetes/admin.conf && [ -f "$KUBECONFIG" ] && { echo " ====> Applying PodSecurityPolicy" ; until $(kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null ); do echo "Failed to apply PodSecurityPolicies, will retry in 5s" ; sleep 5 ; done ; } || echo "Skipping PodSecurityPolicy for worker nodes"' - # Client configuration to add OIDC based authentication flags in kubeconfig - #clientConfig: - #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}" - #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}" - #oidc-client-secret: yourSecretClientSecretGoesHere - #oidc-extra-scope: profile,email - ``` -
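The 1.24.x configuration above copies `hardening/privileged-psp.yaml` to the node and applies it after kubeadm runs. That file is provided by the pack, so the manifest below is only a sketch of what a permissive PodSecurityPolicy of this kind typically looks like (modeled on the upstream privileged example); the pack's actual file may differ. PodSecurityPolicy is removed in Kubernetes 1.25 and later.

```yaml
# Illustrative only: an allow-all PodSecurityPolicy similar in spirit to privileged-psp.yaml.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: privileged
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities: ["*"]
  volumes: ["*"]
  hostNetwork: true
  hostPorts:
    - min: 0
      max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
```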
- -### Configure OIDC Identity Provider - -Platforms that use PXK can use the OIDC IDP feature, which offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured, because you can use Palette as the IDP without setting up a third-party provider. You can also use other OIDC providers by configuring OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. - -When you add the PXK pack to a cluster profile, Palette displays the OIDC IDP options listed below. - -All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). -
- -- **None**: This is the default setting and there is nothing to configure. This setting displays in the YAML file as `noauth`. - -
- - - - We do not recommend choosing **None** in a production environment, as it may disable authentication for add-ons that rely on OIDC. - - - -- **Custom**: This setting allows you to specify a third-party OIDC provider by configuring OIDC statements in the Kubeadm configuration file as described in [Configure Custom OIDC](/integrations/kubernetes#configurecustomoidc). This setting displays in the YAML file as `none`. - - -- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. - - -- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](/user-management/saml-sso) guide. - - - -If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. - - - - -### Configure Custom OIDC - -The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory). - -
- - - - - - -Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](/clusters/public-cloud/azure/aks/#configureanazureactivedirectory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. - -
- -1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. - - -```yaml -kubeadmconfig: - apiServer: - extraArgs: - oidc-issuer-url: "provider URL" - oidc-client-id: "client-id" - oidc-groups-claim: "groups" - oidc-username-claim: "email" -``` - -2. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. - - -```yaml -kubeadmconfig: - clientConfig: - oidc-issuer-url: "" - oidc-client-id: "" - oidc-client-secret: "" - oidc-extra-scope: profile,email,openid -``` - -3. Provide third-party OIDC IDP details. - -
- - - - -Follow these steps to configure OIDC for managed EKS clusters. - -
- -1. In the Kubernetes pack YAML, uncomment the lines in the `oidcIdentityProvider` parameter section and enter your third-party provider details. - -```yaml -oidcIdentityProvider: - identityProviderConfigName: 'Spectro-docs' - issuerUrl: 'issuer-url' - clientId: 'user-client-id-from-Palette' - usernameClaim: "email" - usernamePrefix: "-" - groupsClaim: "groups" - groupsPrefix: "" - requiredClaims: -``` - -2. Under the `clientConfig` parameter section of the Kubernetes pack, uncomment the `oidc-` configuration lines. - -```yaml -clientConfig: - oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" - oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" - oidc-client-secret: yourSecretClientSecretGoesHere - oidc-extra-scope: profile,email -``` -
- -
- -### Use RBAC with OIDC - -You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](/clusters/cluster-management/cluster-rbac/#createrolebindings). - -Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. - -In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. - -![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) - - -
- - - - - - - -All versions less than v1.23.x are considered deprecated. Upgrade to a newer version to take advantage of new features. - - - - - -
- - -# Terraform - -You can reference Kubernetes in Terraform with the following code snippet. - -
- -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "k8s" { - name = "kubernetes" - version = "1.26.4" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - -# Resources - -- [Kubernetes](https://kubernetes.io/) - - - -- [Kubernetes Documentation](https://kubernetes.io/docs/concepts/overview/) - - - -- [Image Swap with Palette](/clusters/cluster-management/image-swap) - diff --git a/content/docs/06-integrations/00-kubevious.md b/content/docs/06-integrations/00-kubevious.md deleted file mode 100644 index 49139b99bf..0000000000 --- a/content/docs/06-integrations/00-kubevious.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: 'Kubevious' -metaTitle: 'Kubevious' -metaDescription: 'Kubevious Monitoring pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -hideToC: false -category: ['monitoring', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/kubevious/blobs/sha256:5e33d7b51b1317a834b4552d96fc1cc8463000a7eedbcb4b784ea07236f3d7f7?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Kubevious - -Kubevious integration provides a graphical interface that renders easy to understand, application-centric Kubernetes configurations. - -## Versions Supported - - - - -* **1.0.10** - - - - -* **0.8.15** - - - - - * **0.5.9** - - - - -## Components - -This integration deploys the following components: - -* Deployment -* MySql DB -* UI -* Parser - -# Ingress - -Follow the steps below to configure Ingress on Kubevious, according to the corresponding version - -1. Within the manifest, find the kubevious section **user** > **interface** > **service** > **type** and confirm/change, according to the Kubevious version as listed in the table below. - - | **Versions** | **Parameters** | **Action** | - | ------------ | -------------------------------- | -------------------------------------------------------------------- | - | **1.0.10** | ui: service: type: **ClusterIP** | Confirm that it states **ClusterIP**. | - | **0.8.15** | ui: service: type: **ClusterIP** | Confirm that it states **ClusterIP**. | - | **0.5.9** | ui: svcType: **LoadBalancer** | Change kubevious.ui.svcType from **LoadBalancer** to **Cluster IP**. | - -2. Configure Ingress - * Enable Ingress; change enabled from *false* to **true**. - * Set Ingress rules like annotations, path, hosts, etc. - -With these configuration changes, you can access the Kubevious service on the Ingress Controller LoadBalancer hostname/IP. 
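As a rough sketch, the relevant part of the Kubevious manifest might end up looking like the following after step 2. The annotation, host, and path values are placeholders, and the exact key layout depends on the chart version packaged with the pack.

```yaml
ui:
  service:
    type: ClusterIP
  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: nginx     # placeholder; use your Ingress controller's class
    hosts:
      - kubevious.example.internal           # placeholder hostname
    path: /
```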
- -## References - -https://github.com/kubevious/kubevious diff --git a/content/docs/06-integrations/00-kubevirt.md b/content/docs/06-integrations/00-kubevirt.md deleted file mode 100644 index 4725c6759c..0000000000 --- a/content/docs/06-integrations/00-kubevirt.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: 'KubeVirt' -metaTitle: 'KubeVirt' -metaDescription: 'Choosing KubeVirt within the Palette console' -hiddenFromNav: true -type: "integration" -category: ['system app', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/kubevirt/blobs/sha256:185e7a7658c05ab478f2822b080a7e21da9113b4a8bf5fb7fb3338d9a5796eed?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -KubeVirt is a virtual machine management add-on for Kubernetes clusters. Create predefine virtual machines using KubeVirt, and Palette will provision KubeVirt as an Add-on Pack to manage the VM resources within the orchestrator. - -
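To give a sense of what the pack manages, below is a minimal VirtualMachine manifest of the kind you would create once KubeVirt is running. It uses the upstream CirrOS demo container disk; the VM name and sizing are arbitrary.

```yaml
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: demo-vm                      # arbitrary name
spec:
  running: true
  template:
    metadata:
      labels:
        kubevirt.io/vm: demo-vm
    spec:
      domain:
        devices:
          disks:
            - name: containerdisk
              disk:
                bus: virtio
        resources:
          requests:
            memory: 1Gi
      volumes:
        - name: containerdisk
          containerDisk:
            image: quay.io/kubevirt/cirros-container-disk-demo   # upstream demo image
```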
- -## Version Supported - - - - -**0.51.0** - - - - -**0.55.0** - - - - -
-
- -## Notable Parameters - -```yaml -manifests: - KubeVirt-operator: - # Enable Emulation (when no nested virtualization enabled) - useEmulation: true - KubeVirt-cr: - contents: | - apiVersion: KubeVirt.io/v1 - kind: KubeVirt - metadata: - name: KubeVirt - namespace: KubeVirt - spec: - certificateRotateStrategy: {} - configuration: - developerConfiguration: - featureGates: [] - customizeComponents: {} - imagePullPolicy: IfNotPresent - workloadUpdateStrategy: {} -``` - -## References - -[Installing KubeVirt on Kubernetes](https://KubeVirt.io/user-guide/operations/installation/#installing-KubeVirt-on-kubernetes) - -[Github KubeVirt](https://github.com/KubeVirt/KubeVirt/releases/tag/v0.51.0) diff --git a/content/docs/06-integrations/00-kubewatch.md b/content/docs/06-integrations/00-kubewatch.md deleted file mode 100644 index 26b2c91cdd..0000000000 --- a/content/docs/06-integrations/00-kubewatch.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: 'kube-watch' -metaTitle: 'kube-watch' -metaDescription: 'kube-watch monitoring pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['monitoring', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/kubewatch/blobs/sha256:a277fb90357df9cbffe98eea1ed100fba1b17970b8fc056d210c4f7bfe4f17a3?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Kubewatch - -Kubewatch is a Kubernetes watcher that currently publishes notification to available collaboration hubs/notification channels. It is run in the k8s cluster for monitoring resource changes and event notifications are obtained through webhooks. The supported webhooks are: - - slack - - hipchat - - mattermost - - flock - - webhook - - smtp - -## Usage: - - kubewatch [flags] - kubewatch [command] - - -## Versions Supported - - - - - -**1.0.7** - - - - -## References - -https://github.com/bitnami-labs/kubewatch/blob/master/README.md diff --git a/content/docs/06-integrations/00-longhorn.md b/content/docs/06-integrations/00-longhorn.md deleted file mode 100644 index 90e7a0c4a1..0000000000 --- a/content/docs/06-integrations/00-longhorn.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: 'Longhorn' -metaTitle: 'Longhorn Integration with Palette' -metaDescription: 'Longhorn pack in Palette' -hiddenFromNav: true -type: "integration" -category: ["storage", 'amd64', 'fips'] -logoUrl: 'https://registry.spectrocloud.com/v1/csi-longhorn/blobs/sha256:8257bd6697941139cea8ace907e25b3859cb8de48f965a5b6011d518cad0a2db?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Longhorn Overview - -Longhorn is a lightweight distributed block storage system for cloud native storage Kubernetes that allows you to replicate storage to Kubernetes clusters. Once Longhorn is installed, it adds persistent volume support to the Kubernetes cluster using containers and microservices. - -Longhorn creates a dedicated storage controller for each block device volume and replicates the volume across multiple nodes. - -# Version Supported - - - - - - -## Prerequisites - -- Kubernetes cluster is 1.21 or higher. 
- -## Parameters - -The table lists commonly used parameters you can configure when adding this pack. - -| Parameter | Description | Default | -|-------------------------|--------------------------------------------------------|---------------------------------------------| -| defaultClass | The volume type to be used. | `true` | -| defaultFsType | The default file system. | `ext4` | -| defaultClassReplicaCount| The default number of copies of data store in your cluster. | `3` | -| defaultDataLocality | The default location where data computation will occur. | `disabled` Best effort | -| reclaimPolicy | This means that a dynamically provisioned volume will be automatically deleted when deletes when corresponding PersistentVolumeClaim is deleted. For important data, it is more appropriate to use the "Retain" policy | `Delete` | -| migratable | The ability to transfer data to another data storage systems | `false` | -| recurringJobSelector:enable | The management of recurring jobs. You can enable this feature and type a comma-separated list of jobs to run: `recurringJobSelector:enable:jobList [ ]` | `false` | - -## Usage - -Longhorn provides these features: - -- Enterprise-grade distributed storage with no single point of failure. -- Incremental snapshots of block storage. -- Backup to secondary storage (NFSv4 or S3-compatible object storage) built on change block detection. -- Recurring snapshot and backup. - -For more information, check out Longhorn guide on [How to Create Volumes](https://longhorn.io/docs/1.4.0/volumes-and-nodes/create-volumes/). - - - - - - -## Prerequisites - -- Kubernetes cluster is at least version 1.22 and not higher than 1.24. - -## Parameters - -The table lists commonly used parameters you can configure when adding this pack. - -| Parameter | Description | Default | -|-------------------------|--------------------------------------------------------|---------------------------------------------| -| defaultClass | The volume type to be used. | `true` | -| defaultFsType | The default file system. | `ext4` | -| defaultClassReplicaCount| The default number of copies of data store in your cluster. | `3` | -| defaultDataLocality | The default location where data computation will occur. | `disabled` Best effort | -| reclaimPolicy | This means that a dynamically provisioned volume will be automatically deleted when deletes when corresponding PersistentVolumeClaim is deleted. For important data, it is more appropriate to use the "Retain" policy | `Delete` | -| migratable | The ability to transfer data to another data storage systems | `false` | -| recurringJobSelector:enable | The management of recurring jobs. You can enable this feature and type a comma-separated list of jobs to run: `recurringJobSelector:enable:jobList [ ]` | `false` | - -## Usage - -Longhorn provides these features: - -- Enterprise-grade distributed storage with no single point of failure. -- Incremental snapshots of block storage. -- Backup to secondary storage (NFSv4 or S3-compatible object storage) built on change block detection. -- Recurring snapshot and backup. - -For more information, check out Longhorn guide on [How to Create Volumes](https://longhorn.io/docs/1.4.0/volumes-and-nodes/create-volumes/). - - - - -# Terraform - -When using this Pack as a base layer, you need the following terraform code. 
- -``` -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "csi-longhorn" { - name = "longhorn" - version = "1.3.1" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - -# References - -[Longhorn](https://longhorn.io/) diff --git a/content/docs/06-integrations/00-metallb.md b/content/docs/06-integrations/00-metallb.md deleted file mode 100644 index 0e1f77de65..0000000000 --- a/content/docs/06-integrations/00-metallb.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: 'MetalLB' -metaTitle: 'MetalLB' -metaDescription: 'MetalLB Load Balancer pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['load balancers', 'amd64', 'arm64'] -logoUrl: 'https://registry.spectrocloud.com/v1/lb-metallb/blobs/sha256:3d09a1eab856a03d5b821062dcd1da624256e8f1e2ede404d88cb088d3adb945?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# MetalLB - -MetalLB is a load-balancer implementation for bare metal [Kubernetes](https://kubernetes.io/) clusters, using standard routing protocols. This integration is recommended for the on-prem cloud(s) and will help external service(s) get an IP address when the service type is set as LoadBalancer. - -## MetalLB Pack Working Details: - -* The address set in pack values goes into a configMap **`config`** in **`metallb-system`** namespace. This configMap is used by the MetalLB controller and speakers as volume mounts. - -* Any changes to the address will get updated in the configMap. Our users may confirm this with this command: - - kubectl describe cm config -n metallb-system. - -* However, the controller and speaker pods are already running with a previous copy of the configMap and these deployments are not aware of the new changes made to configMap. To ensure the address change are reflected, we need to restart the controller and speaker pods so that they will fetch the new configMap and start assigning new addresses correctly. - -* Run the following commands, which will help restart the controller and speaker: - - kubectl rollout restart deploy controller -n metallb-system - kubectl rollout restart ds speaker -n metallb-system - -## Versions Supported - - - - - -* **0.13.5** - - - - - -* **0.11.0** - - - - - -* **0.9.5** - - - - - * **0.8.3** - - - - -## Components - -* MetalLB controller. -* Speaker (runs on all nodes, deployed as DaemonSet). - -## References - -https://metallb.universe.tf/
-https://github.com/metallb/metallb diff --git a/content/docs/06-integrations/00-microk8s.md b/content/docs/06-integrations/00-microk8s.md deleted file mode 100644 index c87e4fc1df..0000000000 --- a/content/docs/06-integrations/00-microk8s.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: 'MicroK8s' -metaTitle: 'MicroK8s Integration with Palette' -metaDescription: 'MicroK8s pack in Palette' -hiddenFromNav: true -type: "integration" -category: ["kubernetes", 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/kubernetes-microk8s/blobs/sha256:b971b64f62e2e67b0a166316f96e6f4211aacea6e28459bb89275e8882ade985?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# MicroK8s Overview - -MicroK8s is a Cloud Native Computing Foundation (CNCF) certified upstream Kubernetes deployment that runs entirely on your workstation or edge device. It runs all Kubernetes services natively without virtual machines and packs all the required libraries and binaries. - -## Prerequisites - -- One of the following Ubuntu environments to run commands: - - 22.04 LTS - - 20.04 LTS - - 18.04 LTS - - 16.04 LTS - - Or another operating system that supports snapd. - - -- At least 20 GB of disk space and 4 GB of memory. -- An internet connection. - - - -If your environment doesn't meet these requirements, there are alternative ways to install MicroK8s, including additional OS support and an offline deployment. - - - - -## Versions Supported - - - - - -* **1.25.0** - - - - - -* **1.24.0** - - - - -MicroK8s installs a minimal, lightweight Kubernetes you can run and use on almost any machine. When installing MicroK8s you can specify a channel made up of two components: - -- **Track**: denotes the upstream Kubernetes version. -- **Risk level**: indicates the maturity level of the release, such as stable and edge. - -MicroK8s comes with its own packaged version of the ``kubectl`` command for operating Kubernetes. This avoids interfering with any version that may already be on the host machine. You can run it in a terminal like this: -
- -```bash -microk8s kubectl -``` - -If you are using or want to use a different kubectl command, you can configure it for your Linux, Mac, or Windows operating system. -
- - - -### Caveat for MicroK8s with AWS EBS pack - -When you deploy AWS EBS pack with MicroK8s, you need to change EBS CSI pack node.kubelet values from: - -```yaml - node: - env: [] - kubeletPath: /var/lib/kubelet -``` -to the below yaml content: - -```yaml - node: - env: [] - kubeletPath: /var/snap/microk8s/common/var/lib/kubelet -``` - - -# References - -[MicroK8s](https://microk8s.io/docs) diff --git a/content/docs/06-integrations/00-multus-cni.md b/content/docs/06-integrations/00-multus-cni.md deleted file mode 100644 index e07091b2d4..0000000000 --- a/content/docs/06-integrations/00-multus-cni.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: 'Multus CNI' -metaTitle: 'Multus CNI' -metaDescription: 'Choosing Multus CNI within the Palette console' -hiddenFromNav: true -type: "integration" -category: ['network', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/cni-multus/blobs/sha256:3727499ea41784a17c818b7269c27918b8664766c40d1b1f3cd90c34d5154676?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -The Multus Container Network Interface (CNI) plugin enables multiple, network interfaces to attach to pods within Kubernetes. Palette provisions the CNI-Multus 3.8.0 Add-on pack, so you can create a multi-homed pod for Kubernetes right within the orchestrator. - -
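To illustrate what a multi-homed pod looks like in practice, the sketch below follows the upstream Multus quickstart: a NetworkAttachmentDefinition describes the secondary network, and a pod requests it through an annotation. The interface name, subnet, and object names are placeholders for your environment.

```yaml
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
  name: macvlan-conf                      # placeholder network name
spec:
  config: '{
    "cniVersion": "0.3.1",
    "type": "macvlan",
    "master": "eth0",
    "ipam": { "type": "host-local", "subnet": "192.168.10.0/24" }
  }'
---
apiVersion: v1
kind: Pod
metadata:
  name: multi-homed-pod
  annotations:
    k8s.v1.cni.cncf.io/networks: macvlan-conf   # attaches the secondary network
spec:
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]
```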
- -## Version Supported - - - - -**cni-multus 3.8.0** - - - - -
-
- -# Notable Parameters - -| **Parameters** | **Values** | **Required/Optional** | **Description** | -| ---------------------- | ------- | ----------------- | ------------------------------------------------------------------------------------------------------------ | -| **name** | string | required | Enter the name of the network. | -| **type** | string | required | "multus" | -| **confDir** | string | optional | The directory for the CNI config file that Multus reads. Defaults to /etc/cni/multus/net.d. | -| **cniDir** | string | optional | Multus CNI data directory. | -| **binDir** | string | optional | Additional directory for CNI plugins which Multus calls. | -| **kubeconfig** | string | optional | Kubeconfig file for the out-of-cluster communication with kube-apiserver. | -| **logToStderr** | boolean | optional | Enable or disable logging to STDERR. Defaults to true. | -| **logFile** | string | optional | File path for the log file. Multus appends its logs to this file. | -| **logLevel** | string | optional | Logging level. | -| **logOptions** | object | optional | Logging options. | -| **namespaceIsolation** | boolean | optional | Enables a security feature where pods are only allowed to access
NetworkAttachmentDefinitions in the namespace where the pod resides. Defaults to *false*. | -| **capabilities** | list | optional | Capabilities supported by at least one of the delegates. | -| **readinessindicatorfile** | string | | The path to a file whose existence denotes that the default network is ready. | - - -## References - - - [Multus-CNI](https://github.com/k8snetworkplumbingwg/multi-net-spec) - - - [Multus-CNI Quickstart Guide](https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/quickstart.md) - - - [Multus Configuration](https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/configuration.md) -
diff --git a/content/docs/06-integrations/00-nfs-subdir-external.md b/content/docs/06-integrations/00-nfs-subdir-external.md deleted file mode 100644 index 60de9c4d4a..0000000000 --- a/content/docs/06-integrations/00-nfs-subdir-external.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: 'nfs-subdir-External' -metaTitle: 'nfs-subdir-external' -metaDescription: 'NFS-Subdir-External Provisioner pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['storage', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-nfs-subdir-external/blobs/sha256:4b40eb85382d04dc4dcfc174b5e288b963b6201f6915e14b07bd8a5c4323b51b?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# NFS Subdir External Provisioner - -NFS Subdir External Provisioner is an automatic provisioner for Kubernetes that uses the already configured NFS server, automatically creating Persistent storage volumes. It installs the storage classes and NFS client provisioner into the workload cluster - -## Prerequisites - -Kubernetes >=1.9 - - -## Versions Supported - - - - - -**1.0** - - - - - - -## References - -https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner - -https://artifacthub.io/packages/helm/nfs-subdir-external-provisioner/nfs-subdir-external-provisioner diff --git a/content/docs/06-integrations/00-nginx.md b/content/docs/06-integrations/00-nginx.md deleted file mode 100644 index a9cb318a8c..0000000000 --- a/content/docs/06-integrations/00-nginx.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: 'Nginx' -metaTitle: 'Nginx' -metaDescription: 'Nginx Ingress pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['ingress', 'amd64', 'arm64'] -logoUrl: 'https://registry.spectrocloud.com/v1/nginx/blobs/sha256:a36bf7e8023f018298ddbf0c82a49c38e872db4b0e480a39c285ae002916a83f?type=image/png' ---- - -import WarningBox from 'shared/components/WarningBox'; -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Nginx - -Ingress resource(s) in Kubernetes helps provide Service(s) externally-reachable URLs, load balance traffic, terminate SSL / TLS, and offer name-based virtual hosting. NGINX integration is an [Ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers) responsible for fulfilling the Ingress, usually with a load balancer, though it may also configure your edge router or additional frontends to help handle the traffic. - -## Versions Supported - - - - - -* **1.4.0** - - - - - -* **1.3.0** - - - - - - -* **1.2.5** - -
- - **1.2.4** - -
- - **1.2.3** - -
- - **1.2.1** (deprecated) - -
- - **1.2.0** (deprecated) - -
- - - -* **1.0.4** - - - - - -* **0.26.1** - - - - - - * **0.43.0** - - - -
- - -## Components - -Integration creates the following components: - -* Ingress Controller. -* Default Backend. - -## Default SSL certificate - -NGINX Ingress controller provides an option to set a default SSL certificate to be used for requests that do not match any of the configured server names. The default certificate will also be used for ingress tls: sections that do not have a secretName option. -Below steps will come in handy to set the default certificate. - -1. Create a secret with key and certificate - ```bash - kubectl -n kube-system create secret tls ingress-tls --cert server.crt --key server.key - ``` -2. Edit Nginx ingress pack values to include extraArgs.default-ssl-certificate section which will reference the secret created above - ```bash - charts: - nginx-ingress: - fullnameOverride: "nginx-ingress" - controller: - ... - ... - extraArgs: - default-ssl-certificate: "kube-system/ingress-tls" - ``` - -## Troubleshooting - -For basic troubleshooting, refer the below troubleshooting guide: -https://github.com/kubernetes/ingress-nginx/blob/master/docs/troubleshooting.md - -## References - -- [Nginx Ingress Controller](https://www.nginx.com/products/nginx-ingress-controller/) diff --git a/content/docs/06-integrations/00-opa-gatekeeper.md b/content/docs/06-integrations/00-opa-gatekeeper.md deleted file mode 100644 index 663597f57e..0000000000 --- a/content/docs/06-integrations/00-opa-gatekeeper.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: 'OpenPolicyAgent' -metaTitle: 'OpenPolicyAgent' -metaDescription: 'OpenPolicyAgent security pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['security', 'amd64', 'arm64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/open-policy-agent/blobs/sha256:fcbad202dc9ca5e7a756562d8f9fc180ee77474034447dabc302d8a5a2bbe148?type=image/png" alt="OpenPolicyAgent logo' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Open Policy Agent - -Palette users can leverage the **Open Policy Agent (OPA) Gatekeeper** to strengthen the security administration of Kubernetes environment. The major motivation behind the deployment is admission customization via configurations without code. Gatekeeper provides an admission control system based on policies or rules implemented through parameterized and admin configurable constraints. Palette supports **Gatekeeper v3.0**. - -The major features of OPA are: - -* **Validating Admission Control** -* **Policies and Constraints** - * **Sample Policies**: - * All namespaces must have a label that lists a point-of-contact. - * All pods must have an upper bound for resource usage. - * All images must be from an approved repository. - * Services must all have globally unique selectors. - * **Constraint Properties** - * AND-ed together - * Schema validation - * Selection semantics -* **Audit**: The periodical evaluation of resources against constraints. -* **Data Replication**: Constraints to be compared against other objects in the cluster. 
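As an example of the policies-and-constraints model, the constraint below enforces the first sample policy in the list above (every namespace must carry a point-of-contact label). It assumes a `K8sRequiredLabels` ConstraintTemplate, such as the one from the Gatekeeper policy library, has already been installed; the exact `parameters` schema is defined by that template.

```yaml
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequiredLabels                      # provided by a ConstraintTemplate, e.g. from the Gatekeeper library
metadata:
  name: ns-must-have-point-of-contact
spec:
  match:
    kinds:
      - apiGroups: [""]
        kinds: ["Namespace"]
  parameters:
    labels:
      - key: point-of-contact                # parameter shape depends on the installed template
```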
- -## Versions Supported - - - - - -* **3.11.0** -* **3.9.0** - - - - - - -**3.7.0** - - - - - -**3.6.0** - - - - - -**3.5.1** - - - - - -## References - -https://kubernetes.io/blog/2019/08/06/opa-gatekeeper-policy-and-governance-for-kubernetes/ diff --git a/content/docs/06-integrations/00-openstack-cinder.md b/content/docs/06-integrations/00-openstack-cinder.md deleted file mode 100644 index 979ff23728..0000000000 --- a/content/docs/06-integrations/00-openstack-cinder.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: 'OpenStackCinder' -metaTitle: 'OpenStackCinder' -metaDescription: 'OpenStackCinder storage pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['storage', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-openstack-cinder/blobs/sha256:ebb9650566d2cdfe9b0fc7d474a1cdcd562a9020807e49f891df199379ab8961?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Open Stack Cinder - -Unlike the traditional storage drivers of Kubernetes and the implementation of the Container Storage Interface (CSI), we can deliver storage plug-ins using a standard interface without ever having to change the core Kubernetes code. Open Stack Cinder provides OpenShift Container Platform users with storage options, such as volume snapshots that are not possible with in-tree volume plug-ins. - -# Versions Supported - - - - - -**1.23** - - - - - -**1.22** - - - - - -**1.21** - - - - - -**1.20** - - - - - -**1.19** - - - - - -**1.18** - - - - - -# References - -[OpenStack Cinder CSI Driver Operator](https://docs.openshift.com/container-platform/4.7/storage/container_storage_interface/persistent-storage-csi-cinder.html#csi-about_persistent-storage-csi-cinder) - -[CSI Cinder driver](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md/) diff --git a/content/docs/06-integrations/00-permission-manager.md b/content/docs/06-integrations/00-permission-manager.md deleted file mode 100644 index 2838160769..0000000000 --- a/content/docs/06-integrations/00-permission-manager.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: 'Permission Manager' -metaTitle: 'Permission Manager' -metaDescription: 'Permission Manager Authentication pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -hideToC: false -category: ['authentication', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/permission-manager/blobs/sha256:15d08b02d78823c12616b72d1b5adb0520940016b89bae1f758e6f1a105597ff?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Permission Manager - -This integration provides a graphical user interface for RBAC management in Kubernetes. You can create users, assign namespaces/permissions, and distribute Kubeconfig YAML files quickly. 
- - -## Versions Supported - - - - -* **1.0.0** - - - - -## Configuration - -| Name | Supported Value | Description | -| --- | --- | --- | -| namespace| Any valid namespace string | The namespace under which this integration should be deployed onto| -| authPassword | | Login password for the web interface | - -## Customizing the permission templates - -Create a ClusterRole starting with `template-namespaced-resources___` or `template-cluster-resources___` and apply it to the cluster. Permission manager will honor any custom resources with this naming convention and will populate on the user interface. - -# Ingress - -Follow below steps to configure Ingress on Permission Manager - -1. Change serviceType from "LoadBalancer" to "ClusterIP" (line #10) -2. Ingress (line #13) - * Enable Ingress; Change enabled from false to "true" - * Set Ingress rules like annotations, path, hosts, etc. - -With these config changes, you can access Permission manager service on the Ingress Controller LoadBalancer hostname / IP - -## References - - diff --git a/content/docs/06-integrations/00-portworx.md b/content/docs/06-integrations/00-portworx.md deleted file mode 100644 index 8c03af579d..0000000000 --- a/content/docs/06-integrations/00-portworx.md +++ /dev/null @@ -1,557 +0,0 @@ ---- -title: 'Portworx' -metaTitle: 'Portworx Integration with Spectro Cloud' -metaDescription: 'Portworx storage integration for on-prem installations' -hiddenFromNav: true -type: "integration" -category: ['storage', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/csi-portworx/blobs/sha256:e27bc9aaf22835194ca38062061c29b5921734eed922e57d693d15818ade7486?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Portworx - -[Portworx](https://portworx.com/) is a software-defined persistent storage solution designed and purpose-built for applications deployed as containers, via container orchestrators such as Kubernetes. You can use Palette to install Portworx on the cloud or on-premises. - -## Versions Supported - -
- - - - - -* **2.11.2** - - - - - - -* **2.10.0** - - - - - - -* **2.9.0** - - - - -* **2.8.0** - - - - -* **2.6.1** - - - - -## Prerequisites - -Before deploying Portworx for Kubernetes, make sure the following prerequisites are in place and configure the corresponding properties in the pack: - - -* Have at least three nodes with the proper [hardware, software, and network requirements](https://docs.portworx.com/install-portworx/prerequisites). - - -* Ensure you are using a supported Kubernetes version. - - -* Identify and set up the storageType. - - -
- -## Contents - -The default installation of Portworx will deploy the following components in the Kubernetes cluster. - - -* Portworx - - -* CSI Provisioner - - -* [Lighthouse](https://portworx.com/blog/manage-portworx-clusters-using-lighthouse/) - - -* [Stork](https://github.com/libopenstorage/stork) and [Stork on Portworx](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/stork/) - - -* Storage class making use of portworx-volume provisioner. - -## Notable Parameters - -### Manifests - Portworx: - -```yaml - -manifests: - portworx: - - # The namespace to install Portworx resources - namespace: "portworx" - - # Portworx storage type and size - storageType: "type=zeroedthick,size=150" - - # Max storgae nodes per zone - maxStorageNodesPerZone: 3 - - # Node recovery timeout in seconds - nodeRecoveryTimeout: 1500 - - # Portworx storage class config - storageClass: - enabled: true - isDefaultStorageClass: true - allowVolumeExpansion: true - reclaimPolicy: Retain - volumeBindingMode: Immediate - parameters: - repl: "3" - priority_io: "high" - #sharedv4: true - - k8sVersion: '{{.spectro.system.kubernetes.version}}' - - templateVersion: "v4" - - # List of additional container args to be passed - args: - ociMonitor: - #- "-dedicated_cache" - #- "-a" - storkDeployment: - #- "--app-initializer=true" - storkScheduler: - #- "--scheduler-name=xyz" - autoPilot: - csiProvisioner: - csiSnapshotter: - csiSnapshotController: - csiResizer: - - # The private registry from where images will be pulled from. When left empty, images will be pulled from the public registry - # Example, imageRegistry: "harbor.company.com/portworx" - imageRegistry: "" - -``` -# Integrating to an External etcd - -Starting Portworx v2.6.1, you can use the presets feature to toggle between the available ETCD options. - -By default, Portworx is set to use internal KVDB. However, you can integrate Portworx to an external etcd server by following the steps below. - -1. Enable `useExternalKvdb` flag by setting it to *true*. - - -2. Configure the external etcd endpoints in `externalKvdb.endpoints`. - - -If the external etcd server is configured to authenticate via certificates, additionally you may want to set up the following: - -1. Enable `externalKvdb.useCertsForSSL` flag by setting it to *true*. - - -2. Setup certificate related configuration in `externalKvdb.cacert`, `externalKvdb.cert`, and `externalKvdb.key`. - - - -Make sure to follow the correct indentation style; otherwise, certs will not be imported correctly and will result in Portworx deployment failure. - - - -## etcd Presets - -These are the three types of Presets that can be selected and modified. - -
- - - - -## Use Internal KVDB - -```yaml -# ECTD selection - useExternalKvdb: false - - # External kvdb related config - externalKvdb: - - useCertsForSSL: false - -vsphere-cloud-controller-manager: - k8sVersion: '{{.spectro.system.kubernetes.version}}' -``` - - - - -## Use Non-Secure KVDB Endpoints - -```yaml -# External kvdb related config - externalKvdb: - # List of External KVDB endpoints to use with Portworx. Used only when useExternalKvdb is true - endpoints: - - etcd:http://100.26.199.167:2379 - - etcd:http://100.26.199.168:2379 - - etcd:http://100.26.199.169:2379 - useCertsForSSL: false - useExternalKvdb: true - vsphere-cloud-controller-manager: - k8sVersion: '{{.spectro.system.kubernetes.version}}' -``` - - - - - -## Use Certs Secured KVDB Endpoints - -```yaml - -# External KVDB Related Configuration - externalKvdb: - # List of External KVDB endpoints to use with Portworx. Used only when useExternalKvdb is true - endpoints: - - etcd:https://100.26.199.167:2379 - - etcd:https://100.26.199.168:2379 - - etcd:https://100.26.199.169:2379 - useCertsForSSL: true - # The CA cert to use for etcd authentication. Make sure to follow the same indentation style as given in the example below - cacert: |- - -----BEGIN CERTIFICATE----- - MIIC3DCCAcQCCQCr1j968rOV3zANBgkqhkiG9w0BAQsFADAwMQswCQYDVQQGEwJV - UzELMAkGA1UECAwCQ0ExFDASBgNVBAcMC1NhbnRhIENsYXJhMB4XDTIwMDkwNDA1 - MzcyNFoXDTI1MDkwMzA1MzcyNFowMDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNB - MRQwEgYDVQQHDAtTYW50YSBDbGFyYTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC - AQoCggEBALt2CykKKwncWNQqB6Jg0QXd58qeDk40OF4Ti8DewZiZgpQOgA/+GYO7 - bx2/oQyAwjvhpTYjmMN5zORJpE3p9A+o57An1+B9D8gm1W1uABVEmwiKZhXpa+3H - Zlon58GR+kAJPbMIpvWbjMZb4fxZM0BPo0PHzzITccoaTV4+HY4YoDNAVjfZ1cEn - Hu2PUyN8M4RM+HdE4MOQVwqFDq/Fr6mLBMV0PdiwML0tjZ7GSGSjv1hme3mOLvKP - qSWx4hCd5oTegEfneUKKnVhH3JLpSU1NaC6jU3vhyowRNOShi77/uJCnkx3mp9JG - c4YruKrGc997wmUMsIv0owt49Y3dAi8CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEA - kEXPdtpOURiZIi01aNJkzLvm55CAhCg57ZVeyZat4/LOHdvo+eXeZ2LHRvEpbakU - 4h1TQJqeNTd3txI0eIx8WxpwbJNxesuTecCWSIeaN2AApIWzHev/N7ZYJsZ0EM2f - +rYVcX8mcOkLeyKDInCKySxIPok8kU4qQLTWytJbeRYhxh7mSMuZXu7mtSh0HdP1 - C84Ml+Ib9uY2lbr1+15MhfSKdpvmLVOibRIrdqQirNhl8uU9I1/ExDxXyR2NBMLW - tzGgsz5dfFDZ4oMqAc8Nqm9LuvmIZYMCunMZedI2h7jGH3LVQXdM81iZCgJdTgKf - i9CNyx+CcwUCkWQzhrHBQA== - -----END CERTIFICATE----- - # The cert to use for etcd authentication. 
Make sure to follow the same indentation style as given in the example below - cert: |- - -----BEGIN CERTIFICATE----- - MIIDaTCCAlGgAwIBAgIJAPLC+6M3EezhMA0GCSqGSIb3DQEBCwUAMDAxCzAJBgNV - BAYTAlVTMQswCQYDVQQIDAJDQTEUMBIGA1UEBwwLU2FudGEgQ2xhcmEwHhcNMjAw - OTA0MDUzODIyWhcNMjIxMjA4MDUzODIyWjA4MQswCQYDVQQGEwJVUzETMBEGA1UE - CAwKQ2FsaWZvcm5pYTEUMBIGA1UEBwwLU2FudGEgQ2xhcmEwggEiMA0GCSqGSIb3 - DQEBAQUAA4IBDwAwggEKAoIBAQCycmCHPrX0YNk75cu3H5SQv/D1qND2+2rGvv0Z - x28A98KR/Bdchk1QaE+UHYPWejsRWUtEB0Q0KreyxpwH1B4EHNKpP+jV9YqCo5fW - 3QRipWONKgvrSKkjVp/4U/NAAWCHfruB1d9u/qR4utY7sEKHE9AxmbyG+K19mOB2 - FJc7NOsTwN8d6uA5ZfFKmv3VtZzl0+Vq1qFSyIZT9zXYM22YjBAqXk9FVoI0FoQt - zpymQrsajfS+hNX7lSUVKKv3IplpNqSOyTHRF7TWo5NOH+YRWJHLAgZoq2w/yaEi - 5IdjLdb1JXmVUyBgq590WcJZDakwD9SPOHrM9K1vTl9I41q7AgMBAAGjfjB8MEoG - A1UdIwRDMEGhNKQyMDAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEUMBIGA1UE - BwwLU2FudGEgQ2xhcmGCCQCr1j968rOV3zAJBgNVHRMEAjAAMAsGA1UdDwQEAwIE - 8DAWBgNVHREEDzANggtleGFtcGxlLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAUOBn - YdTif6WlRpQOj+3quGrafJSNL8TqHkpmgaInSpMVFwDsmPF/HoAVVpX+H3oMY8p7 - Ll4I1Q7szpGRnKpJuzMZp5+gNpmwAz2MdAr7Ae9wH/+o8c2avbfpaHFWVTJZJ6X1 - Q6m6jmXcU0QSS4zj+lyxDNKnXfwVL8hVp0mXRFfPpb4l5ZCBoj4IA2UgyeU7F/nn - nvR5rmg781zc0lUL6X7HaSfQjtPDTSZYFqwE93vSe42JP7NWM96lZHy2IlfE88Wp - jUvOOJjaFVuluaJ78uCydMGEkJmipxH+1YXicH47RQ30tD5QyXxGBi+8jw5z0RiR - ptWD/oDFCiCjlffyzg== - -----END CERTIFICATE----- - # The key to use for etcd authentication. Make sure to follow the same indentation style as given in the example below - key: |- - -----BEGIN RSA PRIVATE KEY----- - MIIEogIBAAKCAQEAsnJghz619GDZO+XLtx+UkL/w9ajQ9vtqxr79GcdvAPfCkfwX - XIZNUGhPlB2D1no7EVlLRAdENCq3ssacB9QeBBzSqT/o1fWKgqOX1t0EYqVjjSoL - 60ipI1af+FPzQAFgh367gdXfbv6keLrWO7BChxPQMZm8hvitfZjgdhSXOzTrE8Df - HergOWXxSpr91bWc5dPlatahUsiGU/c12DNtmIwQKl5PRVaCNBaELc6cpkK7Go30 - voTV+5UlFSir9yKZaTakjskx0Re01qOTTh/mEViRywIGaKtsP8mhIuSHYy3W9SV5 - lVMgYKufdFnCWQ2pMA/Ujzh6zPStb05fSONauwIDAQABAoIBAGHELIKspv/m993L - Pttrn/fWUWwmO6a1hICzLvQqwfRjyeQ1m48DveQp4j+iFBM0EJymsYfp+0IhjVeT - XPUlD/Ts3bYA384pouOEQbJkkPyC5JH40WLtAk3sLeTeCc2tc3eIxa6SwMGNHgtP - QgSdwzVCc7RZKGNCZ7sCQSgwi9LRdyjHU0z0KW3lHqsMkK+yEg8zuH2DpIgvFej8 - KxjwF9ZEsnYDcERdd4TOu2NTEIl5N7F8E6di/CLP/wkfHazjX+qGcuBXjeGhPgdb - fKCcrFxhbavaJRMGLqnOD99l/zvySnA+LUSZ35KB/2ZfLMv71Z9oABTlyiR+76GW - 0lcQjmECgYEA2Jrq2qe7IUZ8CURWJ6rDKgD83LGRCHAWZ+dYvFmdsyfAGMV4+p4V - zKSidiTWAgl7ppiZdaEPu/2cH8uohDkdx2CTSUKPUM6+PBhE4hwSA42RlnIpGWbf - YEqcZ/qeo1IFb1A1YslwdslCVLc3INEbWairBEGis8aAxUaoEiTiPTMCgYEA0ubQ - 05BijLK6XH6YfASDLxwRg6jxn3mBqh+pAwE4tVVJVI9yXnNzN4/WKJJM+mdSGfpv - UcJy86ZcmHNzanZUPWh80U2pyRoVXvVQpY8hdMQ3neya60mc6+Nneba2LflkBVmd - cdoNGO0zAcGb0FKDCF2H3fizDxcoOyUjeKlLnFkCgYABU0lWlyok9PpzUBC642eY - TTM+4nNBuvXYIuk/FclKPFcHj8XCus7lVqiL0oPgtVAlX8+okZi4DMA0zZk1XegZ - vTSJgTfBRdKSKY/aVlOh4+7dHcu0lRWO0EYOuNDZrPnNiY8aEKN4hpi6TfivYbgq - H0cUmpY1RWSqUFlc6w7bUwKBgEMINctoksohbHZFjnWsgX2RsEdmhRWo6vuFgJSB - 6OJJrzr/NNysWSyJvQm8JldYS5ISNRuJcDvc3oVd/IsT/QZflXx48MQIVE6QLgfR - DFMuonbBYyPxi7y11Ies+Q53u8CvkQlEwvDvQ00Fml6GOzuHbs2wZEkhlRnnXfTV - 6kBRAoGAP9NUZox5ZrwkOx7iH/zEx3X3qzFoN/zSI2iUi2XRWaglGbNAxqX5/ug8 - xJIi1Z9xbsZ/3cPEdPif2VMdvIy9ZSsBwIEuzRf8YNw6ZGphsO95FKrgmoqA44mm - WsqUCBt5+DnOaDyvMkokP+T5tj/2LXemuIi4Q5nrOmw/WwVGGGs= - -----END RSA PRIVATE KEY----- - useExternalKvdb: true -vsphere-cloud-controller-manager: - k8sVersion: '{{.spectro.system.kubernetes.version}}' - -``` - - - - -# Environments - -
- - - - -## vSphere Environment - -For deploying Portworx storage on vSphere environments, make sure to configure the following properties in the pack: - -* vSphere Configuration file - - -* Storage Type - - -* Kubernetes Version - -### vSphere Manifest - -Additional parameters for the manifest are as follows: -
- -```yaml - -# VSphere cloud configurations -vsphereConfig: - insecure: "true" - host: "" - port: "443" - datastorePrefix: "datastore" - installMode: "shared" - userName: "" - password: "" - # Enter the name of the secret which has vsphere user credentials (Use keys VSPHERE_USER, VSPHERE_PASSWORD) - userCredsSecret: "" -``` -
- -## Using Secrets for vSphere User Credentials - -Portworx pack values allow you to configure vSphere user credentials in two ways: - - -1. Username & password - (`portworx.vsphereConfig.userName` and `portworx.vsphereConfig.password`). - - -2. Secret - (`portworx.vsphereConfig.userCredsSecret` is available with v2.6.1 and above). - - -If you choose the latter, make sure to create the secret in the target cluster manually or by bringing your own (BYO) manifest Add-on pack. - -
- - -Until the secret is created in the cluster, Portworx deployments might fail to run. Once the secret is configured, reconciliation should recover Portworx. - -The secret can be created using the following spec: - -
- - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: px-vsphere-secret - namespace: kube-system -type: Opaque -data: - VSPHERE_USER: "b64 encoded admin username" - VSPHERE_PASSWORD: "b64 encoded admin password" -``` -and this secret can be referenced in the Portworx pack values as shown below: - -
- -``` -manifests: - portworx: - vsphereConfig: - userCredsSecret: "px-vsphere-secret" -``` - -Ensure you follow the correct indentation style; otherwise, the values will not be imported correctly, resulting in a Portworx deployment failure. - -
- - - - -## AWS Environment -Palette can provision Portworx in AWS environments. The following packs are supported: -
- -### Packs Supported - - - - -**portworx-aws-2.9** - - - - - -**portworx-aws-2.10** - - - - - -
- -### Prerequisites - -To deploy Portworx in an AWS environment, have the following prerequisites in place. - - -* Ensure the Portworx Nodes have the TCP ports open at **9001-9022**. - - -* Ensure there is an open UDP port at **9002**. - - -* Apply the following policy to the **User** in AWS: - -```yaml -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "", - "Effect": "Allow", - "Action": [ - "ec2:AttachVolume", - "ec2:ModifyVolume", - "ec2:DetachVolume", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:DeleteTags", - "ec2:DeleteVolume", - "ec2:DescribeTags", - "ec2:DescribeVolumeAttribute", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVolumeStatus", - "ec2:DescribeVolumes", - "ec2:DescribeInstances", - "autoscaling:DescribeAutoScalingGroups" - ], - "Resource": [ - "*" - ] - } - ] -} -``` - -
- -## AWS Manifest - -```yaml -manifests: - portworx: - - # The namespace to install Portworx resources - namespace: "portworx" - - # Portworx storage type and size - storageType: "type=gp3,size=150" - - # Max storage nodes per zone - maxStorageNodesPerZone: 3 - - # Node recovery timeout in seconds - nodeRecoveryTimeout: 1500 - - # Portworx storage class config - storageClass: - enabled: true - isDefaultStorageClass: true - allowVolumeExpansion: true - reclaimPolicy: Retain - volumeBindingMode: Immediate - parameters: - repl: "3" - priority_io: "high" - #sharedv4: true - - # Kubernetes version. - k8sVersion: '{{.spectro.system.kubernetes.version}}' - - templateVersion: "v4" - - # List of additional container args to be passed - args: - ociMonitor: - #- "-dedicated_cache" - #- "-a" - storkDeployment: - #- "--app-initializer=true" - storkScheduler: - #- "--scheduler-name=xyz" - autoPilot: - csiProvisioner: - csiSnapshotter: - csiSnapshotController: - csiResizer: - - # The private registry from where images will be pulled from. When left empty, images will be pulled from the public registry - # Example, imageRegistry: "harbor.company.com/portworx" - imageRegistry: "" - - # ECTD selection - useExternalKvdb: false - - # External kvdb related config - externalKvdb: - - useCertsForSSL: false -``` - -
- - -
- -
- -
- -## References - -[Portworx Install with Kubernetes](https://docs.portworx.com/portworx-install-with-kubernetes/) - -[Lighthouse](https://legacy-docs.portworx.com/enterprise/lighthouse-new.html) - -[Installation Prerequisites](https://docs.portworx.com/install-portworx/prerequisites/) - -[Install Portworx on AWS ASG using the DaemonSet](https://docs.portworx.com/install-portworx/cloud/aws/aws-asg/daemonset/) diff --git a/content/docs/06-integrations/00-portworx_operator.md b/content/docs/06-integrations/00-portworx_operator.md deleted file mode 100644 index a7bf742cd0..0000000000 --- a/content/docs/06-integrations/00-portworx_operator.md +++ /dev/null @@ -1,629 +0,0 @@ ---- -title: 'Portworx /w Operator' -metaTitle: 'Portworx storage CSI (Essentials/PAYG/Enterprise)' -metaDescription: 'Portworx storage CSI for all use cases' -hiddenFromNav: true -type: "integration" -category: ['storage', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/csi-portworx/blobs/sha256:e27bc9aaf22835194ca38062061c29b5921734eed922e57d693d15818ade7486?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Portworx /w Operator - -[Portworx](https://portworx.com/) is a software-defined persistent storage solution designed and purpose-built for applications deployed as containers via container orchestrators such as Kubernetes. You can use Palette to install Portworx on a cloud platform, on-premises, or at the edge. - -## Versions Supported - -
- - - - - - -* **2.11.x** - - - - - -## Prerequisites - -Before deploying Portworx with the Operator for Kubernetes, make sure the following prerequisites are in place and configure the corresponding properties in the pack: -
- -* Have at least three nodes with the proper [hardware, software, and network requirements](https://docs.portworx.com/install-portworx/prerequisites). - -* Ensure you use a supported Kubernetes version (1.19 or above). - -* Identify and set up the storageType. - -
- -## Contents - -The default installation of Portworx /w Operator will deploy the following components in the Kubernetes cluster: -
- -* Portworx Operator - -* `StorageCluster` resource that tells the Operator how to deploy & configure Portworx - -* `StorageClass` resource for dynamic provisioning of PersistentVolumes using the portworx-volume provisioner - -* [Stork](https://github.com/libopenstorage/stork) and [Stork on Portworx](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/stork/) - - -Optionally, you can enable [Lighthouse](https://legacy-docs.portworx.com/enterprise/lighthouse-new) for essential monitoring of the Portworx cluster. - -
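To illustrate how the `StorageClass` listed above is consumed, a minimal PersistentVolumeClaim might look like the following sketch; it assumes the default `spectro-storage-class` name from the pack values shown in the next section:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: px-example-pvc                       # placeholder claim name
  namespace: default
spec:
  storageClassName: spectro-storage-class    # default storage class name created by this pack
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi                          # example size; adjust to your workload
```

Because the pack defaults the storage class to `volumeBindingMode: WaitForFirstConsumer`, the Portworx volume is only provisioned once a Pod actually mounts the claim.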
- -## Notable Parameters - -### Charts - Portworx: -```yaml -charts: - portworx-generic: - - license: - # Valid options for "type" are: essentials, saas, enterprise - # If you want to deploy the PX Enterprise Trial version, or need manual offline activation, - # select the "enterprise" type and set "activateLicense" to false. - type: essentials - # The next block only gets used if the type is set to "essentials" - essentials: - # Base64-decoded value of the px-essen-user-id value in the px-essential secret - # Find your Essentials Entitlement ID at https://central.portworx.com/profile - userId: 1234abcd-12ab-12ab-12ab-123456abcdef - # Base64-decoded value of the px-osb-endpoint value in the px-essential secret - # Leave at the default value unless there are special circumstances - endpoint: https://pxessentials.portworx.com/osb/billing/v1/register - # The next block only gets used if the type is set to "saas" - saas: - key: - # The next block only gets used if the type is set to "enterprise" - enterprise: - activateLicense: true - activationId: - # customLicenseServer: - # url: http://hostname:7070/fne/bin/capability - # importUnknownCa: true - # licenseBorrowInterval: 1w15m - # addFeatures: - # - feature1 - # - feature2 - - storageCluster: - # When autoGenerateName is true, a name of type "px-cluster-1234abcd-12ab-12ab-12ab-123456abcdef" is generated and the "name" field is ignored - autoGenerateName: false - name: "px-{{.spectro.system.cluster.name}}" - # annotations: - # If you need additional annotations, specify them here - spec: {} - # Use the Portworx Spec Builder at https://central.portworx.com/landing/login to define custom configurations, then paste the spec section here - - storageClass: - name: spectro-storage-class - isDefaultStorageClass: true - # annotations: - # If you need additional annotations, specify them here - allowVolumeExpansion: true - # Delete or Retain - reclaimPolicy: Delete - # WaitForFirstConsumer or Immediate - volumeBindingMode: WaitForFirstConsumer - parameters: - repl: "3" - priority_io: "high" - # sharedv4: true - # Add additional parameters as needed (https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/create-pvcs/dynamic-provisioning/) -``` -# Selecting a license model - -This pack can install Portworx in 3 different licensing modes: - -* **Essentials**: a free Portworx license with limited functionality that allows you to run small production or proof-of-concept workloads. Essentials limits capacity and advanced features, but otherwise functions the same way as the fully-featured Portworx Enterprise version of Portworx. - - -* **Enterprise**: the fully featured version of Portworx. If you install this model without a valid key, Portworx will automatically enter a 30-day trial mode. - - -* **Enterprise SaaS PAYG**: the fully featured version of Portworx but using a SaaS license key that allows unlimited use and in-arrears billing. If you install this model without a valid key, Portworx will automatically enter a 30-day trial mode. - - -Use the presets in the pack user interface to select which license model you want to use, then update the `charts.portworx-generic.license` section for your chosen license model. - -
- - - - -```yaml - license: - type: essentials - essentials: - # Base64-decoded value of the px-essen-user-id value in the px-essential secret - # Find your Essentials Entitlement ID at https://central.portworx.com/profile - userId: 1234abcd-12ab-12ab-12ab-123456abcdef - # Base64-decoded value of the px-osb-endpoint value in the px-essential secret - # Leave at the default value unless there are special circumstances - endpoint: https://pxessentials.portworx.com/osb/billing/v1/register -``` - - - - -```yaml - license: - type: saas - saas: - key: -``` - - - - - -```yaml - license: - type: enterprise - enterprise: - activateLicense: true - activationId: - # customLicenseServer: - # url: http://hostname:7070/fne/bin/capability - # importUnknownCa: true - # licenseBorrowInterval: 1w15m - # addFeatures: - # - feature1 - # - feature2 -``` - - - - - -# Selecting a storage specification - -This pack can install Portworx in various different storage environment: - -* **Using existing disks (generic)**: This mode does not integrate with any particular storage solution, it just uses existing disks available on the nodes. - - -* **AWS Cloud Storage**: This mode integrates with Amazon EBS block volumes and allows EKS and EC2 kubernetes clusters to dynamically attach EBS volumes to worker nodes for Portworx. - - -* **Azure Cloud Storage**: This mode integrates with Azure block storage and allows AKS and regular Azure kubernetes clusters to dynamically attach Azure block storage to worker nodes for Portworx. - - -* **Google Cloud Storage**: This mode integrates with Google persistent disks and allows GKE and regular Google kubernetes clusters to dynamically attach persistent disks to worker nodes for Portworx. - - -* **VMware vSphere Datastores**: This mode integrates with VMware vSphere storage and allows kubernetes clusters on vSphere to dynamically attach vSAN and regular Datastore disks to worker nodes for Portworx. - - -* **Pure Storage Flash Array**: This mode integrates with Pure Storage Flash Arrays and allows kubernetes clusters to dynamically attach Flash Array disks over iSCSI to worker nodes for Portworx. - - -Use the presets in the pack user interface to select which storage specification you want to use, then update the `charts.portworx-generic.storageCluster` section to your specific needs. - -
- - - - -```yaml - storageCluster: - spec: - # Use the Portworx Spec Builder at https://central.portworx.com/landing/login to define custom configurations, then paste the spec section here - image: portworx/oci-monitor:2.11.2 - imagePullPolicy: Always - kvdb: - internal: true - # endpoints: - # - etcd:https://etcd.company.domain:2379 - # authSecret: px-kvdb-auth - storage: - useAll: true - journalDevice: auto - secretsProvider: k8s - stork: - enabled: true - args: - webhook-controller: "true" - autopilot: - enabled: true - csi: - enabled: true - monitoring: - prometheus: - enabled: false - exportMetrics: false -``` - - - - -```yaml - storageCluster: - annotations: - portworx.io/is-eks: "true" - spec: - # Use the Portworx Spec Builder at https://central.portworx.com/landing/login to define custom configurations, then paste the spec section here - image: portworx/oci-monitor:2.11.2 - imagePullPolicy: Always - kvdb: - internal: true - # endpoints: - # - etcd:https://etcd.company.domain:2379 - # authSecret: px-kvdb-auth - cloudStorage: - deviceSpecs: - - type=gp2,size=150 - kvdbDeviceSpec: type=gp2,size=150 - secretsProvider: k8s - stork: - enabled: true - args: - webhook-controller: "true" - autopilot: - enabled: true - csi: - enabled: true - monitoring: - prometheus: - enabled: false - exportMetrics: false -``` -### Prerequisites - -To deploy Portworx in an AWS environment, ensure the following IAM Policy is created in AWS and attached to the correct IAM Role: -
- -```yaml -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:AttachVolume", - "ec2:ModifyVolume", - "ec2:DetachVolume", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:DeleteTags", - "ec2:DeleteVolume", - "ec2:DescribeTags", - "ec2:DescribeVolumeAttribute", - "ec2:DescribeVolumesModifications", - "ec2:DescribeVolumeStatus", - "ec2:DescribeVolumes", - "ec2:DescribeInstances", - "autoscaling:DescribeAutoScalingGroups" - ], - "Resource": [ - "*" - ] - } - ] -} -``` - -* When deploying a regular Kubernetes cluster on AWS EC2 using Palette, attach the policy to the `nodes.cluster-api-provider-aws.sigs.k8s.io` IAM Role. Or alternatively, edit the AWS cloud account in Palette, enable the `Add IAM Policies` option, and select the Portworx IAM Policy described above. This will automatically attach the IAM Policy to the correct IAM Role. - -* When deploying an EKS cluster, use the `managedMachinePool.roleAdditionalPolicies` option in the `kubernetes-eks` pack to automatically attach the Portworx IAM Policy to the EKS worker pool IAM role that Palette will manage for you. For example: - -```yaml -managedMachinePool: - roleAdditionalPolicies: - - "arn:aws:iam::012345678901:policy/my-portworx-policy" -``` - -
- - -
- - -```yaml - storageCluster: - annotations: - portworx.io/is-aks: "true" - spec: - # Use the Portworx Spec Builder at https://central.portworx.com/landing/login to define custom configurations, then paste the spec section here - image: portworx/oci-monitor:2.11.2 - imagePullPolicy: Always - kvdb: - internal: true - # endpoints: - # - etcd:https://etcd.company.domain:2379 - # authSecret: px-kvdb-auth - cloudStorage: - deviceSpecs: - - type=Premium_LRS,size=150 - kvdbDeviceSpec: type=Premium_LRS,size=150 - secretsProvider: k8s - stork: - enabled: true - args: - webhook-controller: "true" - autopilot: - enabled: true - csi: - enabled: true - monitoring: - prometheus: - enabled: false - exportMetrics: false - env: - - name: AZURE_CLIENT_SECRET - valueFrom: - secretKeyRef: - name: px-azure - key: AZURE_CLIENT_SECRET - - name: AZURE_CLIENT_ID - valueFrom: - secretKeyRef: - name: px-azure - key: AZURE_CLIENT_ID - - name: AZURE_TENANT_ID - valueFrom: - secretKeyRef: - name: px-azure - key: AZURE_TENANT_ID - azureSecret: - tenantId: "your_azure_tenant_id" - clientId: "your_azure_client_id" - clientSecret: "your_client_secret" -``` - - - - -```yaml - storageCluster: - annotations: - portworx.io/is-gke: "true" - spec: - # Use the Portworx Spec Builder at https://central.portworx.com/landing/login to define custom configurations, then paste the spec section here - image: portworx/oci-monitor:2.11.2 - imagePullPolicy: Always - kvdb: - internal: true - # endpoints: - # - etcd:https://etcd.company.domain:2379 - # authSecret: px-kvdb-auth - cloudStorage: - deviceSpecs: - - type=pd-standard,size=150 - kvdbDeviceSpec: type=pd-standard,size=150 - secretsProvider: k8s - stork: - enabled: true - args: - webhook-controller: "true" - autopilot: - enabled: true - csi: - enabled: true - monitoring: - prometheus: - enabled: false - exportMetrics: false -``` - - - - -```yaml - storageCluster: - spec: - # Use the Portworx Spec Builder at https://central.portworx.com/landing/login to define custom configurations, then paste the spec section here - image: portworx/oci-monitor:2.11.2 - imagePullPolicy: Always - kvdb: - internal: true - # endpoints: - # - etcd:https://etcd.company.domain:2379 - # authSecret: px-kvdb-auth - cloudStorage: - deviceSpecs: - - type=lazyzeroedthick,size=150 - kvdbDeviceSpec: type=lazyzeroedthick,size=32 - secretsProvider: k8s - stork: - enabled: true - args: - webhook-controller: "true" - autopilot: - enabled: true - csi: - enabled: true - monitoring: - prometheus: - enabled: false - exportMetrics: false - env: - - name: VSPHERE_INSECURE - value: "true" - - name: VSPHERE_USER - valueFrom: - secretKeyRef: - name: px-vsphere-secret - key: VSPHERE_USER - - name: VSPHERE_PASSWORD - valueFrom: - secretKeyRef: - name: px-vsphere-secret - key: VSPHERE_PASSWORD - - name: VSPHERE_VCENTER - value: "my-vcenter.company.local" - - name: VSPHERE_VCENTER_PORT - value: "443" - - name: VSPHERE_DATASTORE_PREFIX - value: "datastore" - - name: VSPHERE_INSTALL_MODE - value: "shared" - vsphereSecret: - user: "username_for_vCenter_here" - password: "your_password" -``` - - - - -```yaml - storageCluster: - spec: - # Use the Portworx Spec Builder at https://central.portworx.com/landing/login to define custom configurations, then paste the spec section here - image: portworx/oci-monitor:2.11.2 - imagePullPolicy: Always - kvdb: - internal: true - # endpoints: - # - etcd:https://etcd.company.domain:2379 - # authSecret: px-kvdb-auth - cloudStorage: - deviceSpecs: - - size=150 - kvdbDeviceSpec: size=32 - secretsProvider: 
k8s - stork: - enabled: true - args: - webhook-controller: "true" - autopilot: - enabled: true - csi: - enabled: true - monitoring: - prometheus: - enabled: false - exportMetrics: false - env: - - name: PURE_FLASHARRAY_SAN_TYPE - value: "ISCSI" -``` - -To activate the Pure Flash Array integration, you will need to create a `secret` on your cluster named `px-pure-secret` that contains your Flash Array license. You can do this by running the below kubectl command: - -``` -kubectl create secret generic px-pure-secret --namespace kube-system --from-file=pure.json= -``` - - - -
- -# Integrating into an External Etcd - -Portworx Enterprise supports multiple Etcd scenarios. - -By default, Portworx uses its internal key-value store (KVDB). However, you can integrate Portworx with an external Etcd server by following the steps below. -
- -1. Select the `Use External Kvdb over HTTP` or `Use External Kvdb over SSL` preset in the pack user interface. If your external Etcd server requires certificate authentication, you need the `Use External Kvdb over SSL` preset. - - -2. Configure the external Etcd endpoint(s) in `charts.portworx-generic.storageCluster.spec.kvdb.endpoints`. - - -3. When using the `Use External Kvdb over SSL` preset, leave the `charts.portworx-generic.storageCluster.spec.kvdb.authSecret` option at its default of `px-kvdb-auth`, since that is the name of the secret that will be created by this pack. - - -When using the `Use External Kvdb over SSL` preset, you additionally need to configure the `charts.portworx-generic.externalKvdb` section: -
- -1. Set `charts.portworx-generic.externalKvdb.useCertsForSSL` to `true` to enable certificate authentication. - - -2. Input your SSL certificates in the `cacert`, `cert`, and `key` sections of `charts.portworx-generic.externalKvdb`. The preset will give you cropped example values that you can overwrite with your actual PEM certificates. - - - -Make sure to follow the provided indentation style; otherwise, certs will not be imported correctly and will result in Portworx deployment failure. - - - -## Kvdb and Etcd Presets - -These are the three types of Presets that can be selected and modified. The pack defaults to the `Use Internal Kvdb` option. Change to a different preset if you need to connect to an external Etcd server. - -
- - - - -```yaml - storageCluster: - spec: - kvdb: - internal: true -``` - - - - -```yaml - storageCluster: - spec: - kvdb: - endpoints: - - etcd:http://etcd.company.domain:2379 -``` - - - - - -```yaml - storageCluster: - spec: - kvdb: - endpoints: - - etcd:http://etcd.company.domain:2379 - authSecret: px-kvdb-auth - - # External kvdb related config, only used if storageCluster.spec.kvdb.internal != true - externalKvdb: - useCertsForSSL: true - # The CA cert to use for etcd authentication. Make sure to follow the same indentation style as given in the example below - cacert: |- - -----BEGIN CERTIFICATE----- - MIIC3DCCAcQCCQCr1j968rOV3zANBgkqhkiG9w0BAQsFADAwMQswCQYDVQQGEwJV - < .. > - i9CNyx+CcwUCkWQzhrHBQA== - -----END CERTIFICATE----- - # The cert to use for etcd authentication. Make sure to follow the same indentation style as given in the example below - cert: |- - -----BEGIN CERTIFICATE----- - MIIDaTCCAlGgAwIBAgIJAPLC+6M3EezhMA0GCSqGSIb3DQEBCwUAMDAxCzAJBgNV - < .. > - ptWD/oDFCiCjlffyzg== - -----END CERTIFICATE----- - # The key to use for etcd authentication. Make sure to follow the same indentation style as given in the example below - key: |- - -----BEGIN RSA PRIVATE KEY----- - MIIEogIBAAKCAQEAsnJghz619GDZO+XLtx+UkL/w9ajQ9vtqxr79GcdvAPfCkfwX - < .. > - WsqUCBt5+DnOaDyvMkokP+T5tj/2LXemuIi4Q5nrOmw/WwVGGGs= - -----END RSA PRIVATE KEY----- -``` - - - - -
- -## References - -- [Portworx Install with Kubernetes](https://docs.portworx.com/portworx-install-with-kubernetes/) -- [Lighthouse](https://docs.portworx.com/reference/lighthouse/) -- [Installation Prerequisites](https://docs.portworx.com/install-portworx/prerequisites/) diff --git a/content/docs/06-integrations/00-prismacloud.md b/content/docs/06-integrations/00-prismacloud.md deleted file mode 100644 index e9d4f8c8be..0000000000 --- a/content/docs/06-integrations/00-prismacloud.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: 'prisma-cloud-compute' -metaTitle: 'prisma-cloud-compute' -metaDescription: 'prism-cloud-compute Security pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['security', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/prismacloud/blobs/sha256:9ddb035af0e9f299e5df178ebb3153e90383a5e42ded2c1a3f6c9470dd851c12?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Prisma Cloud Compute - -Prisma Cloud Compute is a cloud workload protection platform (CWPP) offering protection for hosts, containers, and server-less deployments in any cloud, and across the software lifecycle. Prisma Cloud Compute is cloud-native and API-enabled. It can protect tenant workloads, regardless of the underlying compute technology or the cloud deployment. -## Versions Supported - - - - - -**20.9.0** - - - - -## References - -https://docs.prismacloudcompute.com/docs/ diff --git a/content/docs/06-integrations/00-prometheus-agent.md b/content/docs/06-integrations/00-prometheus-agent.md deleted file mode 100644 index 13789891d8..0000000000 --- a/content/docs/06-integrations/00-prometheus-agent.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -title: 'Prometheus Agent' -metaTitle: 'Prometheus Agent' -metaDescription: 'Prometheus Agent Monitoring Pack' -hiddenFromNav: true -type: "integration" -hideToC: false -category: ['monitoring', 'amd64', 'arm64'] -logoUrl: 'https://registry.spectrocloud.com/v1/prometheus-operator/blobs/sha256:64589616d7f667e5f1d7e3c9a39e32c676e03518a318924e123738693e104ce0?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Prometheus Agent - -Prometheus is an open-source monitoring and alerting system that is designed to collect and analyze metrics from various systems and services. - -Prometheus is built around a time-series database that stores metrics data. It uses a flexible querying language called PromQL to extract and process metrics data. Prometheus also has a powerful alerting system that can be used to send notifications when specific conditions are met. - -Prometheus can be used to monitor a wide range of systems and services, including servers, containers, databases, and applications. It can be deployed in a variety of environments, including on-prem, cloud, and hybrid setups. - -The Prometheus Agent pack works in tandem with the [Prometheus Operator pack](/integrations/prometheus-operator). 
Check out the guides [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) and [Enable Monitoring on Host Cluster](/clusters/cluster-management/monitoring/deploy-agent) to learn how to create a monitoring stack with Prometheus for your Palette environment. - - -# Versions Supported - -**19.0.X** - -# Prerequisites - -* A host cluster that has the [Prometheus Operator pack](/integrations/prometheus-operator) installed. - -# Parameters - -The Prometheus agent supports all the parameters exposed by the Prometheus Helm Chart. Refer to the [Prometheus Helm Chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus#configuration) documentation for details. - -From a Palette perspective, you must provide a value for the `remoteWrite.url` parameter shown in the following example. - -
- -```yaml -charts: - prometheus: - server: - remoteWrite: - - url: "" -``` - -The `remoteWrite.url` is exposed by the [Prometheus Operator pack](/integrations/prometheus-operator) when installed in a cluster. You can find the Prometheus server URL by reviewing the details of the Kubernetes cluster hosting the Prometheus server. Use the URL exposed by the Prometheus service. - -The following image displays a host cluster with the Prometheus Operator pack installed. Use the URL exposed for port 9090 to populate the `remoteWrite.url` parameter. - -![A view of the cluster details page with a highlighted box around the Prometheus service URL](/integrations_prometheus-agent_cluster-detail-view.png) - -
- - - -The Prometheus server URL must be in the format `http://HOST:PORT/api/v1/write`. -Example: `http://a2c938972938b4f0daee5f56edbd40af-1690032247.us-east-1.elb.amazonaws.com:9090/api/v1/write` - - - -If the Prometheus server is configured with authentication, add the authentication parameters and replace the username and password placeholders with the actual credential values. - -
- -```yaml -charts: - prometheus: - server: - remoteWrite: - - url: "" - remote_timeout: "5s" - basic_auth: - username: "" - password: -``` - -# Usage - -The Prometheus agent pack works out-of-the-box and only requires you to provide a Prometheus server URL. Add the Prometheus agent pack to a cluster profile to get started with Prometheus. You can create a new cluster profile that has the Prometheus agent as an add-on pack or you can [update an existing cluster profile](/cluster-profiles/task-update-profile) by adding the Prometheus agent pack. - - -Log in to the Grafana dashboard to view and create dashboards. You can find the Grafana dashboard URL by reviewing the details of the Kubernetes cluster hosting the Prometheus server. Use the URL exposed by the **prometheus-operator-kube-prometheus-stack-grafana** service. - -![The URL of the service prometheus-operator-kube-prometheus-stack-grafana](/integrations_prometheus-agent_cluster-detail-view-grafana.png) - - -Palette exposes a set of Grafana dashboards by default. You can find the Spectro Cloud dashboards by navigating to Grafana's left **Main Menu** > **Dashboards** and expanding the **Spectro Cloud** folder. - -The following dashboards are available by default: - -- Kubernetes/System/API Server: A view of the resources and status of the Kubernetes cluster hosting the Prometheus server. - - -- Kubernetes/Views/Global: An aggregate view of all the resources used by Kubernetes clusters. - - -- Kubernetes/Views/Namespaces: An aggregate view of all the resources used by a specific Kubernetes namespace. - - -- Kubernetes/Views/Nodes: A view of all nodes with the Prometheus agent installed. - - -- Kubernetes/Views/Pods: A view of all the pods in a node with the Prometheus agent installed. - -
- - - -Use the filters to narrow down the information displayed. All Palette dashboards include the **project** and **cluster** filter. - - - - -We encourage you to check out the [Grafana](https://grafana.com/tutorials/) tutorials and learning resources to learn more about Grafana. - -# Terraform - -You can retrieve details about the Prometheus agent pack by using the following Terraform code. - -```tf -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "pack-info" { - name = "prometheus-agent" - version = "19.0.2" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - -# References - -- [Prometheus Operator pack](/integrations/prometheus-operator) - - -- [Prometheus Helm Chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus#configuration) - - -- [Grafana Tutorials](https://grafana.com/tutorials/) diff --git a/content/docs/06-integrations/00-prometheus-cluster-metrics.md b/content/docs/06-integrations/00-prometheus-cluster-metrics.md deleted file mode 100644 index 7f87cc739e..0000000000 --- a/content/docs/06-integrations/00-prometheus-cluster-metrics.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: 'Prometheus Cluster Metrics' -metaTitle: 'Prometheus Cluster Metrics' -metaDescription: "Use the Prometheus Cluster Metrics addon pack to expose Palette resource metrics" -hiddenFromNav: true -type: "integration" -hideToC: false -category: ['monitoring', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/prometheus-operator/blobs/sha256:64589616d7f667e5f1d7e3c9a39e32c676e03518a318924e123738693e104ce0?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -The Prometheus Cluster Metrics pack exposes Palette-specific host cluster metrics to Prometheus. You can use this data to learn about the state of your clusters, resource utilization, and more. Use the [Spectro Cloud Grafana Dashboards](/integrations/grafana-spectrocloud-dashboards) pack to access the metric data through Grafana dashboards. - - -# Versions Supported - -**3.4.X** - - -# Prerequisites - -* A host cluster that has the [Prometheus Operator pack](/integrations/prometheus-operator) `v45.4.X` or greater installed. Check out the [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) for instructions on how to deploy a monitoring stack. - - -* A cluster profile with the [Prometheus Agent](/integrations/prometheus-agent) pack `v19.0.X` or greater installed. - -# Usage - -The Prometheus Cluster Metrics requires no additional configuration and is designed to work out-of-the-box. - -You can learn how to add the Prometheus Cluster Metrics to your cluster by following the steps outlined in the [Enable Monitoring on Host Cluster](/clusters/cluster-management/monitoring/deploy-agent/). - -Use the [Spectro Cloud Grafana Dashboards](/integrations/grafana-spectrocloud-dashboards) pack to access the metric data through Grafana dashboards. - -# Terraform - -```terraform -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "cluster-metrics" { - name = "spectro-cluster-metrics" - version = "3.3.0" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - -# References - -- [Enable Monitoring on Host Cluster](/clusters/cluster-management/monitoring/deploy-agent/). 
- - -- [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) - - -- [Prometheus Operator pack](/integrations/prometheus-operator) - - -- [Prometheus Agent](/integrations/prometheus-agent) - - -- [Spectro Cloud Grafana Dashboards](/integrations/grafana-spectrocloud-dashboards) \ No newline at end of file diff --git a/content/docs/06-integrations/00-prometheus-operator.md b/content/docs/06-integrations/00-prometheus-operator.md deleted file mode 100644 index 0ea90b9d21..0000000000 --- a/content/docs/06-integrations/00-prometheus-operator.md +++ /dev/null @@ -1,1010 +0,0 @@ ---- -title: 'Prometheus Operator' -metaTitle: 'Prometheus Operator' -metaDescription: 'Prometheus Operator Monitoring pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -hideToC: false -category: ['monitoring', 'amd64', 'arm64'] -logoUrl: 'https://registry.spectrocloud.com/v1/prometheus-operator/blobs/sha256:64589616d7f667e5f1d7e3c9a39e32c676e03518a318924e123738693e104ce0?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Prometheus Operator - -Prometheus is an open-source monitoring system that is designed to collect and analyze metrics from various sources, such as applications, servers, and networks. It is widely used in the DevOps world to monitor the health and performance of applications and infrastructure. Prometheus stores metrics in a time-series database and provides a query language for analyzing the data. It also includes a powerful alerting system that can notify operators when thresholds are breached. - -The Prometheus Operator is a tool that simplifies the deployment and management of Prometheus in a Kubernetes cluster. It automates tasks such as configuring Prometheus, creating and managing Prometheus rules and alerts and scaling Prometheus instances based on demand. The Operator uses Kubernetes custom resources to define and manage Prometheus instances and related resources, such as ServiceMonitors, which enable Prometheus to discover and monitor services running in the cluster. - - -You can use the Prometheus Operator to create a monitoring stack that other host clusters point to and forward metrics to. Check out the guide [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) to learn how to create a monitoring stack with Prometheus for your Palette environment. - -
- - - -We recommend you use version v44.3.x or greater moving forward for a simplified and improved user experience when creating a monitoring stack for your architecture. Starting with version v44.3.x the remote monitoring feature is supported. Check out the [Prometheus Remote Write Tuning](https://prometheus.io/docs/practices/remote_write/) to learn more about the remote monitoring feature. - - - -## Versions Supported - - - - -## Prerequisites - -* Kubernetes v1.16 or greater. - - -* The minimum required size for the Prometheus server is 4 CPU, 8 GB Memory, and 10 GB Storage. We recommend the monitoring stack have 1.5x to 2x the minimum required size: - - Recommended size: - - 8 CPU - - 16 GB Memory - - 20 GB Storage - - - - As new clusters with the Prometheus agent are added to your environment, review the resource utilization and consider increasing resources if needed. As the Prometheus documentation recommends, each additional agent requires the following resources from the monitoring stack: - - Each added agent: - - 0.1 CPU - - 250 MiB Memory - - 1 GB Storage - - - Refer to the [Prometheus Operational aspects](https://prometheus.io/docs/prometheus/latest/storage/#operational-aspects) documentation for additional guidance. - -## Parameters - -The Prometheus operator supports all the parameters exposed by the kube-prometheus-stack Helm Chart. Refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) documentation for details. - - -The Prometheus Operator pack has one parameter you must initialize `grafana.adminPassword`: - -
- -```yaml -charts: - kube-prometheus-stack: - grafana: - adminPassword: "" -``` - -Use the `grafana.adminPassword` parameter to assign a password to the Grafana admin user `admin`. - -Additional parameters you should be aware of can be found by expanding the **Presets** options. You can modify the preset settings when you create the profile or when you deploy the cluster and review the cluster profile. - -![A view of the pack's preset drawer expanded with radio buttons](/integrations_prometheus-operator_operator-preset-view-expanded.png) - -Review the usage section below to learn more about each preset option. - -
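If you want to reserve the capacity recommended in the Prerequisites section for the Prometheus server itself, you can set standard Kubernetes resource requests through the chart values. The following is a sketch, assuming the upstream `prometheus.prometheusSpec.resources` value path of the kube-prometheus-stack chart; the numbers simply mirror the guidance above:

```yaml
charts:
  kube-prometheus-stack:
    prometheus:
      prometheusSpec:
        resources:
          requests:
            cpu: "4"        # minimum guidance for the Prometheus server
            memory: 8Gi
          limits:
            cpu: "8"        # recommended headroom as more agents forward metrics
            memory: 16Gi
```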
- -## Usage - -Check out the guide [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) to learn how to create a monitoring stack with Prometheus for your Palette environment. - -
- -#### Email Alerts - -You can configure the Prometheus server to send email alerts to a set of contacts. Toggle the **Email Alerts** button to enable email alerting. Update the `alertmanager.config.receivers` settings with all the required email setting values. - -
- -```yaml -charts: - kube-prometheus-stack: - alertmanager: - config: - receivers: - - name: email-alert - email_configs: - - to: @.com - send_resolved: true - from: @.com - smarthost: smtp..com:587 - auth_username: @.com - auth_identity: @.com - auth_password: -``` - -Refer to the [Prometheus Alertmanager Configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation to learn more about Alertmanager. - -
- -#### Grafana Ingress - -You can enable an ingress endpoint for Grafana that will deploy an NGINX ingress controller. This feature can be used to enable HTTPS and require authentication for all Prometheus API requests. - -If you do not enable the ingress option, then by default a service with a load balancer will be created that exposes port 80. - - -Toggle the **Enable** button to enable the use of Ingress. - -
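As a rough sketch of what enabling the Grafana ingress can look like in the pack values (the hostname and TLS secret are placeholders, and the exact keys follow the upstream Grafana sub-chart, which the preset may populate for you):

```yaml
charts:
  kube-prometheus-stack:
    grafana:
      ingress:
        enabled: true
        ingressClassName: nginx        # class of the NGINX controller deployed with this option
        hosts:
          - grafana.example.com        # placeholder hostname
        tls:
          - secretName: grafana-tls    # placeholder TLS secret enabling HTTPS
            hosts:
              - grafana.example.com
```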
- - -#### Thanos SideCar - -[Thanos](https://prometheus-operator.dev/docs/operator/thanos/) is an open-source system for running large-scale, distributed, and highly available Prometheus setups. Thanos allows Prometheus to store data for extended periods in object storage, such as Amazon S3 or Google Cloud Storage, instead of a local disk. This enables Prometheus to scale horizontally without the risk of using up local storage space. - - -Toggle the **Enable** button to enable the use of Thanos. - -
- -#### Object Store - -Select the Thanos object storage type you will use. Review the `thanos.objstoreConfig` parameters to configure the use of object storage with Thanos. Refer to the [Thanos Object Storage](https://github.com/thanos-io/thanos/blob/main/docs/storage.md) documentation to learn more about how to configure each object storage. - -
- -```yaml -charts: - kube-prometheus-stack: - prometheus: - prometheusSpec: - thanos: - objstoreConfig: -``` - -#### Thanos Ruler Object Store - -By default, Thanos Ruler event data is saved in object storage specified for Thanos, but you can specify a different object storage for event data. Refer to the [Thanos Ruler](https://prometheus-operator.dev/docs/operator/thanos/?#thanos-ruler) resource to learn more. - - -
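For either object store choice, the value supplied to `thanos.objstoreConfig` (left empty in the snippet above) generally follows the standard Thanos object storage format. For an S3-compatible bucket, a sketch with placeholder bucket, endpoint, and credentials looks like this:

```yaml
type: S3
config:
  bucket: thanos-metrics-bucket          # placeholder bucket name
  endpoint: s3.us-east-1.amazonaws.com   # placeholder S3 endpoint for your region
  access_key: <ACCESS_KEY>               # placeholder credentials; prefer referencing a secret
  secret_key: <SECRET_KEY>
```

Refer to the Thanos Object Storage documentation linked above for the equivalent GCS and Azure configuration blocks.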
- -#### Remote Monitoring - -You can configure the Prometheus server to accept metrics from Prometheus agents and become a centralized aggregation point for all Kubernetes metrics. Enabling this feature will expose port 9090 of the *prometheus-operator-prometheus* service. Use the generated service URL to provide other Kubernetes clusters with the installed [Prometheus Agent](/integrations/prometheus-agent) so that cluster metrics can be forwarded to the Prometheus server. - - - -The remote monitoring feature is configured with defaults to help you consume this feature out-of-the-box. You can change any configuration related to remote monitoring to fine-tune settings for your environment. - -Refer to the [Prometheus Remote Write Tuning](https://prometheus.io/docs/practices/remote_write/) resource to learn more about configuration options. - - -To get started with remote monitoring, check out the [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) guide. - -
- -#### Palette Resources Monitoring - -You can access internal Palette metrics in Grafana by adding the [Prometheus Cluster Metrics](/integrations/prometheus-cluster-metrics) pack to all your client clusters. Refer to the [Enable Monitoring on Host Cluster](/clusters/cluster-management/monitoring/deploy-agent) guide to learn more. - -
- - - -#### Persistent Storage - -You can configure the Prometheus Operator to use persistent storage. To enable persistent storage, add the following code snippet to the `kube-prometheus-stack.prometheus.prometheusSpec.storageSpec` configuration block in the pack's YAML configuration file. The code snippet below creates a Persistent Volume Claim (PVC) for the Prometheus Operator. -
- - -```yaml -kube-prometheus-stack: - prometheus: - prometheusSpec: - storageSpec: - volumeClaimTemplate: - metadata: - name: prom-operator-pvc - spec: - storageClassName: spectro-storage-class - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 5Gi -``` - -### Dependencies - -The Prometheus Operator pack installs the following dependencies: - -* [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) -* [Prometheus](https://prometheus.io/) -* [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager/) - -* [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) -* [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) -* [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) -* Service monitors to scrape internal Kubernetes components - - -
- - - -## Prerequisites - -* Kubernetes v1.16 or greater. - - -* The minimum required size for the Prometheus server is 4 CPU, 8 GB Memory, and 10 GB Storage. We recommend the monitoring stack have 1.5x to 2x the minimum required size: - - Recommended size: - - 8 CPU - - 16 GB Memory - - 20 GB Storage - - - - As new clusters with the Prometheus agent are added to your environment, review the resource utilization and consider increasing resources if needed. As the Prometheus documentation recommends, each additional agent requires the following resources from the monitoring stack: - - Each added agent: - - 0.1 CPU - - 250 MiB Memory - - 1 GB Storage - - - Refer to the [Prometheus Operational aspects](https://prometheus.io/docs/prometheus/latest/storage/#operational-aspects) documentation for additional guidance. - -## Parameters - -The Prometheus operator supports all the parameters exposed by the kube-prometheus-stack Helm Chart. Refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) documentation for details. - - -The Prometheus Operator pack has one parameter you must initialize `grafana.adminPassword`: - -
- -```yaml -charts: - kube-prometheus-stack: - grafana: - adminPassword: "" -``` - -Use the `grafana.adminPassword` parameter to assign a password to the Grafana admin user `admin`. - -Additional parameters you should be aware of can be found by expanding the **Presets** options. You can modify the preset settings when you create the profile or when you deploy the cluster and review the cluster profile. - -![A view of the pack's preset drawer expanded with radio buttons](/integrations_prometheus-operator_operator-preset-view-expanded.png) - -Review the usage section below to learn more about each preset option. - -
- -## Usage - -Check out the guide [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) to learn how to create a monitoring stack with Prometheus for your Palette environment. - -
- -#### Email Alerts - -You can configure the Prometheus server to send email alerts to a set of contacts. Toggle the **Email Alerts** button to enable email alerting. Update the `alertmanager.config.receivers` settings with all the required email setting values. - -
- -```yaml -charts: - kube-prometheus-stack: - alertmanager: - config: - receivers: - - name: email-alert - email_configs: - - to: @.com - send_resolved: true - from: @.com - smarthost: smtp..com:587 - auth_username: @.com - auth_identity: @.com - auth_password: -``` - -Refer to the [Prometheus Alertmanager Configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation to learn more about Alertmanager. - -
- -#### Grafana Ingress - -You can enable an ingress endpoint for Grafana that will deploy an NGINX ingress controller. This feature can be used to enable HTTPS and require authentication for all Prometheus API requests. - -If you do not enable the ingress option, then by default a service with a load balancer will be created that exposes port 80. - - -Toggle the **Enable** button to enable the use of Ingress. - -
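-
-If you prefer to manage the ingress definition in the pack values rather than through the preset toggle alone, the Grafana sub-chart in kube-prometheus-stack exposes ingress settings along the lines of the sketch below. The host name and TLS secret name are placeholders, and the exact keys can vary between chart versions, so confirm them against the kube-prometheus-stack documentation.
-
-```yaml
-charts:
-  kube-prometheus-stack:
-    grafana:
-      ingress:
-        enabled: true
-        ingressClassName: nginx
-        hosts:
-          - "grafana.example.com"
-        tls:
-          - secretName: grafana-tls
-            hosts:
-              - "grafana.example.com"
-```
-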
- - -#### Thanos SideCar - -[Thanos](https://prometheus-operator.dev/docs/operator/thanos/) is an open-source system for running large-scale, distributed, and highly available Prometheus setups. Thanos allows Prometheus to store data for extended periods in object storage, such as Amazon S3 or Google Cloud Storage, instead of a local disk. This enables Prometheus to scale horizontally without the risk of using up local storage space. - - -Toggle the **Enable** button to enable the use of Thanos. - -
- -#### Object Store - -Select the Thanos object storage type you will use. Review the `thanos.objstoreConfig` parameters to configure the use of object storage with Thanos. Refer to the [Thanos Object Storage](https://github.com/thanos-io/thanos/blob/main/docs/storage.md) documentation to learn more about how to configure each object storage. - -
- -```yaml -charts: - kube-prometheus-stack: - prometheus: - prometheusSpec: - thanos: - objstoreConfig: -``` - -#### Thanos Ruler Object Store - -By default, Thanos Ruler event data is saved in object storage specified for Thanos, but you can specify a different object storage for event data. Refer to the [Thanos Ruler](https://prometheus-operator.dev/docs/operator/thanos/?#thanos-ruler) resource to learn more. - - -
- -#### Remote Monitoring - -You can configure the Prometheus server to accept metrics from Prometheus agents and become a centralized aggregation point for all Kubernetes metrics. Enabling this feature will expose port 9090 of the *prometheus-operator-prometheus* service. Use the generated service URL to provide other Kubernetes clusters with the installed [Prometheus Agent](/integrations/prometheus-agent) so that cluster metrics can be forwarded to the Prometheus server. - - - -The remote monitoring feature is configured with defaults to help you consume this feature out-of-the-box. You can change any configuration related to remote monitoring to fine-tune settings for your environment. - -Refer to the [Prometheus Remote Write Tuning](https://prometheus.io/docs/practices/remote_write/) resource to learn more about configuration options. - - -To get started with remote monitoring, check out the [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) guide. - -
- -#### Palette Resources Monitoring - -You can access internal Palette metrics in Grafana by adding the [Prometheus Cluster Metrics](/integrations/prometheus-cluster-metrics) pack to all your client clusters. Refer to the [Enable Monitoring on Host Cluster](/clusters/cluster-management/monitoring/deploy-agent) guide to learn more. - -
- -### Dependencies - -The Prometheus Operator pack installs the following dependencies: - -* [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) -* [Prometheus](https://prometheus.io/) -* [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager/) - -* [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) -* [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) -* [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) -* Service monitors to scrape internal Kubernetes components - - - -
- - - -## Prerequisites - -* Kubernetes v1.16 or greater. - - -* The minimum required size for the Prometheus server is 4 CPU, 8 GB Memory, and 10 GB Storage. We recommend the monitoring stack have 1.5x to 2x the minimum required size: - - Recommended size: - - 8 CPU - - 16 GB Memory - - 20 GB Storage. - - - As new clusters with the Prometheus agent are added to your environment, review the resource utilization and consider increasing resources if needed. As the Prometheus documentation recommends, each additional agent requires the following resources from the monitoring stack: - - Each added agent: - - 0.1 CPU - - 250 MiB Memory - - 1 GB Storage. - - Refer to the [Prometheus Operational aspects](https://prometheus.io/docs/prometheus/latest/storage/#operational-aspects) documentation for additional guidance. - -## Parameters - -The Prometheus operator supports all the parameters exposed by the kube-prometheus-stack Helm Chart. Refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) documentation for details. - -The Prometheus Operator pack has one parameter you must initialize, `grafana.adminPassword`: - -
- -```yaml -charts: - kube-prometheus-stack: - grafana: - adminPassword: "" -``` - -Use the `grafana.adminPassword` parameter to assign a password to the Grafana admin user `admin`. - -Additional parameters you should be aware of can be found by expanding the **Presets** options. You can modify the preset settings when you create the profile or when you deploy the cluster and review the cluster profile. - -![A view of the pack's preset drawer expanded with radio buttons](/integrations_prometheus-operator_operator-preset-view-expanded.png) - -Review the usage section below to learn more about each preset option. - -
- -## Usage - -Check out the guide [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) to learn how to create a monitoring stack with Prometheus for your Palette environment. - -
- -#### Email Alerts - -You can configure the Prometheus server to send email alerts to a set of contacts. Toggle the **Email Alerts** button to enable email alerting. Update the `alertmanager.config.receivers` settings with all the required email setting values. - -
- -```yaml -charts: - kube-prometheus-stack: - alertmanager: - config: - receivers: - - name: email-alert - email_configs: - - to: @.com - send_resolved: true - from: @.com - smarthost: smtp..com:587 - auth_username: @.com - auth_identity: @.com - auth_password: -``` - -Refer to the [Prometheus Alertmanager Configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation to learn more about Alertmanager. - -
- -#### Grafana Ingress - -You can enable an ingress endpoint for Grafana that will deploy an NGINX ingress controller. This feature can be used to enable HTTPS and require authentication for all Prometheus API requests. - -If you do not enable the ingress option, then by default a service with a load balancer will be created that exposes port 80. - - -Toggle the **Enable** button to enable the use of Ingress. - -
- - -#### Thanos SideCar - -[Thanos](https://prometheus-operator.dev/docs/operator/thanos/) is an open-source system for running large-scale, distributed, and highly available Prometheus setups. Thanos allows Prometheus to store data for extended periods in object storage, such as Amazon S3 or Google Cloud Storage, instead of a local disk. This enables Prometheus to scale horizontally without the risk of using up local storage space. - - -Toggle the **Enable** button to enable the use of Thanos. - -
- -#### Object Store - -Select the Thanos object storage type you will use. Review the `thanos.objstoreConfig` parameters to configure the use of object storage with Thanos. Refer to the [Thanos Object Storage](https://github.com/thanos-io/thanos/blob/main/docs/storage.md) documentation to learn more about how to configure each object storage. - -
- -```yaml -charts: - kube-prometheus-stack: - prometheus: - prometheusSpec: - thanos: - objstoreConfig: -``` - -#### Thanos Ruler Object Store - -By default, Thanos Ruler event data is saved in object storage specified for Thanos, but you can specify a different object storage for event data. Refer to the [Thanos Ruler](https://prometheus-operator.dev/docs/operator/thanos/?#thanos-ruler) resource to learn more. - - -
- -#### Remote Monitoring - -You can configure the Prometheus server to accept metrics from Prometheus agents and become a centralized aggregation point for all Kubernetes metrics. Enabling this feature will expose port 9090 of the *prometheus-operator-prometheus* service. Use the generated service URL to provide other Kubernetes clusters with the [Prometheus Agent](/integrations/prometheus-agent) installed so that cluster metrics can be forwarded to the Prometheus server. - - -The remote monitoring feature is configured with defaults to help you consume this feature out-of-the-box. You can change any configuration related to remote monitoring to fine-tune settings for your environment. - -Refer to the [Prometheus Remote Write](https://prometheus.io/docs/practices/remote_write/) resource to learn more about configuration options. - -To get started with remote monitoring, check out the [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) guide. - -### Dependencies - -The Prometheus Operator pack installs the following dependencies: - -* [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) -* [Prometheus](https://prometheus.io/) -* [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager/). -* [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) -* [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) -* [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) -* and the service monitors to scrape internal Kubernetes components. - - -
- - - -## Prerequisites - -* Kubernetes v1.16 or greater. - -## Parameters - -The Prometheus operator supports all the parameters exposed by the kube-prometheus-stack Helm Chart. Refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) documentation for details. - -The Prometheus Operator pack has one parameter you must initialize, `grafana.adminPassword`: - -
- -```yaml -charts: - kube-prometheus-stack: - grafana: - adminPassword: "" -``` - -Use the `grafana.adminPassword` parameter to assign a password to the Grafana admin user `admin`. - -Additional parameters you should be aware of can be found by expanding the **Presets** options. You can modify the preset settings when you create the profile or when you deploy the cluster and review the cluster profile. - -![A view of the pack's preset drawer expanded with radio buttons](/integrations_prometheus-operator_operator-preset-view-expanded.png) - -Review the usage section below to learn more about each preset option. - -
- -#### Email Alerts - -You can configure the Prometheus server to send email alerts to a set of contacts. Toggle the **Email Alerts** button to enable email alerting. Update the `alertmanager.config.receivers` settings with all the required email setting values. - -
- -```yaml -charts: - kube-prometheus-stack: - alertmanager: - config: - receivers: - - name: email-alert - email_configs: - - to: @.com - send_resolved: true - from: @.com - smarthost: smtp..com:587 - auth_username: @.com - auth_identity: @.com - auth_password: -``` - -Refer to the [Prometheus Alertmanager Configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation to learn more about Alertmanager. - -
- -#### Grafana Ingress - -You can enable an ingress endpoint for Grafana that will deploy an NGINX ingress controller. This feature can be used to enable HTTPS and require authentication for all Prometheus API requests. - -If you do not enable the ingress option, by default a service with a load balancer will be created that exposes port 80. - - -Toggle the **Enable** button to enable the use of Ingress. - -
- - -#### Thanos SideCar - -[Thanos](https://prometheus-operator.dev/docs/operator/thanos/) is an open-source system for running large-scale, distributed, and highly available Prometheus setups. Thanos allows Prometheus to store data for extended periods in object storage, such as Amazon S3 or Google Cloud Storage, instead of a local disk. This enables Prometheus to scale horizontally without the risk of using up local storage space. - -Toggle the **Enable** button to enable the use of Thanos. - -
- -#### Object Store - -Select the Thanos object storage type you will use. Review the `thanos.objstoreConfig` parameters to configure the use of object storage with Thanos. Refer to the [Thanos Object Storage](https://github.com/thanos-io/thanos/blob/main/docs/storage.md) documentation to learn more about how to configure each object storage. - -
- -```yaml -charts: - kube-prometheus-stack: - prometheus: - prometheusSpec: - thanos: - objstoreConfig: -``` - -#### Thanos Ruler Object Store - -You can specify a different object storage to store the Thanos Ruler event data. Defaults to the object storage specified for Thanos. Refer to the [Thanos Ruler](https://prometheus-operator.dev/docs/operator/thanos/?#thanos-ruler) resource to learn more. - -
- - -### Dependencies - -The Prometheus Operator pack installs the following dependencies: - -* [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) -* [Prometheus](https://prometheus.io/) -* [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager/) -* [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) -* [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) -* [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) -* Service monitors to scrape internal Kubernetes components -
- - - -## Prerequisites - -* Kubernetes v1.16 or greater. - -## Parameters - -The Prometheus operator supports all the parameters exposed by the kube-prometheus-stack Helm Chart. Refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) documentation for details. - -The Prometheus Operator pack has one parameter you must initialize, `grafana.adminPassword`: - -
- -```yaml -charts: - kube-prometheus-stack: - grafana: - adminPassword: "" -``` - -Use the `grafana.adminPassword` parameter to assign a password to the Grafana admin user `admin`. - -Additional parameters you should be aware of can be found by expanding the **Presets** view of the pack. You can modify the preset settings during the profile creation process or the cluster deployment process when reviewing the cluster profile. - -![A view of the pack's preset drawer expanded with radio buttons](/integrations_prometheus-operator_operator-preset-view-expanded.png) - -Review the usage section below to learn more about each preset option. - -
- -#### Email Alerts - -You can configure the Prometheus server to send email alerts to a set of contacts. Toggle the **Email Alerts** button to enable email alerting. Update the `alertmanager.config.receivers` settings with all the required email setting values. - -
- -```yaml -charts: - kube-prometheus-stack: - alertmanager: - config: - receivers: - - name: email-alert - email_configs: - - to: @.com - send_resolved: true - from: @.com - smarthost: smtp..com:587 - auth_username: @.com - auth_identity: @.com - auth_password: -``` - -Refer to the [Prometheus Alertmanager Configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation to learn more about Alertmanager. - -
- -#### Grafana Ingress - -You can enable an ingress endpoint for Grafana that will deploy an NGINX ingress controller. This feature can be used to enable HTTPS and require authentication for all Prometheus API requests. - -If you do not enable the ingress option, then by default a service with a load balancer will be created that exposes port 80. - - -Toggle the **Enable** button to enable the use of Ingress. - -
- - -#### Thanos SideCar - -[Thanos](https://prometheus-operator.dev/docs/operator/thanos/) is an open-source system for running large-scale, distributed, and highly available Prometheus setups. Thanos allows Prometheus to store data for extended periods in object storage, such as Amazon S3 or Google Cloud Storage, instead of a local disk. This enables Prometheus to scale horizontally without the risk of using up local storage space. - - -Toggle the **Enable** button to enable the use of Thanos. - -
- -#### Object Store - -Select the Thanos object storage type you will use. Review the `thanos.objstoreConfig` parameters to configure the use of object storage with Thanos. Refer to the [Thanos Object Storage](https://github.com/thanos-io/thanos/blob/main/docs/storage.md) documentation to learn more about how to configure each object storage. - -
- -```yaml -charts: - kube-prometheus-stack: - prometheus: - prometheusSpec: - thanos: - objstoreConfig: -``` - -#### Thanos Ruler Object Store - -You can specify a different object storage to store the Thanos Ruler event data. Defaults to the object storage specified for Thanos. Refer to the [Thanos Ruler](https://prometheus-operator.dev/docs/operator/thanos/?#thanos-ruler) resource to learn more. - -
- - -### Dependencies - -The Prometheus Operator pack installs the following dependencies: - -* [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) -* [Prometheus](https://prometheus.io/) -* [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager/) -* [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) -* [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) -* [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) -* Service monitors to scrape internal Kubernetes components - -
- - - -## Prerequisites - -* Kubernetes v1.16 or greater. - -## Parameters - -The Prometheus operator supports all the parameters exposed by the kube-prometheus-stack Helm Chart. Refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) documentation for details. - -The Prometheus Operator pack has one parameter you must initialize, `grafana.adminPassword`: - -
- -```yaml -charts: - kube-prometheus-stack: - grafana: - adminPassword: "" -``` - -Use the `grafana.adminPassword` parameter to assign a password to the Grafana admin user `admin`. - -Additional parameters you should be aware of can be found by expanding the **Presets** options. You can modify the preset settings when you create the profile or when you deploy the cluster and review the cluster profile. - -![A view of the pack's preset drawer expanded with radio buttons](/integrations_prometheus-operator_operator-preset-view-expanded.png) - -Review the usage section below to learn more about each preset option. - -
- -#### Email Alerts - -You can configure the Prometheus server to send email alerts to a set of contacts. Toggle the **Email Alerts** button to enable email alerting. Update the `alertmanager.config.receivers` settings with all the required email setting values. - -
- -```yaml -charts: - kube-prometheus-stack: - alertmanager: - config: - receivers: - - name: email-alert - email_configs: - - to: @.com - send_resolved: true - from: @.com - smarthost: smtp..com:587 - auth_username: @.com - auth_identity: @.com - auth_password: -``` - -Refer to the [Prometheus Alertmanager Configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation to learn more about Alertmanager. - -
- -#### Grafana Ingress - -You can enable an ingress endpoint for Grafana that will deploy an NGINX ingress controller. This feature can be used to enable HTTPS and require authentication for all Prometheus API requests. - -If you do not enable the ingress option, then by default a service with a load balancer will be created that exposes port 80. - - -Toggle the **Enable** button to enable the use of Ingress. - -
- - -#### Thanos SideCar - -[Thanos](https://prometheus-operator.dev/docs/operator/thanos/) is an open-source system for running large-scale, distributed, and highly available Prometheus setups. Thanos allows Prometheus to store data for extended periods in object storage, such as Amazon S3 or Google Cloud Storage, instead of a local disk. This enables Prometheus to scale horizontally without the risk of using up local storage space. - - -Toggle the **Enable** button to enable the use of Thanos. - -
- -#### Object Store - -Select the Thanos object storage type you will use. Review the `thanos.objstoreConfig` parameters to configure the use of object storage with Thanos. Refer to the [Thanos Object Storage](https://github.com/thanos-io/thanos/blob/main/docs/storage.md) documentation to learn more about how to configure each object storage. - -
- -```yaml -charts: - kube-prometheus-stack: - prometheus: - prometheusSpec: - thanos: - objstoreConfig: -``` - -### Dependencies - -The Prometheus Operator pack installs the following dependencies: - -* [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) -* [Prometheus](https://prometheus.io/) -* [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager/) -* [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) -* [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) -* [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) -* Service monitors to scrape internal Kubernetes components - -
- - - - -## Prerequisites - -* Kubernetes v1.16 or greater. - -## Parameters - -The Prometheus operator supports all the parameters exposed by the kube-prometheus-stack Helm Chart. Refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) documentation for details. - -The Prometheus Operator pack has one parameter you must initialize, `grafana.adminPassword`: - -
- -```yaml -charts: - kube-prometheus-stack: - grafana: - adminPassword: "" -``` - -Use the `grafana.adminPassword` parameter to assign a password to the Grafana admin user `admin`. - -Additional parameters you should be aware of can be found by expanding the **Presets** options. You can modify the preset settings when you create the profile or when you deploy the cluster and review the cluster profile. - -![A view of the pack's preset drawer expanded with radio buttons](/integrations_prometheus-operator_operator-preset-view-expanded.png) - -Review the usage section below to learn more about each preset option. - -
- -#### Email Alerts - -You can configure the Prometheus server to send email alerts to a set of contacts. Toggle the **Email Alerts** button to enable email alerting. Update the `alertmanager.config.receivers` settings with all the required email setting values. - -
- -```yaml -charts: - kube-prometheus-stack: - alertmanager: - config: - receivers: - - name: email-alert - email_configs: - - to: @.com - send_resolved: true - from: @.com - smarthost: smtp..com:587 - auth_username: @.com - auth_identity: @.com - auth_password: -``` - -Refer to the [Prometheus Alertmanager Configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation to learn more about Alertmanager. - -
- -#### Grafana Ingress - -You can enable an ingress endpoint for Grafana that will deploy an NGINX ingress controller. This feature can be used to enable HTTPS and require authentication for all Prometheus API requests. - -If you do not enable the ingress option, then by default a service with a load balancer will be created that exposes port 80. - - -Toggle the **Enable** button to enable the use of Ingress. - -
- -### Dependencies - -The Prometheus Operator pack installs the following dependencies: - -* [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) -* [Prometheus](https://prometheus.io) -* [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager) -* [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) -* [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) -* [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) -* Service monitors to scrape internal Kubernetes components -
- - - -All versions less than v30.2.X are considered deprecated. Upgrade to a newer version to take advantage of new features. - - - -
- -# Terraform - -You can retrieve details about the Prometheus operator pack by using the following Terraform code. - -
- -```terraform -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "pack-info" { - name = "prometheus-operator" - version = "45.4.0" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - -# References - -- [Deploy Monitoring Stack](/clusters/cluster-management/monitoring/deploy-monitor-stack) - - -- [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) - - -- [Prometheus Remote Write Tuning](https://prometheus.io/docs/practices/remote_write) - - -- [Thanos & Prometheus](https://prometheus-operator.dev/docs/operator/thanos) - - -- [Prometheus FAQ](https://prometheus.io/docs/introduction/faq) - - -- [Prometheus Cluster Metrics](/integrations/prometheus-cluster-metrics) diff --git a/content/docs/06-integrations/00-rke2.md b/content/docs/06-integrations/00-rke2.md deleted file mode 100644 index 67558c2156..0000000000 --- a/content/docs/06-integrations/00-rke2.md +++ /dev/null @@ -1,209 +0,0 @@ ---- -title: 'RKE2' -metaTitle: 'RKE2 Integration with Palette' -metaDescription: 'RKE2 pack in Palette' -hiddenFromNav: true -type: "integration" -category: ['kubernetes', 'amd64', 'fips'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/kubernetes-rke2/blobs/sha256:47cde61005d9996f1571c132ba9f753982134a7a0d8e445e27001ab8519e6051?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# RKE2 Overview - -[RKE2](https://docs.rke2.io/) is a fully conformant Kubernetes distribution focusing on security and compliance within the U.S. Federal Government sector. To meet the Kubernetes security and compliance goals required by the U.S. Federal Government, RKE2 establishes the following: - -
- -1. Provides defaults and configuration options that allow clusters to pass the CIS Kubernetes Benchmark v1.6 with minimal operator intervention. - - -2. Enables Federal Information Processing Standard 140-2 (FIPS 140-2) compliance. - - -3. Scans components regularly for Common Vulnerabilities and Exposures (CVEs) using Trivy in the build pipeline. - - -RKE2 launches control plane components as static pods, managed by the kubelet instead of relying on Docker. Additionally, the embedded container runtime is containerd. - -You can deploy RKE2 by adding this pack to a cluster profile. Once the cluster profile is created, you can deploy the RKE2-based Kubernetes clusters through Palette. - - -
- - - -RKE2 is only available for Edge host deployments. Refer to the [Edge](/clusters/edge) documentation to learn more about Edge. - - - -# Versions Supported - -The following RKE2 versions are supported to work with Palette. - -
- - - - - -## Prerequisites - -- A Linux operating system. Refer to the official [RKE2 requirements](https://docs.rke2.io/install/requirements) for more details on supported Linux distributions and versions. - -- 8 GB Memory - -- 4 CPU - -- An Edge host. Refer to the [Edge](/clusters/edge) documentation to learn more about Edge. - - -## Usage - -You can add RKE2 to an Edge cluster profile as the Kubernetes layer. Refer to the [Create Cluster Profiles](/cluster-profiles/task-define-profile) guide to learn more. - -RKE2 offers several customization options, ranging from networking to security. We recommend you review the following RKE2 documentation: - -
- - -- [Configuration Options](https://docs.rke2.io/install/configuration) - - -- [Inbound Network Rules](https://docs.rke2.io/install/requirements#inbound-network-rules) - - -- [Registries Configuration](https://docs.rke2.io/install/containerd_registry_configuration) - - -- [Advanced Options](https://docs.rke2.io/advanced) - - -Many of the Day-2 cluster management responsibilities are handled by Palette. Review the [Cluster Management](/clusters/cluster-management) reference resource to learn more about Palette and Day-2 operations. - -
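-
-For context, the configuration options linked above correspond to settings in RKE2's declarative configuration file. The snippet below is a minimal sketch of such a file with placeholder values; when you deploy RKE2 through Palette, the equivalent settings are supplied through the RKE2 pack's YAML values rather than edited directly on the host.
-
-```yaml
-# Example /etc/rancher/rke2/config.yaml for a standalone RKE2 server.
-# All values are placeholders.
-write-kubeconfig-mode: "0644"
-tls-san:
-  - "rke2.example.com"
-node-label:
-  - "environment=example"
-```
-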
- - - -## Prerequisites - -- A Linux operating system. Refer to the official [RKE2 requirements](https://docs.rke2.io/install/requirements) for more details on supported Linux distributions and versions. - -- 8 GB Memory - -- 4 CPU - -- An Edge host. Refer to the [Edge](/clusters/edge) documentation to learn more about Edge. - -## Usage - -You can add RKE2 to an Edge cluster profile as the Kubernetes layer. To learn more, refer to the [Create Cluster Profiles](/cluster-profiles/task-define-profile) guide. - -RKE2 offers several customization options, ranging from networking to security. We recommend you review the following RKE2 documentation: - -
- - -- [Configuration Options](https://docs.rke2.io/install/configuration) - - -- [Inbound Network Rules](https://docs.rke2.io/install/requirements#inbound-network-rules) - - -- [Registries Configuration](https://docs.rke2.io/install/containerd_registry_configuration) - - -- [Advanced Options](https://docs.rke2.io/advanced) - - -Many of the Day-2 cluster management responsibilities are handled by Palette. Review the [Cluster Management](/clusters/cluster-management) reference resource to learn more about Palette and Day-2 operations. - -
- - - -## Prerequisites - -- A Linux operating system. Refer to the official [RKE2 requirements](https://docs.rke2.io/install/requirements) for more details on supported Linux distributions and versions. - -- 8 GB Memory - -- 4 CPU - -- An Edge host. Refer to the [Edge](/clusters/edge) documentation to learn more about Edge. - - -## Usage - -You can add RKE2 to an Edge cluster profile as the Kubernetes layer. To learn more, refer to the [Create Cluster Profiles](/cluster-profiles/task-define-profile) guide. - -RKE2 offers several customization options, ranging from networking to security. We recommend you review the following RKE2 documentation: - -
- - -- [Configuration Options](https://docs.rke2.io/install/configuration) - - -- [Inbound Network Rules](https://docs.rke2.io/install/requirements#inbound-network-rules) - - -- [Registries Configuration](https://docs.rke2.io/install/containerd_registry_configuration) - - -- [Advanced Options](https://docs.rke2.io/advanced) - - -Many of the Day-2 cluster management responsibilities are handled by Palette. Review the [Cluster Management](/clusters/cluster-management) reference resource to learn more about Palette and Day-2 operations. - -
- - - - -The following major versions of RKE2 are deprecated. - - -
- - -- 1.23.x - - -- 1.22.x - - - - -
-
- - -
- -# Terraform - -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "k8s" { - name = "edge-rke2" - version = "1.25.2" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - -# Resources - -- [RKE2 Documentation](https://docs.rke2.io) - - -- [RKE2 GitHub Repository](https://github.com/rancher/rke2) \ No newline at end of file diff --git a/content/docs/06-integrations/00-rook-ceph.md b/content/docs/06-integrations/00-rook-ceph.md deleted file mode 100644 index fd6f741750..0000000000 --- a/content/docs/06-integrations/00-rook-ceph.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: 'rook-ceph' -metaTitle: 'rook-ceph' -metaDescription: 'Rook Ceph storage pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['storage', 'amd64'] -logoUrl: ' https://registry.dev.spectrocloud.com/v1/csi-rook-ceph/blobs/sha256:2817270f4eecbc2eea0740c55c7611d1a538a3e17da610a3487bb11b067076d1?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Rook Ceph - -Rook turns storage software into self-managing, self-scaling, and self-healing storage services. It automates deployment, bootstrapping, configuration, provisioning, scaling, upgrading, migration, disaster recovery, monitoring, and resource management. Rook uses the facilities provided by the underlying cloud-native container management, scheduling, and orchestration platform to perform its duties. - -The pack provides the following two configurations: -* A three-node Ceph cluster (recommended). -* A single node Ceph cluster. - -Please make sure that your worker node pool size satisfies the minimum nodes requirement for your Ceph cluster. Additional disks should be attached to your worker pool nodes to deploy a Ceph cluster. For example, suppose you are using existing appliances for your Kubernetes cluster (typical for edge clusters); you will need to ensure that additional disks (1 or 3 - based on your Ceph cluster settings) are attached to the appliance. The device filter needs to be configured in the pack settings for such cases. As an example, if the additional disks were sdd, sde, sdf, the following configuration would be required: - -**Example:** -```json - storage: - useAllNodes: true - useAllDevices: false - deviceFilter: ^sd[d-f] - config: - osdsPerDevice: "1" # this value can be overridden at the node or device level - -``` -## Versions Supported - - - - - -**1.9.2** - - - - - -**1.8.3** - - - - - - -**1.5.9** - - - - - - -## References - -- [Rook Cepth Documentation](https://rook.io/docs/rook/v1.10/Getting-Started/intro/) diff --git a/content/docs/06-integrations/00-spectro-k8s-dashboard.md b/content/docs/06-integrations/00-spectro-k8s-dashboard.md deleted file mode 100644 index 6535ac7bfc..0000000000 --- a/content/docs/06-integrations/00-spectro-k8s-dashboard.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: "Spectro Kubernetes Dashboard" -metaTitle: "Spectro Cloud's Pre-configured Kubernetes Dashboard" -metaDescription: "Palette's pre-configured Kubernetes Dashboard Monitoring pack reduces the complexity of standing up the Kubernetes dashboard for a cluster." 
-hiddenFromNav: true -type: "integration" -category: ["monitoring", 'arm64', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/k8s-dashboard/blobs/sha256:2de5d88b2573af42d4cc269dff75744c4174ce47cbbeed5445e51a2edd8b7429?type=image/png' ---- - - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Spectro Kubernetes Dashboard - -Spectro Kubernetes Dashboard is a web-based UI for Kubernetes clusters that auto-enables the Kubernetes Dashboard using secure ports and conveniently includes the [Spectro Proxy](/integrations/frp) pack. - - -# Versions Supported - -**2.7.x** - -
- -## Prerequisites - -- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the [Spectro Proxy](/integrations/frp) reverse proxy. - - -- Users or groups must be mapped to a Kubernetes RBAC role, either a *Role* or a *ClusterRole*. You can create a custom role through a manifest and use Palette's roleBinding feature to associate users or groups with the role. Refer to the [Create a Role Binding](/clusters/cluster-management/cluster-rbac#createrolebindings) guide to learn more. - - -## Parameters - - - -| Parameter | Description | Default | -|-----------|-------------|---------| -| `namespace` | The Kubernetes namespace to install the dashboard. | `kubernetes-dashboard` | -| `ClusterRole` | The ClusterRole to assign to the Spectro Kubernetes Dashboard. | `read-only` | -| `certDuration` | Self-signed certificate duration in hours. | 8760h (365d) | -| `certRenewal` | Self-signed certificate renewal in hours. | 720h (30d) | -| `serviceType` | The service type for the dashboard. Supported values are `ClusterIP`, `LoadBalancer`, and `NodePort`. | `ClusterIP` | -| `skipLogin` | Enables or disables the skip login option on the Spectro Kubernetes Dashboard. | `false` | -| `enableInsecureLogin` | Enables non-Secure Sockets Layer (SSL) login. Dashboard login is always restricted to HTTP(S) + localhost or HTTPS and external domain. | `false` | -| `ingress.enabled` | Ingress configuration to access the `ClusterIP`, `LoadBalancer`, or `NodePort`. | `false` | - -## Usage - -To use the Spectro Kubernetes Dashboard pack, you have to add it to your cluster profile. Spectro Kubernetes Dashboard supports several public cloud and data center cloud environments. To learn more, refer to [Clusters](/clusters). - -Use the following information to find the Kubernetes Dashboard pack. -- **Pack Type**: Monitoring -- **Registry**: Public Repo -- **Pack Name**: Spectro Kubernetes Dashboard -- **Pack Version**: 2.7.0 or higher - -Spectro Kubernetes Dashboard has the following Access options. - -
- -- **Proxied**: This option is useful for access to the Kubernetes Dashboard from anywhere and leverages the Spectro Proxy pack, which gets installed automatically. The Service resource for the Kubernetes Dashboard will be configured as ClusterIP and is only accessible through the proxy. To learn more, check out the [Spectro Proxy](/integrations/frp) guide. - - -- **Direct**: This option is meant for a private configuration where a proxy is not implemented or not desired. The Service resource for the Kubernetes Dashboard will be configured as LoadBalancer, which you can access directly. This option requires you to be on a network that can reach the IP address given to the LoadBalancer service. - - -# Terraform - -You can reference the Spectro Kubernetes Dashboard pack in Terraform with a data resource. - -
- -```tf -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "k8s-dashboard" { - name = "spectro-k8s-dashboard" - version = "2.7.0" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - -# References - -- [Microsoft Access Control Using Kubernetes RBAC](https://learn.microsoft.com/en-us/azure/aks/azure-ad-rbac?toc=https%3A%2F%2Fdocs.micro[…]icrosoft.com%2Fen-us%2Fazure%2Fbread%2Ftoc.json&tabs=portal) - - -- [Terraform Data Resource](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) - - diff --git a/content/docs/06-integrations/00-splunk.md b/content/docs/06-integrations/00-splunk.md deleted file mode 100644 index 79b5d543b5..0000000000 --- a/content/docs/06-integrations/00-splunk.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: 'Splunk' -metaTitle: 'Splunk' -metaDescription: 'Splunk Monitoring pack in Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['logging', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/splunk/blobs/sha256:1729cfced51a1ef8693997aee535f098a782f15fba9ca5232a8dfba68a8d4857?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Splunk - -Splunk Connect for Kubernetes imports and searches the Kubernetes loggings, objects, and metrics data in Splunk platform deployment. It supports the importing and searching of container logs. - -## Versions Supported - - - - - -**1.4.3** - - - - -## References - -https://github.com/splunk/splunk-connect-for-kubernetes diff --git a/content/docs/06-integrations/00-ubuntu.md b/content/docs/06-integrations/00-ubuntu.md deleted file mode 100644 index 6fa849ae0f..0000000000 --- a/content/docs/06-integrations/00-ubuntu.md +++ /dev/null @@ -1,883 +0,0 @@ ---- -title: 'Ubuntu' -metaTitle: 'Ubuntu' -metaDescription: 'Choose Ubuntu Operating System pack in Palette.' - -hiddenFromNav: true -type: "integration" -category: ['operating system', 'amd64'] -logoUrl: 'https://registry.spectrocloud.com/v1/ubuntu-vsphere/blobs/sha256:09a727f9005b79c69d8e60e12ce130880c63131315b49e7fb4cc44e53d34dc7a?type=image/png' ---- - -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import Tabs from 'shared/components/ui/Tabs'; - - -# Ubuntu - -[Ubuntu](https://ubuntu.com) is an open-source operating system based on the Linux kernel. Developed by Canonical Ltd., Ubuntu is a popular choice for desktops, servers, and cloud environments due to its ease of use, robustness, and versatility. - -Boasting a comprehensive package system, Ubuntu provides a wealth of pre-compiled software directly accessible for installation. With its regular release cycle, Ubuntu ensures updated software and security patches, making it a secure and reliable choice for various use cases. - -In addition to its stability, Ubuntu's community support, extensive documentation, and commitment to free software principles make it a widely favored choice among Linux distributions. - -You can use Ubuntu as the base Operating System (OS) when deploying a host cluster by using the Ubuntu pack when you create a [cluster profile](/cluster-profiles). - - -
- - - -Review [Maintenance Policy](/integrations/maintenance-policy#ospacks) to learn about pack update and deprecation schedules. - - - - -# Versions Supported - -
- - - - - - -## Prerequisites - -- A minimum of 4 CPU and 4GB Memory - - - -- You can use Ubuntu with a supported Kubernetes version. The table lists Kubernetes dependencies. - - - |Kubernetes Version | Supports Kubernetes | -|------------|----------------------------| -1.26 | ✅ | -1.25 | ✅ | -1.24 | ❌ | - - -## Parameters - -The Ubuntu OS pack has no required parameters. - -You can customize the Ubuntu OS pack. The following configuration blocks are available for use within the `kubeadmconfig` configuration block in the YAML file. - -
- - - -Palette applies a default set of configuration options when deploying Ubuntu. You can override the defaults configurations by using the following parameters but it's not required. - - - - -| Field | Description | YAML Type | Required | -| --- | --- | --- | --- | -| `apiServer` | Extra settings for the Kube API server control plane component. Refer to [Kube API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/) documentation for available options. | object | No | -| `controllerManager` | Extra settings for the Kubernetes controller manager control plane component. Review the [Kubernetes controller manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) documentation for more information. | object | No | -| `scheduler` | Extra settings for the Kubernetes scheduler control plane component. Refer to the [Kube scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler) documenation for more details. | object | No | -| `kubeletExtraArgs` | Extra arguments for kubelet. Refer to the [Kubeadm init](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init) documentation for more details. | map | No | -| `files` | Create or specify additional files for the `kubeadmconfig`. Refer to the [Customize Pack](/integrations/ubuntu?22.04.x#customizepack) section to learn more. | list | No | -| `preKubeadmCommands` | Extra commands to issue before kubeadm starts. | list | No | -| `postKubeadmCommands` | Extra commands to issue after kubeadm starts. | list | No | -| `imageRepository` | The container registry to pull images from. If empty, `k8s.gcr.io` will be used by default. | string | No | -| `etcd` | Configuration for etcd. This value defaults to a Local (stacked) etcd. You can specify configurations using [local etcd configuration files](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/), or you can reference [external etcd configurations](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability) or Certificate Authorities (CA). | object | No | -| `dns` | Options for the DNS add-on installed in the cluster. Refer to the [Customizing DNS Service](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/) to learn more. | object | No | - -The following code snippet is an example YAML using all the `kubeadmconfig` parameters listed in the table. The example YAML is only for learning purposes. - - -
- -```yaml -kubeadmconfig: - apiServer: - extraArgs: - secure-port: "6443" - anonymous-auth: "true" - insecure-port: "0" - profiling: "false" - disable-admission-plugins: "AlwaysAdmit" - enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurityPolicy" - audit-log-path: /var/log/apiserver/audit.log - audit-policy-file: /etc/kubernetes/audit-policy.yaml - audit-log-maxage: "30" - audit-log-maxbackup: "10" - audit-log-maxsize: "100" - authorization-mode: RBAC,Node - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extraVolumes: - - name: audit-log - hostPath: /var/log/apiserver - mountPath: /var/log/apiserver - pathType: DirectoryOrCreate - - name: audit-policy - hostPath: /etc/kubernetes/audit-policy.yaml - mountPath: /etc/kubernetes/audit-policy.yaml - readOnly: true - pathType: File - controllerManager: - extraArgs: - profiling: "false" - terminated-pod-gc-threshold: "25" - use-service-account-credentials: "true" - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - extraArgs: - profiling: "false" - dns: - type: CoreDNS - imageRepository: public.ecr.aws/eks-distro/coredns - imageTag: v1.7.0-eks-1-18-1 - etcd: - local: - dataDir: /var/lib/etcd - imageRepository: public.ecr.aws/eks-distro/etcd-io - imageTag: v3.4.14-eks-1-18-1 - external: - endpoints: - - example.org - caFile: myCa.file - certFile: myCert.file - keyFile: myKey.file - imageRepository: public.ecr.aws/eks-distro/kubernetes - kubeletExtraArgs: - read-only-port : "0" - event-qps: "0" - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - files: - - path: hardening/audit-policy.yaml - targetPath: /etc/kubernetes/audit-policy.yaml - targetOwner: "root:root" - targetPermissions: "0600" - - path: hardening/privileged-psp.yaml - targetPath: /etc/kubernetes/hardening/privileged-psp.yaml - targetOwner: "root:root" - targetPermissions: "0600" - - path: hardening/90-kubelet.conf - targetPath: /etc/sysctl.d/90-kubelet.conf - targetOwner: "root:root" - targetPermissions: "0600" - - preKubeadmCommands: - # For enabling 'protect-kernel-defaults' flag to kubelet, kernel parameters changes are required - - 'echo "====> Applying kernel parameters for Kubelet"' - - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' - postKubeadmCommands: - # Apply the privileged PodSecurityPolicy on the first master node ; Otherwise, CNI (and other) pods won't come up - - 'export KUBECONFIG=/etc/kubernetes/admin.conf' - # Sometimes api server takes a little longer to respond. Retry if applying the pod-security-policy manifest fails - - '[ -f "$KUBECONFIG" ] && { echo " ====> Applying PodSecurityPolicy" ; until $(kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null ); do echo "Failed to apply PodSecurityPolicies, will retry in 5s" ; sleep 5 ; done ; } || echo "Skipping PodSecurityPolicy for worker nodes"' -``` - -
- - - -Review the parameter documentation before you make changes to the kubeadm configuration. Improper configurations can cause deployment failures. - - - - -Palette also supports Ubuntu Pro. Refer to the [Ubuntu Pro](/integrations/ubuntu?22.04.x#ubuntupro) section below for more details. - -
- -## Usage - - -To use the Ubuntu OS pack, add the pack to your cluster profile when you select the OS layer. Refer to the [Create Cluster Profile](/cluster-profiles/task-define-profile) guide to learn more about creating cluster profiles. - - - -
- - -### Customize Pack - - -You can customize the Ubuntu OS pack using the available configuration parameters in the YAML file. Use the parameters to customize the Kubernetes install process. - - -
- - -#### Add Custom Files - - -You can create custom files that you define in the `files` section that precedes the `preKubeadmCommands` and `postKubeadmCommands` sections. The files are invoked during runtime. - - -
- -```yaml -kubeadmconfig: - files: - - targetPath: /usr/local/share/ca-certificates/mycom.crt - targetOwner: "root:root" - targetPermissions: "0644" - content: | - -----BEGIN CERTIFICATE----- - MIICyzCCAbOgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl - cm5ldGVzMB4XDTIwMDkyMjIzNDMyM1oXDTMwMDkyMDIzNDgyM1owFTETMBEGA1UE - AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMdA - nZYs1el/6f9PgV/aO9mzy7MvqaZoFnqO7Qi4LZfYzixLYmMUzi+h8/RLPFIoYLiz - qiDn+P8c9I1uxB6UqGrBt7dkXfjrUZPs0JXEOX9U/6GFXL5C+n3AUlAxNCS5jobN - fbLt7DH3WoT6tLcQefTta2K+9S7zJKcIgLmBlPNDijwcQsbenSwDSlSLkGz8v6N2 - 7SEYNCV542lbYwn42kbcEq2pzzAaCqa5uEPsR9y+uzUiJpv5tDHUdjbFT8tme3vL - 9EdCPODkqtMJtCvz0hqd5SxkfeC2L+ypaiHIxbwbWe7GtliROvz9bClIeGY7gFBK - jZqpLdbBVjo0NZBTJFUCAwEAAaMmMCQwDgYDVR0PAQH/BAQDAgKkMBIGA1UdEwEB - /wQIMAYBAf8CAQAwDQYJKoZIhvcNAQELBQADggEBADIKoE0P+aVJGV9LWGLiOhki - HFv/vPPAQ2MPk02rLjWzCaNrXD7aPPgT/1uDMYMHD36u8rYyf4qPtB8S5REWBM/Y - g8uhnpa/tGsaqO8LOFj6zsInKrsXSbE6YMY6+A8qvv5lPWpJfrcCVEo2zOj7WGoJ - ixi4B3fFNI+wih8/+p4xW+n3fvgqVYHJ3zo8aRLXbXwztp00lXurXUyR8EZxyR+6 - b+IDLmHPEGsY9KOZ9VLLPcPhx5FR9njFyXvDKmjUMJJgUpRkmsuU1mCFC+OHhj56 - IkLaSJf6z/p2a3YjTxvHNCqFMLbJ2FvJwYCRzsoT2wm2oulnUAMWPI10vdVM+Nc= - -----END CERTIFICATE----- - preKubeadmCommands: - - echo "Executing pre kube admin config commands" - - update-ca-certificates - - 'systemctl restart containerd; sleep 3' - - 'while [ ! -S /var/run/containerd/containerd.sock ]; do echo "Waiting for containerd..."; sleep 1; done' - postKubeadmCommands: - - echo "Executing post kube admin config commands" -``` - -In the next example, a configuration file is added to a folder. - -
- -```yaml -kubeadmconfig: - files: - - targetPath: /etc/containerd/config.toml - targetOwner: "root:root" - targetPermissions: "0644" - content: | - version = 2 - imports = ["/etc/containerd/conf.d/*.toml"] - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - sandbox_image = "registry.k8s.io/pause:3.9" - device_ownership_from_security_context = true - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - preKubeadmCommands: - - 'echo "====> Applying pre Kubeadm commands"' - postKubeadmCommands: - - 'echo "====> Applying post Kubeadm commands"' -``` - - -### Ubuntu Pro - -Ubuntu Pro is a security and maintenance subscription offering from Canonical that offers long-term security support and many other security hardening features. Ubuntu Pro offers several more benefits than the free Ubuntu offering: - - -
- - -- Extended Security Maintenance - - -- Kernel Livepatch service to avoid reboots - - -- FIPS 140-2 Level 1 certified crypto modules - - -- Common Criteria EAL2 - -For more information, refer to the [Ubuntu Pro](https://ubuntu.com/pro) documentation from Canonical. - - - -You can enable Ubuntu Pro when deploying clusters with Palette. To enable Ubuntu Pro, select Ubuntu as the OS layer for a cluster profile and expand the **Preset Menu** to reveal the Ubuntu Pro parameters. - - -| Parameter| Description | Default Value | -|---|---|----| -|**token**| The Canonical subscription token for Ubuntu Pro. Refer to the Ubuntu Pro [subscribe page](https://ubuntu.com/pro/subscribe) to acquire a subscription token. | `""` | -|**esm-apps**| Expanded Security Maintenance (ESM) for Applications. Refer to the Ubuntu [ESM documentation](https://ubuntu.com/security/esm) to learn more. | Disabled | -| **livepatch** | Canonical Livepatch service. Refer to the Ubuntu [Livepatch](https://ubuntu.com/security/livepatch) documentation for more details. | Disabled | -| **fips** | Federal Information Processing Standards (FIPS) 140 validated cryptography for Linux workloads on Ubuntu. This installs NIST-certified core packages. Refer to the Ubuntu [FIPS](https://ubuntu.com/security/certifications/docs/2204) documentation to learn more. | Disabled | -| **fips-updates** | Install NIST-certified core packages with priority security updates. Refer to the Ubuntu [FIPS Updates](https://ubuntu.com/security/certifications/docs/fips-updates) documentation to learn more. | Disabled | -| **cis** | Gain access to OpenSCAP-based tooling that automates both hardening and auditing with certified content based on published CIS benchmarks. Refer to the Ubuntu [CIS](https://ubuntu.com/security/certifications/docs/2204/usg/cis) documentation to learn more. | Disabled | - - - -Use the following steps to enable Ubuntu Pro. - -
- - -1. Log in to [Palette](https://console.spectrocloud.com). - - - -2. Navigate to the left **Main Menu** and select **Profiles**. - - -3. Click on **Add Cluster Profile**. - - -4. Fill out the input fields for **Name**, **Version**, **Description**, **Type** and **Tags**. Click on **Next** to continue. - - - -5. Select the infrastructure provider and click on **Next**. - - -6. Select the OS layer and use the following information to find the Ubuntu pack: - - - - **Pack Type** - OS - - - **Registry** - Public Repo - - - **Pack Name** -Ubuntu - - - **Pack Version** - 20.04 or 22.04 - - -7. Modify the Ubuntu **Pack values** to activate the **Presets** options for the Ubuntu YAML file. Click on the **** button to reveal the YAML editor and expand the **Preset Drawer**. - - -
- - ![A view of the cluster profile creation wizard for Ubuntu Pro](/integrations_ubuntu_ubuntu-pro-preset-drawer.png) - - -8. Click the **Ubuntu Advantage/Pro** checkbox to include the Ubuntu Pro parameters in the pack configuration file. - - -9. Toggle options on or off to enable or disable the various Ubuntu Pro services. - - - -10. Click the **Next layer** button to continue to the next layer. - - -11. Complete the remainder of the cluster profile creation wizard by selecting the next cluster profile layers. - - -
- - - - -
- - - -## Prerequisites - -- A minimum of 4 CPU cores and 4 GB of memory. - - - -- You can use Ubuntu with a supported Kubernetes version. The following table lists the Kubernetes versions that are supported. - - - | Kubernetes Version | Supported | -|------------|----------------------------| -| 1.26 | ❌ | -| 1.25 | ❌ | -| 1.24 | ✅ | -| 1.23 | ✅ | -
- - -## Parameters - -The Ubuntu OS pack has no required parameters. - - - -You can customize the Ubuntu OS pack. The following configuration blocks are available for use within the `kubeadmconfig` configuration block in the YAML file. - - -
- - - - -Palette applies a default set of configuration options when deploying Ubuntu. You can override the default configuration by using the following parameters, but it is not required. - - - - -| Field | Description | YAML Type | Required | -| --- | --- | --- | --- | -| `apiServer` | Extra settings for the Kube API server control plane component. Refer to [Kube API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/) documentation for available options. | object | No | -| `controllerManager` | Extra settings for the Kubernetes controller manager control plane component. Review the [Kubernetes controller manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) documentation for more information. | object | No | -| `scheduler` | Extra settings for the Kubernetes scheduler control plane component. Refer to the [Kube scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler) documentation for more details. | object | No | -| `kubeletExtraArgs` | Extra arguments for kubelet. Refer to the [Kubeadm init](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init) documentation for more details. | map | No | -| `files` | Additional files to pass to kubeadmconfig. Refer to the [Customize Pack](/integrations/ubuntu?22.04.x#customizepack) section to learn more. | list | No | -| `preKubeadmCommands` | Extra commands to issue before kubeadm starts. | list | Yes - Auto generated | -| `postKubeadmCommands` | Extra commands to issue after kubeadm starts. | list | Yes - Auto generated | -| `imageRepository` | The container registry to pull images from. If empty, `k8s.gcr.io` will be used by default. | string | No | -| `etcd` | Configuration for etcd. This value defaults to a Local (stacked) etcd. You can specify configurations using [local etcd configuration files](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/), or you can reference [external etcd configurations](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability) or Certificate Authorities (CA). | object | No | -| `dns` | Options for the DNS add-on installed in the cluster. Refer to the [Customizing DNS Service](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/) documentation to learn more. | object | No | - -The following code snippet is an example YAML file that uses all the `kubeadmconfig` parameters listed in the table. The example YAML is for learning purposes only. - -
- -```yaml -kubeadmconfig: - apiServer: - extraArgs: - secure-port: "6443" - anonymous-auth: "true" - insecure-port: "0" - profiling: "false" - disable-admission-plugins: "AlwaysAdmit" - enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurityPolicy" - audit-log-path: /var/log/apiserver/audit.log - audit-policy-file: /etc/kubernetes/audit-policy.yaml - audit-log-maxage: "30" - audit-log-maxbackup: "10" - audit-log-maxsize: "100" - authorization-mode: RBAC,Node - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extraVolumes: - - name: audit-log - hostPath: /var/log/apiserver - mountPath: /var/log/apiserver - pathType: DirectoryOrCreate - - name: audit-policy - hostPath: /etc/kubernetes/audit-policy.yaml - mountPath: /etc/kubernetes/audit-policy.yaml - readOnly: true - pathType: File - controllerManager: - extraArgs: - profiling: "false" - terminated-pod-gc-threshold: "25" - use-service-account-credentials: "true" - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - extraArgs: - profiling: "false" - dns: - type: CoreDNS - imageRepository: public.ecr.aws/eks-distro/coredns - imageTag: v1.7.0-eks-1-18-1 - etcd: - local: - dataDir: /var/lib/etcd - imageRepository: public.ecr.aws/eks-distro/etcd-io - imageTag: v3.4.14-eks-1-18-1 - external: - endpoints: - - example.org - caFile: myCa.file - certFile: myCert.file - keyFile: myKey.file - imageRepository: public.ecr.aws/eks-distro/kubernetes - kubeletExtraArgs: - read-only-port : "0" - event-qps: "0" - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - files: - - path: hardening/audit-policy.yaml - targetPath: /etc/kubernetes/audit-policy.yaml - targetOwner: "root:root" - targetPermissions: "0600" - - path: hardening/privileged-psp.yaml - targetPath: /etc/kubernetes/hardening/privileged-psp.yaml - targetOwner: "root:root" - targetPermissions: "0600" - - path: hardening/90-kubelet.conf - targetPath: /etc/sysctl.d/90-kubelet.conf - targetOwner: "root:root" - targetPermissions: "0600" - - preKubeadmCommands: - # For enabling 'protect-kernel-defaults' flag to kubelet, kernel parameters changes are required - - 'echo "====> Applying kernel parameters for Kubelet"' - - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' - postKubeadmCommands: - # Apply the privileged PodSecurityPolicy on the first master node ; Otherwise, CNI (and other) pods won't come up - - 'export KUBECONFIG=/etc/kubernetes/admin.conf' - # Sometimes api server takes a little longer to respond. Retry if applying the pod-security-policy manifest fails - - '[ -f "$KUBECONFIG" ] && { echo " ====> Applying PodSecurityPolicy" ; until $(kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null ); do echo "Failed to apply PodSecurityPolicies, will retry in 5s" ; sleep 5 ; done ; } || echo "Skipping PodSecurityPolicy for worker nodes"' -``` - -
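
If you apply a hardening configuration similar to the example above, you may want to confirm that the settings took effect once the cluster is up. The commands below are a sketch only; they assume a standard kubeadm layout on a control plane node and a Kubernetes version that still serves the PodSecurityPolicy API.

```bash
# Run on a control plane node after the cluster is up.

# Confirm the audit flags were rendered into the API server static pod manifest.
sudo grep -- "--audit-log-path" /etc/kubernetes/manifests/kube-apiserver.yaml

# Confirm the kubelet kernel parameters from 90-kubelet.conf were loaded.
sudo sysctl --system | grep -A 3 "90-kubelet.conf"

# Confirm the privileged PodSecurityPolicy manifest was applied.
kubectl --kubeconfig /etc/kubernetes/admin.conf get podsecuritypolicies
```
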
- - - -Review the parameter documentation before you make changes to the kubeadm configuration. Improper configurations can cause deployment failures. - - - - -Palette also supports Ubuntu Pro. Refer to the [Ubuntu Pro](/integrations/ubuntu?22.04.x#ubuntupro) section below for more details. - -
- -## Usage - - -To use the Ubuntu OS pack, add the pack to your cluster profile when you select the OS layer. Refer to the [Create Cluster Profile](/cluster-profiles/task-define-profile) guide to learn more about creating cluster profiles. - - - -
- - -### Customize Pack - - -You can customize the Ubuntu OS pack using the available configuration parameters in the YAML file. Use the parameters to customize the Kubernetes install process. - - -
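
For example, a minimal override might set only a couple of kubelet flags and a pre- and post-command while leaving everything else at its defaults. The following sketch reuses values that appear in the reference example earlier on this page and is for illustration only:

```yaml
kubeadmconfig:
  # Override only the kubelet flags; all other kubeadmconfig defaults remain in place.
  kubeletExtraArgs:
    event-qps: "0"
    read-only-port: "0"
  preKubeadmCommands:
    - 'echo "====> Applying pre Kubeadm commands"'
  postKubeadmCommands:
    - 'echo "====> Applying post Kubeadm commands"'
```
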
- - -#### Add Custom Files - - -You can create custom files by defining them in the `files` section, which precedes the `preKubeadmCommands` and `postKubeadmCommands` sections. The files are available to those commands at runtime. - -
- -```yaml -kubeadmconfig: - files: - - targetPath: /usr/local/share/ca-certificates/mycom.crt - targetOwner: "root:root" - targetPermissions: "0644" - content: | - -----BEGIN CERTIFICATE----- - MIICyzCCAbOgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl - cm5ldGVzMB4XDTIwMDkyMjIzNDMyM1oXDTMwMDkyMDIzNDgyM1owFTETMBEGA1UE - AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMdA - nZYs1el/6f9PgV/aO9mzy7MvqaZoFnqO7Qi4LZfYzixLYmMUzi+h8/RLPFIoYLiz - qiDn+P8c9I1uxB6UqGrBt7dkXfjrUZPs0JXEOX9U/6GFXL5C+n3AUlAxNCS5jobN - fbLt7DH3WoT6tLcQefTta2K+9S7zJKcIgLmBlPNDijwcQsbenSwDSlSLkGz8v6N2 - 7SEYNCV542lbYwn42kbcEq2pzzAaCqa5uEPsR9y+uzUiJpv5tDHUdjbFT8tme3vL - 9EdCPODkqtMJtCvz0hqd5SxkfeC2L+ypaiHIxbwbWe7GtliROvz9bClIeGY7gFBK - jZqpLdbBVjo0NZBTJFUCAwEAAaMmMCQwDgYDVR0PAQH/BAQDAgKkMBIGA1UdEwEB - /wQIMAYBAf8CAQAwDQYJKoZIhvcNAQELBQADggEBADIKoE0P+aVJGV9LWGLiOhki - HFv/vPPAQ2MPk02rLjWzCaNrXD7aPPgT/1uDMYMHD36u8rYyf4qPtB8S5REWBM/Y - g8uhnpa/tGsaqO8LOFj6zsInKrsXSbE6YMY6+A8qvv5lPWpJfrcCVEo2zOj7WGoJ - ixi4B3fFNI+wih8/+p4xW+n3fvgqVYHJ3zo8aRLXbXwztp00lXurXUyR8EZxyR+6 - b+IDLmHPEGsY9KOZ9VLLPcPhx5FR9njFyXvDKmjUMJJgUpRkmsuU1mCFC+OHhj56 - IkLaSJf6z/p2a3YjTxvHNCqFMLbJ2FvJwYCRzsoT2wm2oulnUAMWPI10vdVM+Nc= - -----END CERTIFICATE----- - preKubeadmCommands: - - echo "Executing pre kube admin config commands" - - update-ca-certificates - - 'systemctl restart containerd; sleep 3' - - 'while [ ! -S /var/run/containerd/containerd.sock ]; do echo "Waiting for containerd..."; sleep 1; done' - postKubeadmCommands: - - echo "Executing post kube admin config commands" -``` - -In the next example, a configuration file is added to a folder. - -
- -```yaml -kubeadmconfig: - files: - - targetPath: /etc/containerd/config.toml - targetOwner: "root:root" - targetPermissions: "0644" - content: | - ## template: jinja - - # Use config version 2 to enable new configuration fields. - # Config file is parsed as version 1 by default. - version = 2 - - imports = ["/etc/containerd/conf.d/*.toml"] - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - sandbox_image = "registry.k8s.io/pause:3.9" - device_ownership_from_security_context = true - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - preKubeadmCommands: - - 'echo "====> Applying pre Kubeadm commands"' - postKubeadmCommands: - - 'echo "====> Applying post Kubeadm commands"' -``` - - -### Ubuntu Pro - -Ubuntu Pro is a security and maintenance subscription offering from Canonical that offers long-term security support and many other security hardening features. Ubuntu Pro offers several more benefits than the free Ubuntu offering: - - -
- - -- Extended Security Maintenance - - -- Kernel Livepatch service to avoid reboots - - -- FIPS 140-2 Level 1 certified crypto modules - - -- Common Criteria EAL2 - -For more information, refer to the [Ubuntu Pro](https://ubuntu.com/pro) documentation from Canonical. - - - -You can enable Ubuntu Pro when deploying clusters with Palette. To enable Ubuntu Pro, select Ubuntu as the OS for a cluster profile and expand the **Preset Menu** to reveal the Ubuntu Pro parameters. - - -| Parameter| Description | Default Value | -|---|---|----| -|**token**| The Canonical subscription token for Ubuntu Pro. Refer to the Ubuntu Pro [subscribe page](https://ubuntu.com/pro/subscribe) to acquire a subscription token. | `""` | -|**esm-apps**| Expanded Security Maintenance (ESM) for Applications. Refer to the Ubuntu [ESM documentation](https://ubuntu.com/security/esm) to learn more. | Disabled | -| **livepatch** | Canonical Livepatch service. Refer to the Ubuntu [Livepatch](https://ubuntu.com/security/livepatch) documentation for more details. | Disabled | -| **fips** | Federal Information Processing Standards (FIPS) 140 validated cryptography for Linux workloads on Ubuntu. This installs NIST-certified core packages. Refer to the Ubuntu [FIPS](https://ubuntu.com/security/certifications/docs/2204) documentation to learn more. | Disabled | -| **fips-updates** | Install NIST-certified core packages with priority security updates. Refer to the Ubuntu [FIPS Updates](https://ubuntu.com/security/certifications/docs/fips-updates) documentation to learn more. | Disabled | -| **cis** | Gain access to OpenSCAP-based tooling that automates both hardening and auditing with certified content based on published CIS benchmarks. Refer to the Ubuntu [CIS](https://ubuntu.com/security/certifications/docs/2204/usg/cis) documentation to learn more. | Disabled | - - - -Use the following steps to enable Ubuntu Pro. -
- - -1. Log in to [Palette](https://console.spectrocloud.com). - - - -2. Navigate to the left **Main Menu** and select **Profiles**. - - -3. Click on **Add Cluster Profile**. - - -4. Fill out the input fields for **Name**, **Version**, **Description**, **Type** and **Tags**. Click on **Next** to continue. - - - -5. Select the infrastructure provider and click on **Next**. - - -6. Select the OS layer and use the following information to find the Ubuntu pack: - - - **Pack Type** - OS - - - **Registry** - Public Repo - - - **Pack Name** -Ubuntu - - - **Pack Version** - 20.04 or 22.04 - - -7. Modify the Ubuntu **Pack values** to activate the **Presets** options for the Ubuntu YAML file. Click on the **** button to reveal the YAML editor and expand the **Preset Drawer**. - - -
- - ![A view of the cluster profile creation wizard for Ubuntu Pro](/integrations_ubuntu_ubuntu-pro-preset-drawer.png) - - -8. Click the **Ubuntu Advantage/Pro** checkbox to include the Ubuntu Pro parameters in the pack configuration file. - - -9. Toggle options on or off to enable or disable the various Ubuntu Pro services. - - - -10. Click the **Next layer** button to continue to the next layer. - - -11. Complete the remainder of the cluster profile creation wizard by selecting the next cluster profile layers. - - -
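
After a cluster is deployed with this profile, you can optionally confirm the subscription on a node. This assumes you have SSH access to the node and that the image includes the Ubuntu Pro client (the `pro` command, formerly `ua`):

```bash
# Shows whether the node is attached to Ubuntu Pro and which services
# (for example esm-apps, livepatch, fips, fips-updates) are enabled.
sudo pro status
```
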
- - -
- - - -All Ubuntu versions less than v20.04.x are considered deprecated. Upgrade to a newer version to take advantage of new features. - - -
- - -# Terraform - -You can reference Ubuntu in Terraform with the following code snippet. - -
- - - - -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "ubuntu" { - name = "edge-native-ubuntu" - version = "22.04" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - - - -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "ubuntu" { - name = "ubuntu-maas" - version = "22.04" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - - - -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "ubuntu" { - name = "ubuntu-vsphere" - version = "22.04" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - - - -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "ubuntu" { - name = "ubuntu-openstack" - version = "22.04" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - - - -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "ubuntu" { - name = "ubuntu-coxedge" - version = "22.04" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - - - -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "ubuntu" { - name = "ubuntu-aws" - version = "22.04" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - - - -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "ubuntu" { - name = "ubuntu-azure" - version = "22.04" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - - - -```hcl -data "spectrocloud_registry" "public_registry" { - name = "Public Repo" -} - -data "spectrocloud_pack_simple" "ubuntu" { - name = "ubuntu-gcp" - version = "22.04" - type = "helm" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - - - - - - - - -# References - - -- [Create a Cluster Profile](/cluster-profiles/task-define-profile) - - -- [Ubuntu Documentation](https://docs.ubuntu.com) - - -- [Ubuntu Pro Documentation](https://ubuntu.com/server/docs) - - -- [Kubernetes API Server Configuration](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver) diff --git a/content/docs/06-integrations/00-vault.md b/content/docs/06-integrations/00-vault.md deleted file mode 100644 index 8b80c6efbf..0000000000 --- a/content/docs/06-integrations/00-vault.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: 'Vault' -metaTitle: 'Vault Integration with Spectro Cloud' -metaDescription: 'Integration of the Vault add on into Spectro Cloud' -hiddenFromNav: true -type: "integration" -hideToC: false -category: ['security', 'amd64', 'arm64'] -logoUrl: 'https://registry.spectrocloud.com/v1/vault/blobs/sha256:1abda0173be1fd4ddfeccd2ff15089edd38a25e433ad7bb562a770d92992c7af?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Vault - -[Vault](https://www.vaultproject.io/) helps secure, store, and tightly control access to tokens, passwords, 
certificates, encryption keys for protecting secrets, and other sensitive data using a UI, CLI, or HTTP API. - -## Versions Supported - - - - - -* **0.22.0** - - - - - -* **0.20.1** - - - - -* **0.11.0** - - - - - - * **0.9.0** - - - - - - -* **0.6.0** - - - - - -* **0.3.1** - - - - - -## Components - -Vault integration has the following components: - -* Vault server. -* UI (Optional). -* [Agent injector](https://www.vaultproject.io/docs/platform/k8s/injector/) (Optional). - - - -## Supported Use cases - -1. Running a Vault Service: - * Vault is set up to run in **Dev mode** by default and so, Vault will be unsealed and initialized. - * For production use cases, we recommend disabling Dev mode and enable HA. - * Also, see [Production Checklist](https://www.vaultproject.io/docs/platform/k8s/helm/run#architecture) recommendations. -1. Injecting application secrets from an external Vault into pods (**Agent Injector**). - * For running agent injector alone in the cluster, use v0.6.0 of Vault pack. - * Make sure to set `injector.externalVaultAddr` to point to the external Vault server. - -## How secrets are injected in deployments? - -In Kubernetes clusters with Vault integrated, secrets can be injected into the application pods by adding the following annotations: - -```yaml -vault.hashicorp.com/agent-inject: "true" -vault.hashicorp.com/agent-inject-secret-: /path/to/secret -vault.hashicorp.com/role: "" -``` - -More information on consuming Vault secrets can be found in [Vault docs](https://www.vaultproject.io/docs/platform/k8s/injector) - -# Ingress - -Follow below steps to configure Ingress on Vault Server - -1. Make sure serviceType is not set for Vault Server. That way, serviceType will default to ClusterIP - * Version 0.6.0 - line #289 - * Version 0.3.1 - line #96 -2. Ingress - * Enable Ingress ; Change enabled from false to "true" - * Set Ingress rules like annotations, path, hosts etc. - * Version 0.6.0 - line #146 - * Version 0.3.1 - line #96 - -With these config changes, you can access Vault service on the Ingress Controller LoadBalancer hostname / IP - -## References - -* [Vault Agent injector](https://www.vaultproject.io/docs/platform/k8s/injector/). -* Injecting Vault Secrets Into Kubernetes Pods via a Sidecar - [Blog](https://www.hashicorp.com/blog/injecting-vault-secrets-into-kubernetes-pods-via-a-sidecar/). -* Vault Agent injector - [Examples](https://www.vaultproject.io/docs/platform/k8s/injector/examples/). -* https://www.vaultproject.io/docs/platform/k8s/helm/run diff --git a/content/docs/06-integrations/00-vsphere-csi.md b/content/docs/06-integrations/00-vsphere-csi.md deleted file mode 100644 index a422b7f9b6..0000000000 --- a/content/docs/06-integrations/00-vsphere-csi.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: 'vSphere-csi' -metaTitle: 'vSphere CSI Integration with Spectro Cloud' -metaDescription: 'vSphere CSI storage add on into Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['storage', 'amd64','fips'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-vsphere-volume/blobs/sha256:2cd106b353cb492d4647a1562fe59db6a1aeb792333900fe4e15237f899298b5?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# vSphere Storage Interface (CSI) - -The main goal of vSphere Container Storage Interface (CSI) is to expose vSphere storage and features to Kubernetes users. 
It offers an in-tree volume driver that has been actively used on various Kubernetes platforms by service providers, including on-prem. Cloud Native Storage (CNS) is a result of evolution and productization of vSphere Storage for Kubernetes and is also enterprise ready. - - -## vSphere CSI Driver Components - -The vSphere CSI driver includes the following components: - -* vSphere CSI Controller -* vSphere CSI Node -* Syncer - - * Metadata Syncer - * Full Sync - -## vSphere CSI Compatibility Matrix - -|Palette Release| CSI-Driver Version| Kubernetes Versions | -|---------------|-------------------|---------------------| -| 3.0 | 2.6.0 | 1.20.x to 1.24.x | -| 2.2 | 2.3.0 | 1.20.x to 1.23.x | -| 2.8 | 2.5.2 | 1.20.x to 1.23.x | - - -## Versions Supported - - - - -* **2.6.0** - - - - - -* **2.5.2** - - - - -* **2.3.0** - - - - -## Troubleshooting - -Storage classes created by Spectro Cloud are named "spectro-storage-class" and can be fetched from kubectl using the following CLI command: - -```bash -kubectl get storageclass -``` - - -# References - -More info about Storage classes can be found in the following links: - -[Storage Classes][https://kubernetes.io/docs/concepts/storage/storage-classes/] -[CSI Driver Documentation](https://vsphere-csi-driver.sigs.k8s.io/) -[CSI Driver GitHub](https://github.com/kubernetes-sigs/vsphere-csi-driver) diff --git a/content/docs/06-integrations/00-vsphere-volume.md b/content/docs/06-integrations/00-vsphere-volume.md deleted file mode 100644 index 0e1cb17694..0000000000 --- a/content/docs/06-integrations/00-vsphere-volume.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: 'vSphere-Volume' -metaTitle: 'vSphere volume Integration with Spectro Cloud' -metaDescription: 'vSphere volume storage add on into Spectro Cloud' -hiddenFromNav: true -type: "integration" -category: ['storage', 'amd64'] -logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-vsphere-volume/blobs/sha256:2cd106b353cb492d4647a1562fe59db6a1aeb792333900fe4e15237f899298b5?type=image/png' ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# vSphere Volume - -vSphere volume virtualizes SAN/NAS arrays, enabling an efficient operational model optimized for virtualized environments and centered on the application instead of the infrastructure There are two types of provisioners for vSphere storage classes: - -* CSI provisioner: csi.vsphere.vmware.com -* vCP provisioner: kubernetes.io/vsphere-volume - -## Versions Supported - - - - -* **1.0.0** - - - - -## Notable Parameters - -| Name | Supported Values | Default Value | Description | -| --- | --- | --- | --- | -| diskformat | thin, zeroedthick and eagerzeroedthick | zeroedthick | The storage account type to use | -| datastore | Datastore Name | | If specified, the volume will be created on the datastore specified in the storage class | -| isDefaultClass | true, false | true | Flag to denote if this StorageClass will be the default | - -**References:** - -https://kubernetes.io/docs/concepts/storage/storage-classes/#vsphere - - -# Further Info - -More info about Storage classes can be found in the following links: - -https://kubernetes.io/docs/concepts/storage/storage-classes/ - -# Troubleshooting - -Storage classes created by Spectro will be with the name "spectro-storage-class" and can be fetched from kubectl using the following CLI command: - -```bash -kubectl get storageclass -``` diff --git 
a/content/docs/06-integrations/01-maintenance-policy.md b/content/docs/06-integrations/01-maintenance-policy.md deleted file mode 100644 index 1034502c72..0000000000 --- a/content/docs/06-integrations/01-maintenance-policy.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -title: "Maintenance Policy" -metaTitle: "Packs Maintenance Policy" -metaDescription: "Learn about Palette pack update and deprecation schedules." -icon: "" -hideToC: false -fullWidth: true -hideToCSidebar: false ---- - -import {Content} from "shared/layouts/Default"; -import Tabs from "shared/components/ui/Tabs"; -import Packs from "shared/components/common/Integrations/Packs" -import AppTiers from "shared/components/common/Integrations/AppTiers" -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - - -# Overview - -Palette supports two pack categories: *Core* and *Add-on*. Core packs, which we often source from third parties, are infrastructure-related or have prominence in container-based environments. They create the layers in Palette profiles. Core packs are grouped as follows: - -- Kubernetes - -- Operating System (OS) - -- Container Network Interface (CNI) - -- Container Storage Interface (CSI) - - -Add-on packs provide additional functionality that you can add to your cluster profile and are grouped as follows: - -- Load Balancer - -- Ingress - -- Logging - -- Monitoring - -- Security - -- Authentication - -- System Apps - - -Check out the [Packs List](/integrations) document, where you can use the filter buttons to display a list of Palette packs in each category and learn about the individual packs. - 
- -## Pack Updates - -Packs undergo rigorous vulnerability scans and penetration testing before they become available in Palette. The following sections describe our update schedule for each core pack category. - -
- -### Kubernetes Packs - -We provide Cloud Native Computing Foundation (CNCF) Kubernetes updates as follows: - -
- -- **Major versions**: Assessed based on the extent of changes. - - -- **Minor versions**: Provided within eight weeks of a new Kubernetes release. - - -- **Patch versions**: Provided within four weeks of a new Kubernetes release. - - - -### OS Packs - -We provide Ubuntu LTS and CentOS updates for IaaS clusters as follows: - -
- -- **Major versions** - Added within 8 weeks of release. - - -- **Patch and Minor versions**: Updated at runtime using Palette’s on-demand or scheduled OS upgrades and patch-on-boot capabilities. - - -### CNI Packs - -We provide CNI pack updates as follows: - -
- -- **Major versions**: Assessed based on the extent of changes. - - -- **Minor version**: Added within 6 weeks of release. - - -- **Patch versions**: Added within 4 weeks of release. - - -### CSI Packs - -We provide CSI pack updates as follows: - -
- -- **Major versions**: Assessed based on the extent of changes. - - -- **Minor version**: Added within 6 weeks of release. - - -- **Patch versions**: Added within 4 weeks of release. - - -### Add-on Packs - -We provide add-on pack updates as follows: - -
- -- **Major versions**: Assessed based on the extent of changes. - - -- **Minor version**: Added within 6 weeks of release. - - -- **Patch versions**: Added within 4 weeks of release. - - -## Packs Deprecation - - -We deprecate and remove packs when a more stable version of the pack is available or when the underlying technology becomes obsolete. When a pack is deprecated, you will still be able to create new cluster profiles using the pack and deploy clusters that use profiles containing the pack. - -Palette displays the deprecation stage when you click the information icon next to the pack name during profile creation. - -![Screenshot showing how Palette indicates a pack's stage of deprecation.](/integrations_deprecation-stage.png) - -An information icon in the profile stack also displays a message that instructs about required pack versions. - -![Screenshot showing message in profile stack that tells you the required pack version to use.](/integrations_deprecation-profile-stack-msg.png) - -We adhere to the following stages of deprecation: - -
- -- **Deprecated**: When a pack or a pack version is deprecated, this indicates it will be removed in the future. You will still be able to create new cluster profiles using the pack and launch clusters using existing profiles that contain the pack. - - The pack remains in *Deprecated* state for three months before it moves to *Disabled* state. - -
- -- **Disabled**: When a pack is disabled, it is no longer available for selection in Palette. When creating new profiles, you must use a newer version of the pack. You can still launch clusters using existing profiles that contain the disabled pack. - - The pack remains in *Disabled* state for three months before it is deleted. - -
- -- **Deleted**: When a pack is deleted, it is removed from Palette. An active cluster that contains the deleted pack will continue to operate. However, you will not be able to deploy a new cluster profile that contains the deleted pack. - - - -For important guidelines on updating pack versions, review [Update the Pack Version](/cluster-profiles/task-update-profile#updatethepackversion). - - - -### Kubernetes Packs - -A minor Kubernetes version is deprecated in Palette when the Kubernetes community announces the version is entering End of Life (EOL). - -
- -### CNI / CSI / Add-on Packs - -Palette supports a minor version of CNI, CSI, and add-on packs until two newer versions are available. At that time, packs in these categories are deprecated. - - -
- -
- diff --git a/content/docs/07-projects.md b/content/docs/07-projects.md deleted file mode 100644 index 1b8662af4e..0000000000 --- a/content/docs/07-projects.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: "Projects" -metaTitle: "Concept: Projects" -metaDescription: "Understanding what Spectro Cloud projects are" -icon: "cog" -hideToC: false -fullWidth: false ---- - -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Projects - -A **Project** helps you organize the cluster resources in a logical grouping. The resources that are created within a project are scoped to that project and not available to other projects. You can also assign users and teams with specific roles to specific projects. - -# Project Dashboard - -The **Tenant Admin** > **Projects** page displays the project-related dashboard cards for all projects in the tenant. - - -## Project Card - -The **Project card** shows the status and relevant details of a cluster, grouping information about healthy, unhealthy, and errored clusters. It calculates cluster health by evaluating the health of each node, taking into account factors such as memory and CPU utilization, disk pressure, and network availability. Additionally, it displays the number of clusters imported and those provisioned by Palette. -### Cores per Project Usage - -By default, the active worker node usage of CPU **Cores** is grouped across all projects and shown as an hourly interval. You can change the interval value to days or months. - - -# Create a Project - -Use the following steps to create a new project. - -
- - - -You can associate users and teams with a project. Check out the [Project Association](/user-management/project-association) page to learn more. - - - -## Prerequisites - -* Tenant admin access - -## Enablement - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to **Tenant Admin** > **Projects** and click the **Create Project** button. - - -3. Fill out the following fields: **Name**, **Description**, and **Tags** to create a Project. - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to **Tenant Admin** > **Projects** - -Your newly created project is listed along with other existing projects. - - -# Delete a Project - - -You can remove projects by following these steps. - -## Prerequisites - -* Tenant admin access. - -* No active clusters in the project. - -## Remove Project - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Switch to **Tenant Admin** scope. - - -3. Navigate to the left **Main Menu** and select **Projects**. - - -4. Locate the project card for the project you want to remove. - - -5. Click on the **three-dot Menu** and select **Delete**. - - -6. A pop-up box will ask you to confirm the action. Confirm the deletion. - - - - -You can delete projects with force as long as there are no active clusters. Force deleting will eliminate all resources linked to the project, such as app profiles, cluster profiles, workspaces, audit logs, and custom project settings. However, if a project has active clusters, you must remove them first before deleting the project. - - - - -## Validate - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to **Tenant Admin** > **Projects** . - -The project you deleted is no longer displayed and available for interaction. \ No newline at end of file diff --git a/content/docs/08-user-management.md b/content/docs/08-user-management.md deleted file mode 100644 index 94b5802296..0000000000 --- a/content/docs/08-user-management.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: "User Management" -metaTitle: "User Management" -metaDescription: "Dive into Palette's user management capabilities and how to manage users' access and setting up controls, integrations, and more." -icon: "roles" -hideToC: false -fullWidth: false ---- - -import WarningBox from 'shared/components/WarningBox'; -import Tooltip from "shared/components/ui/Tooltip"; - -# User Management - -This section touches upon the initial login aspects for Tenant Admins and non-admin users and the RBAC setup within Palette. - -## User Login - -For a Tenant admin, the password shall be set upon the initial login. The Tenant admin can add non-admin users. For all users, login can be made available using the following options: - -* Using Palette credentials on the login page. - -* SSO using Identity Providers that use SAML 2.0: - * Azure Active Directory - * Okta - * Keycloak - * OneLogin - * Microsoft ADFS - * Others - -## RBAC - -Palette allows the users that have been added to be allowed or restricted access to resources based on the roles set by the tenant admin. This Role-Based Access Control is explained in detail in the RBAC}>Palette's RBAC design allows granting granular access to resources and its operations [page](/user-management/palette-rbac#rbac). - -# Roles and Permissions - -The Tenant admin can allow or restrict access of resources to users which can differ as per the scenario. 
A user can have complete access to a specific project but can be restricted access to other projects in which there is no involvement. An intermediate stage is also possible where read-only access can be provided in some projects. The Roles}>A Role is a collection of permissions. and Permissions}>Permissions are associated with specific actions within the platform. sections in the [RBAC](/user-management/palette-rbac#rbac) page provide more details on this. - -To add a user to a project: - 1. Sign in as a Tenant admin and go to the **Users and Teams** section of the [Tenant Admin Dashboard](/getting-started#admindashboard). - - 1. Click on the user that you want to enable access to. - - 1. In the **Role** editor that opens to the side, find the **Project Roles** section and click **Add Role**. - - 1. Select the required **Project** from the dropdown menu and enable the **Roles** as needed. - -# Multi-Organization Support for Users - -Palette is incorporating multi-organization support for its users. With this feature, we provide our users with the flexibility of having a unique email address ID across multiple organizations. Hence, the users can maintain SSO credentials across multiple organizations/tenants. - -The key benefits of this feature are: - -* The use of a single email address ID across multiple organizations. -* Within an organization, maintain a unique email ID. -* In the case of password-based authentication, the same password is applicable across multiple organizations. The change of password, made under a particular organization, is applied across other organizations to maintain a single password across all organizations. -* The password policy stays independent of organizations/tenants. Each tenant retains individual password policy. -* For SSO-based authentication, for each organization/tenant, the individual identity provider client application can be configured. Hence, allowing the configuration of a single SSO with multiple identity providers across multiple tenants/organizations mapping each client app to a tenant. -* However, for self-sign-up, the unique email address ID is enforced across tenants to avoid conflicts. -* In the Palette console, the users can switch between the organizations/tenants using the Organization drop down menu of the login page. diff --git a/content/docs/08-user-management/04-saml-sso/00-enable-saml.md b/content/docs/08-user-management/04-saml-sso/00-enable-saml.md deleted file mode 100644 index ff6e658a55..0000000000 --- a/content/docs/08-user-management/04-saml-sso/00-enable-saml.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: 'Enable SSO in Palette' -metaTitle: 'Enable SSO in Palette' -metaDescription: 'Learn how to enable SSO in Palette' -icon: "" -hideToC: false -fullWidth: false -hideToCSidebar: false -hiddenFromNav: false ---- - -# Overview - -With Spectro Cloud Palette, you can use SAML 2.0 protocols for single sign-on (SSO) authentication using your IdP. - -
- -# Set Up SAML-based SSO - -To set up SAML-based SSO for Spectro Cloud Palette with your identity provider (IdP):


-1. Log in to the Palette console as a Tenant Admin.


-2. Select **Tenant Settings** > **SSO Auth Type** > **SAML** to view the SAML panel.


-3. Complete the assertion form with the requested parameters. See below for more details specific to the supported IdPs.


- The following options will be available for configuring SAML SSO within Palette:


- - - **Service** - Choose your IdP (Azure Active Directory, Okta, Keycloak, OneLogin, ADFS, Other).


- - **Identity Provider Metadata** - Enter the Identity Provider Metadata.


- - **Default Teams** - Add the authenticated user's Default Team(s) Palette membership.


- - **NameID Format** - Choose the appropriate version of the format in use (SAML 1.1, SAML 2.0, email address, other).


- - The following parameters will enable Spectro Cloud Palette as a **Service Provider** (SP) in your IdP. Your IdP will require some or all the information listed below to enable SSO with Palette.


- - - **Single Logout URL** - The IdP will use the logout URL for the SAML SSO configuration.


- - **EntityId** - https://www.spectrocloud.com


- - **FirstName** - Attribute in First Name format.


- - **LastName** - Attribute in Last Name format.


- - **Email** - Attribute in Email format.


- - **SpectroTeam** - Attribute in SpectroTeam format.


- - **Service Provider Metadata** - Provide the EntityDescriptor.


- -4. Edit each parameter as necessary and click **Enable** to complete the setup wizard.


- -
- - -# Set Up OIDC-based SSO - -Spectro Cloud Palette supports OpenID Connect (OIDC), a widely adopted authentication standard that provides secure identity management in a highly interoperable format.


- -## Procedure - -To set up an OIDC-based SSO in Spectro Cloud Palette perform the following steps:


- -1. Log in to the Palette console as the Tenant Admin.


-2. Select the **Tenant Settings** > **SSO** > **OIDC** to view the panel.


-3. Enable Spectro Cloud as the **Service Provider** by completing the form with the following parameters. Select the tabs below for more details specific to IdPs supported with Palette.


- - * **Issuer URL** - The URL of the OpenID identity provider.
**Note**: For AWS users, the Issuer URL must be generated in the following format:


- `https://cognito-idp.[REGION].amazonaws.com/[USER-POOL-ID]`


- - - **Client ID** - The ID for the client application that makes authentication requests.


- - **Client Secret** - Enter the secret created by the IdP.


- - **Default Teams** - The Default Palette Team(s) to which authenticated members are assigned automatically.


- - **Scopes** - The user details to be shared as part of SSO, such as *email*, *firstname*, *lastname*, or *groups*. Each scope returns a set of user attributes, called claims.


Microsoft Azure AD Example: "openid, profile, email, allatclaims"


- - **REQUIRED CLAIMS** - These are the parameter values, claimed by the user, to be mapped with the Identity Provider Platform. Complete the Required Claims:


- - **Email** - Azure AD Example: "email"


- - **First Name** - Azure AD Example: "given_name"


- - **Last Name** - Azure AD Example: "family_name"


- - **Spectro Team Name** - Azure AD Example: "groups".


Any non-admin user that is added to a Tenant, must be added to at least one Team. This Team can be changed later if needed. See the [Teams](/glossary-all#team) section for more details on Teams.


- - - If a user is not added to a Team, the user can still log in successfully but will not be able to see the console until proper Project or Tenant permissions are applied (Tenant Admin, Project Admin, Project Viewer, and so on). The **SpectroTeam** attribute carries forward the available team(s) for the user being authorized. This gives the administrator the flexibility to grant access to Spectro Cloud Palette using either Users or Groups in their IdP or by adding users directly to a Palette Team(s).


- - - The values of the **SpectroTeam** parameter are case-sensitive, so the Tenant Admin should ensure that the team names are identical on both consoles. To sync an IdP group with a Palette Team, ensure the IdP group name (or, for Azure Active Directory, the Object ID corresponding to the IdP group name) matches the Palette Team name.


- - - A common use case is a Tenant Admin adding a new member to the Palette Tenant. The administrator can configure a default Palette Team or a synced IdP group that is common to all authenticated users. This default Palette Team or IdP group can be applied in the Palette SAML panel as a one-time setting.


- - -Your IdP may require the following settings to configure OIDC SSO with Palette: - - - **Callback URL** - The URL to which Auth0 redirects users after they authenticate. Ensure that this value is configured for the application you registered with the OIDC Identity Provider. - - - **Logout URL** - The IdP will use the logout URL for the OIDC SSO configuration. - - - -## Results -You have now established the minimum configuration that is required to configure Palette OIDC, capable of communicating with other IdPs configured as OpenID Connect Providers. - diff --git a/content/docs/08-user-management/04-saml-sso/02-palette-sso-with-adfs.md b/content/docs/08-user-management/04-saml-sso/02-palette-sso-with-adfs.md deleted file mode 100644 index bbaa119f39..0000000000 --- a/content/docs/08-user-management/04-saml-sso/02-palette-sso-with-adfs.md +++ /dev/null @@ -1,299 +0,0 @@ ---- -title: 'Palette SSO with Microsoft AD FS' -metaTitle: 'Set up Palette SSO with Microsoft Active Directory Federation Service (AD FS)' -metaDescription: 'Set up Palette SSO with Microsoft Active Directory Federation Service (AD FS)' -icon: "" -hideToC: false -fullWidth: false -hideToCSidebar: false -hiddenFromNav: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Enable SSO with Microsoft Active Directory Federation Service (AD FS) - -Single sign-on (SSO) is an authentication method that enables users to log in to multiple applications and websites with one set of credentials. SSO works upon a trust relationship established and maintained between the service provider (SP) and an identity provider (IdP) using certificates. Palette supports SSO based on either SAML or OIDC. - -The following steps will guide you to enable Palette SSO with [Microsoft AD FS](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/development/ad-fs-openid-connect-oauth-concepts) based on OIDC. - -
- - - - You cannot use Microsoft AD FS for SAML-based SSO with Palette. Microsoft AD FS does not support the Canonical XML 1.1 standard that Palette employs. You can only use the OIDC-based approach for Microsoft AD FS. - - - -# Prerequisites -To set up OIDC-based SSO with Microsoft AD FS, you need to use one of the following versions: -* Microsoft AD FS 2022 (comes with Windows Server 2022) -* Microsoft AD FS 2019 (comes with Windows Server 2019) -* Microsoft AD FS 2016 (comes with Windows Server 2016) - -If you need to be able to access your AD FS service from outside your corporate network, you will also need an AD FS Reverse Proxy. An official Microsoft tutorial for setting up an AD FS Reverse Proxy is not available, but you can use this blog post from [Matrixpost](https://blog.matrixpost.net/set-up-active-directory-federation-services-ad-fs-5-0-adfs-reverse-proxy-part-2/) for additional guidance. - - -# Enablement -## Create the AD FS Application Group for Palette - -1. Open the AD FS Management console on your Windows Server and add a new Application Group for Palette:
- -![Add AD FS Application Group](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-application-group.png) - -
- -2. Provide a suitable name for the application group and select **Server Application** from the list of templates. Then click **Next**: - -
- -![Name Application Group](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_name-application-group.png) - -
- -3. The next screen displays the **Client Identifier** for this Application Group: - -![Get Client Identifier](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_get-client-identifier.png) - -
- -4. Copy the client identifier value and save it somewhere. You will need to enter this value into the Palette SSO configuration later. - - -5. Open a web browser and navigate to your Palette subscription. Navigate to **Tenant Settings** --> **SSO** and click **OIDC**. Click the button next to **Callback URL** to copy this value to the clipboard: - -![Copy Callback URL](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-callback-url.png) - -
- -6. Switch back to your AD FS Management console and paste the copied value into the **Redirect URI** field, then click **Add** to add it to the list: - -![Paste Redirect URI](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_paste-redirect-uri.png) - -
- -7. Switch back to Palette in the web browser and click the button next to **Logout URL** to copy this value to the clipboard: - -![Copy Logout URL](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-logout-url.png) - -
- -8. Switch back to your AD FS Management console and paste the copied value into the **Redirect URI** field, then click **Add** to add it to the list: - -![Paste Logout URI](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_paste-logout-uri.png) - -
- -9. These two redirect URIs are required for SSO to work with Palette. You can also add additional redirect URIs. The URIs in the table below are useful when you want to use AD FS for OIDC authentication into your Kubernetes clusters. - -| URL | Type of Access | -| --- | --- | -| `http://localhost:8000` | Using kubectl with the kube-login plugin from a workstation | -| `https://console.spectrocloud.com/v1/shelly/oidc/callback` | Using the web-based kubectl console | -| `https:///oauth/callback` | Using OIDC authentication into Kubernetes Dashboard | - -10. When you have completed entering redirect URIs, click **Next**. On the next page of the wizard, select **Generate a shared secret** and click **Copy to clipboard** to copy the secret value and save it somewhere. You will need to enter this value into the Palette SSO configuration later: - -![Copy Shared Secret](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-shared-secret.png) - -
- -11. Click **Next** and on the Summary screen, click **Next** again to complete the wizard. You need to add another application to the application group. Select the newly created application group and click **Properties**: - -![Open Application Group](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_open-oidc-app.png) - -
- -12. In the Properties screen, click **Add application...**. In the wizard that opens, select **Web API** and click **Next**: - -![Add Web API application](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-web-api.png) - -
- -13. In the **Identifier** field, add the following entries: -* The **Client Identifier** value you saved when creating the application group. -* The base URL of your Palette tenant. This is equal to the URL shown by your browser when logged into Palette minus the path. Example `https://johndoe-spectrocloud.console.spectrocloud.com`. - -
- -![Find Base URL](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_base-url.png) - -
- -![Add Web API Identifiers](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-identifiers.png) - -
- -14. Click **Next** when done. On the next screen, select a suitable policy for who can use this SSO and under what circumstances. If you're not sure which policy to choose, select **Permit everyone**, then click **Next**: - -
- -![Select Access Control Policy](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_select-policy.png) - -
- -15. On the next screen, by default only the **openid** scope is ticked. However, to include the user's groups in the OIDC claim, you need to also enable the **allatclaims** scope. If your AD FS server does not yet have an **allatclaims** scope in the list, click **New scope...** and type `allatclaims` in the Name field, then click **OK** to add it. Ensure both scopes are enabled and then click **Next**: - -![Enable Permitted Scopes](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_enable-scopes.png) - - -16. On the Summary screen, click **Next** to finish the wizard. You need to set the **Issuance Transform Rules** for the Web API application. Open the application again by double-clicking on the Web API entry or clicking **Edit**. - -![Re-open Web API Application](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_reopen-webapi-app.png) - -
- -17. Navigate to the **Issuance Transform Rules** tab and click **Add Rule**. - -![Add Issuance Transform Rule 1](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-transform-rule-1.png) - -
- -18. Select the **Send LDAP Attributes as Claims** template and click **Next**: - -![Send LDAP As Claims Rule](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_ldap-as-claims.png) - -
- -19. Name the rule `OpenID - LDAP Attribute Claims`. Select **Active Directory** as the Attribute store and add the following LDAP mappings: -* **E-Mail-Addresses** --> `email` -* **Given Name** --> `given_name` -* **Surname** --> `family_name` - -You can select the items on the left from the list. You will need to type the items on the right manually. Ensure you use all lowercase characters for the values on the right: - -![Set LDAP Claims](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_set-ldap-claims.png) - -
- -20. Click **Finish** to add the rule. Now click on **Add Rule...** again to add the second rule: - -![Add Issuance Transform Rule 2](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-transform-rule-2.png) - -
- -21. Select the **Send Group Membership as Claims** template and click **Next**: - -![Send Groups As Claims Rule](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_groups-as-claims.png) - -
- -22. In the next screen, define the group claim as desired. In the following example, a group in Active Directory is called `SpectroTeam - Admins`. The desired behavior is for anyone that is a member of that group, to be issued a `groups` claim with the value `Admins`. In Palette this user will automatically be mapped to a group with the same name, `Admins`. You can assign RBAC permissions to that group in Palette to give it the desired access. - -![Set Group Claim](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_set-group-claim.png) - -
- -23. Click **Finish** to add the rule. Click **OK** to save the changes to the Web API rule and click **OK** again to save the changes to the application group. - -24. Take note of your AD FS identifier, you will need this for Palette in the next step. Typically this is your AD FS name plus `/adfs`. You can also take the Federation Service identifier and remove `/services/trust` from that URL: - -![Note AD FS Name](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_note-adfs-name.png) - -
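
Before entering the values into Palette, you can optionally verify the issuer by querying the standard AD FS OIDC discovery endpoint. The hostname below is a placeholder for your federation service FQDN, and `jq` is only used for readability:

```bash
# The "issuer" field in the discovery document is the value Palette expects as the Issuer URL.
curl -s https://adfs.example.com/adfs/.well-known/openid-configuration | jq '.issuer'
```
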
- -## Enable OIDC SSO in Palette - -25. Open a web browser and navigate to your [Palette](https://console.spectrocloud.com) subscription. - -Navigate to **Tenant Settings** --> **SSO** and click on **OIDC**. Enter the following information. - -| Parameter | Value | -|-------------------|--------------------------------------------------------------------| -| Issuer URL | Your AD FS issuer URL. Typically this is your AD FS name plus /adfs.| -| Client ID | The client identifier that you saved in step **4**. | -| Client Secret | The shared secret that you generated in step **8**. | -| Default Teams | Leave blank if you don't want users without group claims to be assigned to a default group. If you do, enter the desired default group name. If you use this option, be careful with how much access you give to the group. | -| Scopes | Set this to `openid` and `allatclaims`. | -| Email | Keep `email` as the default. | -| First Name | Keep `given_name` as the default. | -| Last Name | Keep `family_name` as the default. | -| Spectro Team | Keep `groups` as the default. | - -![Enable Palette OIDC SSO](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_configure-palette-oidc.png) - - -26. When all the information has been entered, click **Enable** to enable SSO. You will receive a message stating **OIDC configured successfully**. - - -## Create Teams in Palette - -The remaining step is to create teams in Palette for the group claims that you configured in AD FS, and give them the appropriate permissions. For this example, you will create the `Admins` team and give it **Tenant Admin** permissions. You can repeat this for any other team that you configured with group claims. - -27. Open a web browser and navigate to your Palette subscription. Navigate to **Tenant Settings** --> **Users & Teams** --> **Teams** tab, and click **+ Create Team**. - -![Create Palette Team](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_create-team.png) - -
- -28. Specify `Admins` in the **Team name** field. You don't need to set any members now, as this will happen automatically from the SSO. Click **Confirm** to create the team. - -![Name Palette Team](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_name-team.png) - -
- -29. The list of teams displays again. Select the newly created **Admins** team to review its details. To give this team administrative access to the entire tenant and all the projects in it, assign the **Tenant Admin** role. Select **Tenant Roles** and click **+ Add Tenant Role**: - -![Palette Tenant Roles](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_tenant-roles.png) - -
- -30. Click on **Tenant Admin** to enable the role. Click **Confirm** to add the role. - -![Add Tenant Role](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-tenant-role.png) - -
- -You will receive a message stating **Roles have been updated**. Repeat this procedure for any other teams, taking care to ensure they are given the appropriate permissions. - -31. Click the **X** next to **Team Details** in the top left corner to exit this screen. - -You have now successfully configured Palette SSO based on OIDC with Microsoft AD FS. - - -# Validate - -1. Log in to Palette through SSO as a user that is a member of the `SpectroTeam - Admins` group in Active Directory to verify that users are automatically added to the `Admins` group in Palette. - -If you're still logged into Palette with a non-SSO user, log out by selecting **Logout** in the **User Menu** at top right. - -![User Logout](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_user-logout.png) - -
-
-
-2. The Palette login screen now displays a **Sign in** button and no longer presents a username and password field. Below the **Sign in** button is an **SSO issues? --> Use your password** link. Use this link to bypass SSO and log in with a local Palette account if you ever need to access Palette while SSO is unavailable.
-
-Click on the **Sign in** button to log in via SSO.
-
-![User SSO Login](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_palette-login.png)
-
- -3. If this is the first time you are logging in with SSO, you will be redirected to the Microsoft AD FS login page. Depending on your organization's SSO settings, this could be a simple login form or require MFA (Multi-Factor Authentication). - -Make sure you log in as a user that is a member of the `SpectroTeam - Admins` group in Active Directory. Once authenticated, you will automatically be redirected back to Palette and logged into Palette as that user. - -4. You are now automatically added to the `Admins` team in Palette. To verify, navigate to the left **Main Menu**, select **Tenant Settings** --> **Users & Teams** --> **Teams** tab. Click the **Admins** team and view the team members section. - -![Palette Team Members](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_team-members.png) - - -The user you logged in as has automatically been added to this team. - - -# Resources - -- [Microsoft AD FS](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/development/ad-fs-openid-connect-oauth-concepts) - -- [Microsoft AD FS Reverse Proxy](https://blog.matrixpost.net/set-up-active-directory-federation-services-ad-fs-5-0-adfs-reverse-proxy-part-2/) - -- [Palette User Management](/user-management) - -- [Palette SSO](/user-management/saml-sso) diff --git a/content/docs/08-user-management/04-saml-sso/03-palette-sso-azure-ad.md b/content/docs/08-user-management/04-saml-sso/03-palette-sso-azure-ad.md deleted file mode 100644 index 89e73cfe95..0000000000 --- a/content/docs/08-user-management/04-saml-sso/03-palette-sso-azure-ad.md +++ /dev/null @@ -1,208 +0,0 @@ ---- -title: 'Palette SSO with Azure Active Directory' -metaTitle: 'Palette SSO with Azure Active Directory' -metaDescription: 'Learn how to enable SSO in Palette with Azure Active Directory' -icon: "" -hideToC: false -fullWidth: false -hideToCSidebar: false -hiddenFromNav: false ---- - -# Azure Active Directory and OIDC-Based Setup - -After configuration, your organization can integrate Microsoft Azure Active Directory to authenticate access to Spectro Cloud Palette. - -## Prerequisites - -- Microsoft Azure Active Directory with appropriate permissions to create and modify users, groups, Enterprise Applications (SAML) or App Registrations (OIDC).


-- Access to Palette - Request access for a [Free Trial](/getting-started/palette-freemium).


-- Appropriate rights and [enabled token IDs](https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-protocols-oidc#enable-id-tokens) in Azure.


-- [kubelogin](https://github.com/int128/kubelogin) - This is a `kubectl` plugin for Kubernetes OpenID Connect (OIDC) authentication, also known as `kubectl` oidc-login. - -
- -
Kubelogin Architecture
- -
- - ![kubelogin](https://github.com/int128/kubelogin/raw/master/docs/credential-plugin-diagram.svg "Credential Plugin Diagram from kubelogin") - -
- -## Steps for OIDC Integration in Microsoft Azure Active Directory - -From within Microsoft Azure AD, log in and find the Azure Active Directory service page. The following two libraries contain the necessary parameters to configure Palette. - -
- -1. **App registrations** - You will use Azure AD App registrations to configure OIDC SSO with Spectro Cloud Palette.


-
-2. **Enterprise applications** - You will use Azure AD Enterprise applications to configure SAML SSO with Spectro Cloud Palette.


- -![enterprise-app-registration](/oidc-azure-images/enterprise-app-registration.png) -

-
-## Integrating OIDC SSO for authenticating access to Kubernetes clusters using Microsoft Azure Active Directory
-
-This section describes how to enable Azure AD SSO authentication to access a Kubernetes cluster.
-
-1. From the sidebar menu, select **Tenant Settings** and ensure **Tenant Admin** is selected in the dropdown.


-2. Go to **Profiles** from within the Tenant Admin scope or a Project and select an existing Cluster Profile. Alternatively, if a Cluster Profile does not exist, create a new Cluster Profile with a CNCF Kubernetes distribution. Once you select a profile, its infrastructure layers are displayed.


-3. Choose the **Kubernetes** layer and select the **Pack Values** to modify.


-4. The Pack Version Settings are exposed when you have the appropriate privileges (Tenant Admin). Note the following **Variables** within the pack settings.


-
-## Configuring the Application OpenID Configuration in the Cluster
-
-1. Go to the **kubeadmconfig**:**apiServer**:**extraArgs** section of the pack layer and set the following values. A sketch of the resulting YAML follows the list below.


- - - **oidc-groups-claim** - "Groups"


- - **oidc-username-claim** - "Email"


- - **oidc-issuer-url** - "Issuer's URL"


- - **oidc-client-id** - "Client ID"


- - ![kubeadminconfig](/oidc-azure-images/kubeadmconfig.png) - -
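-
-The following is a minimal sketch of how these values typically fit together in the pack's `kubeadmconfig` section. The issuer URL and client ID shown are placeholder assumptions, not values generated for your environment; adjust them to match your Azure AD App Registration and pack version.
-
-```yml
-kubeadmconfig:
-  apiServer:
-    extraArgs:
-      # Issuer's URL - placeholder for your Azure AD tenant
-      oidc-issuer-url: "https://login.microsoftonline.com/<your-tenant-id>/v2.0"
-      # Client ID - placeholder for your App Registration
-      oidc-client-id: "<your-app-registration-client-id>"
-      # Claim that carries group membership
-      oidc-groups-claim: "groups"
-      # Claim used as the Kubernetes username
-      oidc-username-claim: "email"
-```
-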


-
-2. Next, find the **clientConfig** section and modify the following parameters. A sketch of this section is shown after the list below.


-
-   - **oidc-issuer-url** - The provider URL that allows Palette to discover the public signing keys.


-   - **oidc-client-id** - The client ID is found under the Application Registration/Enterprise Application.


- - **oidc-client-secret** - The secret provided by Azure AD.


- - **oidc-extra-scope** - The scope tags.
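-
-A minimal sketch of the **clientConfig** section, reusing the same placeholder values as the sketch above and assuming the commonly requested `profile` and `email` scopes:
-
-```yml
-clientConfig:
-  oidc-issuer-url: "https://login.microsoftonline.com/<your-tenant-id>/v2.0"
-  oidc-client-id: "<your-app-registration-client-id>"
-  oidc-client-secret: "<your-azure-ad-client-secret>"
-  oidc-extra-scope: profile,email
-```
-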


- -![oidc](/oidc-azure-images/client-config.png) -


-
-## Binding the AD Cluster Admin Group to the Cluster Admin Role via RBAC
-
-Configure the Role-Based Access Control (RBAC) pack.


- -### Adding an RBAC Pack - -1. Under **Tenant Admin**, create an **RBAC Cluster** profile.


-2. Go to **Cluster Profile** > +**Add Cluster Profile** and complete the Basic Information.


-3. Enter the **Name**, **Version**, and **Description** (Optional) and click **Next**.


-4. Under **Type**, select **+Add-on New Pack**.


-5. Select **Authentication** as the Pack Type.


-6. From the **Registry** dropdown, click **Public Repo**.


-7. Choose **Spectro RBAC** as the Pack Name.


-8. Select the Pack Version.


-9. Click the **spectro-rbac 1.0.0** Pack Values to edit the pack layer settings.

- **Note**: This is where you will edit the role settings.


-10. Click the **Confirm & Create** button.


- -### Editing the RBAC Cluster Profile - -1. From Palette, go to **Profiles** and choose the **RBAC** cluster profile.


-2. Click the layer image and specify the ClusterRoleBindings.


-3. Go to the **clusterRoleBindings**:**role** section and type **cluster-admin**.


-4. Adjust the settings to match your requirements and the specific Azure AD groups you want to bind.


- -For Azure AD integration with RBAC, edit your RBAC pack value to below. Or, copy and paste the entire block to your RBAC pack and modify you inputs where appropriate: - -```yml -pack: - spectrocloud.com/install-priority: "0" -charts: - spectro-rbac: - # Specify one or more ClusterRoleBinding - # Note that the _name_ attribute is optional - clusterRoleBindings: - - role: cluster-admin - name: bind-cluster-admin-role-to-cluster-admin - subjects: - #- type: User - #name: user5 - - type: Group - # For "name", input the Azure AD Group ID name and add a comment on what the Azure AD displayname is that corresponds to the Azure AD Group Name - # Example: Azure AD Group Object Id "70d19fd6-####-####-####-##c6c915e301" is tied to the Azure AD Security Group with the display name of "cluster-admin-role". - # name: "AZURE AD GROUP ID NAME" - name: "INSERT AZURE AD GROUP ID For Cluster Admins" - - role: admin - name: bind-admin-role-to-admin - subjects: - #- type: User - #name: user5 - - type: Group - # For "name", input the Azure AD Group ID name and add a comment on what the Azure AD displayname is that corresponds to the Azure AD Group Name - # Example: Azure AD Group Object Id "064f2e40-####-####-####-##b9f7927976" is tied to the Azure AD Security Group with the display name of "admin-role". - # name: "AZURE AD GROUP ID NAME" - name: "INSERT AZURE AD GROUP ID For Admins" - - role: view - name: bind-view-role-to-view - subjects: - #- type: User - #name: user6 - - type: Group - # For "name", input the Azure AD Group ID name and add a comment on what the Azure AD displayname is that corresponds to the Azure AD Group Name - # Example: Azure AD Group Object Id "732edc96--####-####-####-##851dee3380" is tied to the Azure AD Security Group with the display name of "view-role". - # name: "AZURE AD GROUP ID NAME" - name: "INSERT AZURE AD GROUP ID For Viewers" - #- type: ServiceAccount - #name: group6 - #namespace: foo - - role: edit - name: bind-edit-role-to-edit - subjects: - #- type: User - #name: user6 - - type: Group - # For "name", input the Azure AD Group ID name and add a comment on what the Azure AD displayname is that corresponds to the Azure AD Group Name - # Example: Azure AD Group Object Id "21b55c08-6-####-####-####-##a3e2245ad7" is tied to the Azure AD Security Group with the display name of "edit-role". - # name: "AZURE AD GROUP ID NAME" - name: "INSERT AZURE AD GROUP ID For Edit" - #- type: ServiceAccount - #name: group6 - #namespace: foo - #namespaces: - # Specify one or more RoleBindings - #- namespace: team1 - #createNamespace: true - #roleBindings: - #- role: admin - #name: special-override-name-admin-role - #kind: ClusterRole - #subjects: - #- type: User - #name: user3 - #- type: Group - #name: team1namespaceadmin - #- role: view - #kind: ClusterRole - #subjects: - #- type: User - #name: user4 - #- type: Group - #name: team1namespaceview - #- namespace: team2 - #createNamespace: true - #roleBindings: - #- role: admin - #name: special - #kind: ClusterRole - #subjects: - #- type: User - #name: user1 - #- type: Group - #name: group1 -``` - -**Example**: - -**Azure AD Group Object ID** "70\*\*\*\*\*\*\-355a-453b-aadf-\*\*\*\*\*\*\*\*\*301" is linked to the **Azure AD Security Group** with the display name of **cluster-admin-role**. - -**name**: "AZURE AD GROUP ID NAME" - -![oidc](/oidc-azure-images/client-config.png) - -## Results - -You have now established SSO authentication integrating Microsoft Azure AD and Spectro Cloud Palette using OIDC. 
- -## References - -[Microsoft Active Directory](https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-protocols-oidc)
-[Credential Plugin Diagram](https://github.com/int128/kubelogin/raw/master/docs/credential-plugin-diagram.svg)
-[kubelogin](https://github.com/int128/kubelogin)
\ No newline at end of file diff --git a/content/docs/08-user-management/04-saml-sso/04-palette-sso-with-okta.md b/content/docs/08-user-management/04-saml-sso/04-palette-sso-with-okta.md deleted file mode 100644 index e1243edcf4..0000000000 --- a/content/docs/08-user-management/04-saml-sso/04-palette-sso-with-okta.md +++ /dev/null @@ -1,337 +0,0 @@ ---- -title: 'Palette SSO with Okta' -metaTitle: 'Set up Palette SSO with Okta' -metaDescription: 'Set up Palette SSO with Okta' -icon: "" -hideToC: false -fullWidth: false -hideToCSidebar: false -hiddenFromNav: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Enable SSO with Okta - -Single Sign-On (SSO) is an authentication method that enables users to log in to multiple applications and websites with one set of credentials. SSO uses certificates to establish and maintain a trust relationship between the Service Provider (SP) and an Identity Provider (IdP). Palette supports SSO based on either the Security Assertion Markup Language (SAML) or OpenID Connect (OIDC). - -The following steps will guide you on how to enable Palette SSO with [Okta Workforce Identity Cloud](https://www.okta.com/products/single-sign-on/) based on OIDC. - - -# Prerequisites - -- You need to have either a free or paid subscription with Okta. Okta provides free [developer subscriptions](https://developer.okta.com/signup/) for testing purposes. - - -- If you want to use the same Okta application for OIDC-based SSO into your Kubernetes cluster itself, you need to install [kubelogin](https://github.com/int128/kubelogin) on your local workstation to handle retrieval of access tokens for your cluster. - - -# Enablement -## Create the Okta Application - -1. Log in to your Okta Admin console and navigate to **Applications** --> **Applications**. Click the **Create App Integration** button. - -
-
-
-  Your Okta login URL has the following format,
-  `https://{your-okta-account-id}-admin.okta.com/admin/getting-started`.
-  Replace `{your-okta-account-id}` with your Okta account ID.
-
-
-
-2. In the screen that opens, select **OIDC - OpenID Connect** for the sign-in method, then select **Web Application** for the application type. Then click **Next**.
-
-
-3. The following screen allows you to configure the new Web App Integration. In the **App integration name** field, change the name from `My Web App` to `Spectro Cloud Palette OIDC`. If desired, you can also upload a logo for the application. Leave the **Grant type** at its default value, **Authorization Code**.
-
- - ![Configure General Settings](/oidc-okta-images/oidc-okta_okta-general-settings.png) - -
- - -4. Open a web browser and navigate to your Palette subscription. Navigate to **Tenant Settings** --> **SSO** and click **OIDC**. Click the button next to **Callback URL** to copy the value to the clipboard. - -
- - ![Copy Callback URL](/oidc-okta-images/oidc-okta_copy-callback-url.png) - -
- -5. Switch back to your Okta Admin console and paste the copied value into the **Sign-in redirect URIs** field, replacing the existing value: - -
- - ![Paste Redirect URI](/oidc-okta-images/oidc-okta_paste-redirect-uri.png) - -
- -6. Switch back to Palette in the web browser and click the button next to **Logout URL** to copy the value to the clipboard. - -
- - ![Copy Logout URL](/oidc-okta-images/oidc-okta_copy-logout-url.png) - -
- -7. Switch back to your Okta Admin console and paste the copied value into the **Redirect URI** field, then click **Add** to add it to the list: - -
- - ![Paste Logout URI](/oidc-okta-images/oidc-okta_paste-logout-uri.png) - -
-
-8. These two redirect URIs are required for SSO to work with Palette. You can add more redirect URIs as needed. The URIs in the table below are useful when you want to use Okta for OIDC authentication into your Kubernetes clusters, as shown in the kubeconfig sketch after the table.
-
-
-  | URL | Type of Access |
-  | --- | --- |
-  | `http://localhost:8000` | Using kubectl with the kubelogin plugin from a workstation. |
-  | `https://console.spectrocloud.com/v1/shelly/oidc/callback` | Using the web-based kubectl console. |
-  | `https:///oauth/callback` | Using OIDC authentication into Kubernetes Dashboard. |
-
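-
-For the first entry in the table, a kubeconfig user that relies on the kubelogin plugin might look like the following sketch. The issuer URL, client ID, and client secret are placeholder assumptions for your Okta authorization server; kubelogin opens a browser and listens on `http://localhost:8000` to receive the authorization code.
-
-```yml
-users:
-  - name: okta-oidc-user
-    user:
-      exec:
-        apiVersion: client.authentication.k8s.io/v1beta1
-        command: kubectl
-        args:
-          - oidc-login
-          - get-token
-          # Placeholder issuer of the custom Okta authorization server
-          - --oidc-issuer-url=https://<your-okta-domain>/oauth2/<authorization-server-id>
-          - --oidc-client-id=<okta-client-id>
-          - --oidc-client-secret=<okta-client-secret>
-          - --oidc-extra-scope=profile
-          - --oidc-extra-scope=email
-```
-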
-
-9. When you have completed entering redirect URIs, scroll down to the **Assignments** section and select **Allow everyone in your organization to access**. Leave the **Enable immediate access with Federation Broker Mode** option enabled and click **Save**.
-
- - ![Configure Assignments](/oidc-okta-images/oidc-okta_assignments.png) - -
-
-10. You have now created the Okta Application! Next, you need to retrieve the Client ID and Client Secret, which you will use in the following steps. You should have landed on the **General** tab of your Okta Application. Click the **Copy to clipboard** button next to the **Client ID** to copy its value and save it somewhere. You will need this value later.
-
- - ![Copy Client ID](/oidc-okta-images/oidc-okta_copy-client-id.png) - -
- -11. Click the **Copy to clipboard** button next to the **Client Secret** to copy the secret value and save it. You will need this value for a later step. - -
- - ![Copy Shared Secret](/oidc-okta-images/oidc-okta_copy-shared-secret.png) - -
-
-## Create an Okta Authorization Server
-
-To ensure Okta issues OIDC tokens with the correct claims, you must create a custom Authorization Server. It customizes the tokens issued by Okta so that they contain the OIDC claims required by Palette and Kubernetes.
-
-
-12. Navigate to **Security** --> **API**, select the **Authorization Servers** tab, and click **Add Authorization Server**.
-
- - ![Add Authorization Server](/oidc-okta-images/oidc-okta_add-authz-server.png) - -
- -13. Enter a name for the server, for example `Palette OIDC`. For the **Audience** field, enter the client identifier that you saved in step **10**. Optionally provide a description. Then click **Save**. - -
- - ![Name Authorization Server](/oidc-okta-images/oidc-okta_name-authz-server.png) - -
- -14. Navigate to the **Claims** tab and click **Add Claim**. - -
-
-  ![Add Claims](/oidc-okta-images/oidc-okta_add-claims.png)
-
-
-15. Enter the required information from the tables below and click **Create**. Use this flow to create three claims in total. First, create two claims for the user information.
-
-
-  | Claim Name | Include in token type | Value Type | Value | Disable claim | Include In |
-  |------------|-----------------------|------------|-------|---------------|------------|
-  | u_first_name | ID Token (Always) | Expression | `user.firstName` | Unchecked | Any scope |
-  | u_last_name | ID Token (Always) | Expression | `user.lastName` | Unchecked | Any scope |
-
-
-16. Next, create a claim for group membership. The example below includes the name of any group that the Okta user is a member of and that starts with `palette-` in the `groups` claim of the token. For Palette SSO, Palette automatically makes the user a member of any Palette team with the identical name. A sample of the resulting claims is shown after the screenshot below.
-
- - | Claim Name | Include in token type | Value Type | Filter | Disable claim | Include In | - |------------|-----------------------|------------|-------|---------------|------------| - | groups | ID Token (Always) | Groups | Starts with: `palette-` | Unchecked | Any scope | - -
- - ![Claims Result](/oidc-okta-images/oidc-okta_claims-result.png) - -
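-
-With the three claims above in place, the relevant portion of a decoded ID token for a hypothetical user who belongs to the `palette-tenant-admins` Okta group might look like the following sketch. The names and email address are illustrative only.
-
-```yml
-# Illustrative claims carried by the issued ID token
-u_first_name: Jane
-u_last_name: Doe
-email: jane.doe@example.com
-groups:
-  - palette-tenant-admins
-```
-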
- -17. Click **<-- Back to Authorization Servers** at the top of the page to navigate back to the list of all servers. The authorization server you created is displayed in the list. Select the **Issuer URI** shown and copy it to the clipboard. Save this value as you will use it in a later step. - -
- - ![Get Issuer URI](/oidc-okta-images/oidc-okta_get-issuer-uri.png) - -
- -18. Navigate to the **Access Policies** tab and click **Add Policy**. - -
- - ![Add Access Policy](/oidc-okta-images/oidc-okta_add-access-policy.png) - -
-
-19. Set the **Name** and **Description** fields to `Palette`, then change the **Assign to** option to the Okta application you created in step 3, `Spectro Cloud Palette OIDC`. Type the first few characters of the application name and select it from the search results that appear.
-
- - ![Name Access Policy](/oidc-okta-images/oidc-okta_name-access-policy.png) - -
- -20. Click the **Add rule** button to add a rule to this Access Policy: - -
- - ![Add Policy Rule](/oidc-okta-images/oidc-okta_add-policy-rule.png) - -
-
-21. Set the **Rule Name** to `AuthCode`. Then deselect all grant types except **Authorization Code**. Then click **Create Rule**.
-
- - ![Configure Policy Rule](/oidc-okta-images/oidc-okta_configure-policy-rule.png) - -
- -You have now completed all configuration steps in Okta. -
-
-## Enable OIDC SSO in Palette
-
-22. Open a web browser and navigate to your [Palette](https://console.spectrocloud.com) subscription.
-
-Navigate to **Tenant Settings** --> **SSO** and click on **OIDC**. Enter the following information.
-
-| Parameter | Value |
-|-------------------|--------------------------------------------------------------------|
-| Issuer URL | The Issuer URI that you saved in step **17**.|
-| Client ID | The client identifier that you saved in step **10**. |
-| Client Secret | The shared secret that you generated in step **11**. |
-| Default Teams | Leave blank if you don't want users without group claims to be assigned to a default team. If you do, enter the desired default team name. If you use this option, be careful with how much access you assign to the team. |
-| Scopes | Keep `openid`, `profile` and `email` as the default. |
-| Email | Keep `email` as the default. |
-| First Name | Set this to `u_first_name`. |
-| Last Name | Set this to `u_last_name`. |
-| Spectro Team | Keep `groups` as the default. |
-
- - ![Enable Palette OIDC SSO](/oidc-okta-images/oidc-okta_configure-palette-oidc.png) - -
- -23. When all the information has been entered, click **Enable** to activate SSO. You will receive a message stating **OIDC configured successfully**. - - -## Create Teams in Palette - -The remaining step is to create teams in Palette for the group that you allowed to be passed in the OIDC ticket in Okta, and give them the appropriate permissions. For this example, you will create the `palette-tenant-admins` team and give it **Tenant Admin** permissions. You can repeat this for any other team that you have a matching Okta group for. - -24. Open a web browser and navigate to your Palette subscription. Navigate to **Tenant Settings** --> **Users & Teams** --> **Teams** tab, and click **+ Create Team**. - -
- - ![Create Palette Team](/oidc-okta-images/oidc-okta_create-team.png) - -
- -25. Specify `palette-tenant-admins` in the **Team name** field. You don't need to set any members now, as this will happen automatically from the SSO. Click **Confirm** to create the team. - -
- - ![Name Palette Team](/oidc-okta-images/oidc-okta_name-team.png) - -
- -26. The list of teams displays again. Select the newly created **palette-tenant-admins** team to review its details. To give this team administrative access to the entire tenant and all the projects in it, assign the **Tenant Admin** role. Select **Tenant Roles** and click **+ Add Tenant Role**: - -
- - ![Palette Tenant Roles](/oidc-okta-images/oidc-okta_tenant-roles.png) - -
- -27. Click on **Tenant Admin** to enable the role. Click **Confirm** to add the role. - -
- - ![Add Tenant Role](/oidc-okta-images/oidc-okta_add-tenant-role.png) - -
- -You will receive a message stating **Roles have been updated**. Repeat this procedure for any other teams while ensuring they are given the appropriate access permissions. - -28. Click the **X** next to **Team Details** in the top left corner to exit this screen. - -You have now successfully configured Palette SSO based on OIDC with Okta. - - -# Validate - -1. Log in to Palette through SSO as a user that is a member of the `palette-tenant-admins` group in Okta to verify that users are automatically added to the `palette-tenant-admins` group in Palette. If you're still logged into Palette with a non-SSO user, log out by selecting **Logout** in the **User Drop-down Menu** at the top right. - -
- - ![User Logout](/oidc-okta-images/oidc-okta_user-logout.png) - -
-
-
-2. The Palette login screen now displays a **Sign in** button and no longer presents a username and password field. Below the **Sign in** button is an **SSO issues? --> Use your password** link. Use this link to bypass SSO and log in with a local Palette account if you ever need to access Palette while SSO is unavailable. Click on the **Sign in** button to log in via SSO.
-
- - ![User SSO Login](/oidc-okta-images/oidc-okta_palette-login.png) - -
- -3. If this is the first time you are logging in with SSO, you will be redirected to the Okta login page. Depending on your organization's SSO settings, this could be a simple login form or require MFA (Multi-Factor Authentication). - -
- - - - Make sure you log in as a user that is a member of the `palette-tenant-admins` group in Okta. Once authenticated, you will automatically be redirected back to Palette and logged into Palette as that user. - - - -
- -4. You are now automatically added to the `palette-tenant-admins` team in Palette. To verify, navigate to the left **Main Menu**, select **Tenant Settings** --> **Users & Teams** --> **Teams** tab. Click the **palette-tenant-admins** team and view the team members section. - - - -# Resources - -- [Okta Workforce Identity Cloud](https://www.okta.com/products/single-sign-on/) - - -- [Palette User Management](/user-management) - - -- [Palette SSO](/user-management/saml-sso) diff --git a/content/docs/08-user-management/1-user-authentication.md b/content/docs/08-user-management/1-user-authentication.md deleted file mode 100644 index b163bcfc41..0000000000 --- a/content/docs/08-user-management/1-user-authentication.md +++ /dev/null @@ -1,140 +0,0 @@ ---- -title: "User Authentication" -metaTitle: "API Key for API Authentication" -metaDescription: "Palette's API key for user authentication for API access " -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -Palette supports three types of user authentication methods. - -* [User Interface (UI)](/user-management/user-authentication/#au-authentication) authentication - -* [API Key](/user-management/user-authentication/#api-key) - -* [Authorization Token](/user-management/user-authentication/#authorization-token) - -The API key and the authorization token method can be used when interacting with Palette REST APIs for automation and programmatic purposes. - - -# UI Authentication - -You can log into Palette by visiting the Palette at [http://console.spectrocloud.com](https://console.spectrocloud.com). If you are a user of a Palette Enterprise instance, then you should use the URL provided by your Palette system -administrator, such as `example-company.console.spectrocloud.com.` - -## Account Sign Up - -You can sign up for a Palette SaaS account by visiting [Palette](https://console.spectrocloud.com) or an Enterprise Palette account under your organization by using your organization's custom Palette URL. - -When you create an account, you can create a username and password or create the account through a third-party identity provider, GitHub, Google, or other OIDC providers that are enabled for your organization. For Palette SaaS, GitHub and Google are automatically enabled for SSO integration. - -## Sign In Flow - -Starting with Palette 3.2, the user sign-in flow can be different depending on how you created your Palette account. If you created your user with a username and password, then you may be prompted to select the organization you wish to log in to. If you are a member of a single organization, then you will not be prompted for an organization selection. - -If you created an account through SSO and are a member of different organizations, then you must first select the organization name you wish to log in to. Click on the **Sign in to your organization** button for the option to specify the organization name. If you need help remembering the organization name, click on the **Forgot your organization name?** button and provide your email address to receive an email containing your organization name and its login URL. - -
- - - -If you are a Palette Enterprise user, use the custom Palette URL for an optimized login experience and avoid specifying the organization name. -Ask your Palette system administrator for the custom Palette URL. - - - -# API Key - -Palette API can also use API Keys to authenticate requests. This is the method of accessing the API without referring to the actual user. - -## Scope of Palette API Keys - -* Tenant admin can create an API Key for any user within the tenant. -* Users can create API Keys for themselves. - -## Creating an API key as a tenant admin - -* Login to Palette using credential with admin role. -* Go to Tenant Settings and select **API Keys**. -* Click on “Add New API Key” to create a new API key. The following information is required for creating a new API Key: - * API Key Name: The tenant/user-specified custom name for the key. - * Description: An optional description about the key. - * Username: Select the user for whom the key is created from the drop-down. - * Expiration Date: Set an expiry date for the key from the options available. The expiration date can be further customized after the key creation. The various options available for the expiration dates are: - * 7 days - * 30 days - * 60 days - * 90 days - * Custom: Select a custom expiry date from the calendar. -* Confirm the information to complete the wizard. - -### Manage API Keys as a tenant admin - -* Log in to Palette using credential with admin role. -* Go to Tenant Settings and select **API Keys**. -* Detailed status of the keys can be observed from the API overview page. In addition to the key's name, description, and expiration date, the overview page displays the API keys, the user to which each key is assigned, and the status of the key. -* To view all the keys assigned to a particular user, select the user's name at **User Name** on top of the page, below the **Manage API Keys**. -* Each API has a settings menu, click on the **three-dot Menu**, to view the following options: - * Edit: The following information can be edited from the menu, - * API Key name - * Description(optional) - * Expiration Date - * Revoke: Change the status of the key from **active** to **inactive**. - * Re-activate: Change the status of the key from ‘inactive’ to ‘active’ as long as expiration date has not passed. - * Delete: Delete the key. - -## Creating an API key for the logged-in user - -* Log in to Palette -* Select **API Keys** from **User Menu**. -* Click on **Add New API Key** to create a new API key. The following information is required for creating a new API Key: - * API Key Name: The tenant/user-specified custom name for the key. - * Description: An optional description about the key. - * Expiration Date: Set an expiry date for the key from the options available. The expiration date can be further customized after the key creation. The various options available for the expiration dates are: - * 7 days - * 30 days - * 60 days - * 90 days - * Custom: Select a custom expiry date from the calendar. -* Confirm the information to complete the wizard. - -### Manage API Keys for the logged-in user - -* Log in to Palette -* Select **API Keys** from the **User Menu**. -* Detailed status of the key can be observed from the API overview page. In addition to the key's name, description, and expiration date, the overview page displays the API keys belonging to the user, and the status of the keys. 
-* To view all the keys assigned to a particular user, select the user's name at **User Name** on top of the page, below the “Manage API Keys”. -* Each API has a settings menu, click on the **three-dot Menu**, to view the following options: - * Edit: The following information can be edited from the menu, - * API Key name - * Description(optional) - * Expiration Date - * Revoke: Change the status of the key from **active** to **inactive**. - * Re-activate: Change the status of the key from ‘inactive’ to ‘active’ as long as expiration date has not passed. - * Delete: Delete the key. - -## API Key Usage - -You copy your API key from the Palette dashboard and use it for making REST API calls in one of the following ways: - -* Query Parameter - Pass the key as a query parameter`ApiKey=`. Example: - `v1/spectroclusters?ApiKey=QMOI1ZVKVIoW6LM6uXqSWFPsjmt0juvl` -* Request Header - Pass the API Key as a HTTP request header in the following format: - * Key: ApiKey - * Value: API key copied from the Palette Console. E.g.: QMOI1ZVKVIoW6LM6uXqSWFPsjmt0juvl - -# Authorization Token - -* All requests must be authenticated with an API token that is passed using the HTTP request header `Authorization`. -* Users can use the [`/auth/authenticate`](/api/v1/auth) API to authenticate and obtain the authorization token by supplying their username and password. -* Every authorization token is valid for 15 min. -* To refresh the token use: [`GET /v1/auth/refresh/{token}`](/api/v1/auth) diff --git a/content/docs/08-user-management/1.5-new-user.md b/content/docs/08-user-management/1.5-new-user.md deleted file mode 100644 index 4b82d12bd7..0000000000 --- a/content/docs/08-user-management/1.5-new-user.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: "Create a New User" -metaTitle: "Create a New User in Palette " -metaDescription: "Create a new user in Palette " -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Overview - -The section guides you on how to create a user in Palette. - -
- -# Prerequisites - -- A [Palette account](https://console.spectrocloud.com). -- Tenant Admin access. - -< br /> - -# Create a New User - -To create a new user in Palette: - -1. Log in to Palette as a Tenant Admin. - - -2. Select **Users and Teams** from the left **Main Menu** and click on **+Create User** button. - - -3. Provide the following information to the **Create User** wizard: - * First Name - * Last Name - * Email - * Team(s) - - -4. Click on the **Confirm** button to complete the wizard. - - -## Validate - -* A display message will pop up confirming the user creation. - -* The validation can also be done from the Tenant console. Go to **Tenant Settings** from the **Left Main Menu**. - -* Click the **Users & Teams** tab from the **Left Menu**. This page will list all the users under the Tenant scope. - - -# Create Custom Role - -Use the following steps to create a custom resource role: - -1. Log in to Palette as Tenant Admin and select **Roles** from the left **Main Menu**. - - -2. Go to the **Resource Roles** tab from the top menu and click on the **+Create Resource Role** button to open the **Add New Role (Resource)** wizard. Fill out the following inputs: - * Name of the role. - * Assign permissions and operations. - - -3. Once the required permissions are selected, click the **Save** button. - - -4. To **Edit and Delete** the role from the role listing page, click the role to be deleted or edited to go to the role details page. - - -5. Click on **Delete Role or Edit Role** button to delete or edit the role respectively. - - -## Validate - -* A display message will pop up confirming the user creation. You can validate the user creation from the **Users & Teams** page in the Tenant Admin console. - -* The validation can also be done from the **Tenant console**. Go to **Tenant Settings** from the left **Main Menu**. - -* Click the **Roles** tab from the left **Main Menu** and click on the **Resource Roles** tab to find the new role name listed. - -* To **Reset Password** or **Delete** the user, click the user name and go to the **User Details** page. Then, click on **Delete** or **Reset Password** to perform the corresponding operation. - diff --git a/content/docs/08-user-management/1.8-project-association.md b/content/docs/08-user-management/1.8-project-association.md deleted file mode 100644 index 92ac32143b..0000000000 --- a/content/docs/08-user-management/1.8-project-association.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: "Project Association" -metaTitle: "Associate a User or Team with a Project" -metaDescription: "Associate a User or Team with a Project" -hideToC: false -fullWidth: false ---- - -# Overview - -Associating a user or team with a specific project creates a clear distinction between who is allowed in the project and their access control permissions. By grouping resources together, you ensure that only designated members have access to and control over the project resources, preventing others from accidentally or intentionally modifying them. This improves resource accountability, reduces confusion and conflicts, and helps maintain the integrity and security of the project. - -User permissions are determined by the combination of their tenant and project roles, as well as any roles inherited from their team association. If a user is a *Tenant Admin*, they have admin permissions in all projects. A user with the *Project Viewer* role at the tenant level has *View* permissions for all projects. 
However, if a user or team has the *Project Viewer* role assigned to a specific project, they only have view access to that project. The extent of user permissions, either at the tenant or project level, determines the number of projects they can access. - -# Associate a User or Team - -To associate a user or team with a project, use the following steps. - -# Prerequisites - -* Tenant Admin access. - -* An available project. Check out the [Create a Project](/projects#createaproject) guide to learn how to create a project. - -* A user or a team. - -# Enablement - -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Select the **Tenant Admin** scope. - - -3. Navigate to the left **Main Menu** and select **Users & Teams**. - - -4. Select the tab of the resource you want to associate with a project, either a user or a team. - - -5. Click on the row of the user or team to display its overview page. - - -6. Select **Project Roles**. - - -7. A **drop-down Menu** containing all the available projects in the tenant is available. Select the project you want to associate with the project role. - - -8. Next, assign permissions to the project role. To learn more about each permission and available roles, check out the [Palette RBAC](/user-management/palette-rbac) documentation. - - -Click **Confirm** to create the project role and complete the project association process. - - -# Validate - -1. Have a user or a user assigned to a team log in to [Palette](https://console.spectrocloud.com). - -2. Ask the user to switch the scope to the project you associated their role with. - -The user will now be able to select the associated project and complete actions within the scope of their project role permissions. - - -
diff --git a/content/docs/08-user-management/2-palette-rbac.md b/content/docs/08-user-management/2-palette-rbac.md deleted file mode 100644 index 5c7ed0a58e..0000000000 --- a/content/docs/08-user-management/2-palette-rbac.md +++ /dev/null @@ -1,401 +0,0 @@ ---- -title: "Palette RBAC" -metaTitle: "Palette User Access using RBAC " -metaDescription: "Palette User Access control using RBAC" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -## Overview - -RBAC stands for Role-Based Access Control. RBAC allows a single user to have different types of access control based on the resource being accessed. RBAC is the scenario that allows the Tenant Admin to grant full and unrestricted access to some parts of the system and withhold it for some others. - -Palette enforces a very well-structured RBAC design on how to grant granular access to resources and their operations within our management console. We maintain precise Roles and Resource Access Control List. Role-based access control primarily focuses on assigning permissions to roles instead of individual users and then assigning these roles to users. Multiple roles can be assigned to a user, which defines the permitted actions on the resource. This module lists and enumerates all the roles available within the Palette console within specific scopes. - -Palette enables: - -* A role can have multiple permissions. We encourage custom role creation, coupling the wide range of Palette permissions. - -* Multiple roles can be assigned to a single user, defining the permitted actions on a Palette resource. - -## Palette RBAC Model - -The Palette RBAC Model, is based on the following three components: - - -* Scopes -* Permissions -* Roles - -### Scopes - -A Scope defines the resources on which the role has coverage. The scope will be either `Tenant` or `Project`. For example, a role within the scope project can operate within the projects. The combination of user and roles indicates the totality of the accessibility available to that user. Scopes are structured in a parent-child relationship. Each level of hierarchy makes the Scope more specific. The roles are assigned at any of these levels of Scope. The level you select determines how widely the role is applied. Lower levels inherit role permissions from higher levels. -![palette-rbac-scope.png](/palette-rbac-scope.png) - -The following are the major properties of Palette driven Scopes: - - -* Scopes control the visibility of the resource. - - -* Resource created in the higher scope will be visible in the lower scope as read-only. The cluster profiles created by a tenant will be available to all the projects created by that tenant. - - -* Resource Isolation: Resources within the same scope will be restricted to the respective scope entity. - * Cluster Profile created in project-1 will not be available in project-2 of the same tenant - - -* Resource with the same name can co-exist across scopes and will be distinguished with scope prefix (icon) - * A profile with the same name can be created in tenant and project scope. The resource will have the scope information, which helps to distinguish them. - - - -Palette resources can be allocated to roles under **Three Scopes**: - - - -* **System** (The system admin internal to Palette) - - -* **Tenant** - - -* **Project** - - -
- - -![A diagram of Palette's RBAC model](/user-management_palette-rback_palette-rbac-model.png) - -
- -### Permissions - - -Permissions determine the type of operations allowed on a resource. Permissions can be defined in the following format: - -`resourceKey.operation` - -Examples: - -* `cluster.create` -* `cluster.edit` -* `cluster.delete` - -Each permission has a defined scope. The role creation is based on scope, type and permissions. - -
- -#### Palette Permissions - -Palette has a wide range of permissions and these permissions can be combined in any combination as per the user requirements to create a role. If the Palette built-in roles does not meet the specific needs of your organization, custom roles can be created using different combination of these permissions. Just like built-in roles, you can assign custom roles to users or teams within a specific scope (Tenant or Project). Refer to the available set of permissions in the [Palette Resource Scope Matrix](/user-management/palette-rbac#resourcescopematrix). - -
-
- -### Roles -A Role is a collection of permissions. When a role is assigned to a user, it means all the permissions the role contains are assigned to that user. The Role will have a **Scope**. The Type signifies the creator's scope and the Scope signifies the role visibility. The permissions will be restricted to the permission's scope list based on the role's scope. The ProfileEditor will be visible under Tenant, but neither the Tenant nor the Project admins are allowed to modify the Project Scopes. - -
- -## Access Modes -* Tenant -* Project - -### Tenant -Tenant is an isolated workspace within the Palette. `Users` and `Teams` with specific `Roles` can be associated with the Tenant(s) you create. Palette provides a [wide set of permissions](/user-management/palette-rbac/tenant-scope-roles-permissions#globaltenantscope) under the scope of a Tenant. Everyone is a user and there should be at least one user with Tenant Admin privilege to control the product operations. -
- -### Project - -The Global Project Scope holds a group of resources, in a logical grouping, to a specific project. The project acts as a namespace for resource management. Users and Teams with specific roles can be associated with the project, cluster, or cluster profile you create. Users are members of a tenant who are assigned [project scope roles](/user-management/palette-rbac/project-scope-roles-permissions#globalprojectscope) that control their access within the platform. -
- -## Palette Specific (Default) Roles: - -Palette RBAC has several built-in roles that can be assigned to users and teams. Role assignments are the way you control access to Palette resources. -
- -### Tenant Scope Default Roles: - -The Global Tenant Scope holds all the tenant resources of Palette. The list of `Role` types within the `Tenant Scope` are as follows: -
- -1. [Tenant Administrator Role](/user-management/palette-rbac/tenant-scope-roles-permissions#tenantadmin) - - -2. [Tenant Viewer Role](/user-management/palette-rbac/tenant-scope-roles-permissions#tenantviewer) - - -3. [Tenant Project Admin Role](/user-management/palette-rbac/tenant-scope-roles-permissions#tenantprojectadmin) - - -4. [Tenant Cluster Profile Admin Role](/user-management/palette-rbac/tenant-scope-roles-permissions#tenantclusterprofileadmin) - - -3. [Tenant Role Admin Role](/user-management/palette-rbac/tenant-scope-roles-permissions#tenantroleadminrole) - - -4. [Tenant Team Admin Role](/user-management/palette-rbac/tenant-scope-roles-permissions#tenantteamadmin) - - -5. [Tenant User Admin Role](/user-management/palette-rbac/tenant-scope-roles-permissions#tenantuseradminrole) - - -
- -### Project Scope Default Roles: - -The Global Project Scope holds a group of resources in a logical grouping. Users and Teams with specific Roles can be associated with the Project(s) you create. Below is a list of Role types within the Project Scope built in to the Palette console. These Roles can neither be deleted nor edited. - -
- -1. [Project Administrator Role](/user-management/palette-rbac/project-scope-roles-permissions#projectadmin) - - -2. [Project Editor Role](/user-management/palette-rbac/project-scope-roles-permissions#projecteditor) - - -3. [Project Viewer Role](/user-management/palette-rbac/project-scope-roles-permissions#projectviewer) - - -4. [Cluster Profile Admin Role](/user-management/palette-rbac/project-scope-roles-permissions#clusterprofileadmin) - - -5. [Cluster Profile Editor Role](/user-management/palette-rbac/project-scope-roles-permissions#clusterprofileeditor) - - -6. [Cluster Profile Viewer Role](/user-management/palette-rbac/project-scope-roles-permissions#clusterprofileviewer) - - -7. [Cluster Admin Role](/user-management/palette-rbac/project-scope-roles-permissions#clusterprofileadmin) - - -8. [Cluster Editor Role](/user-management/palette-rbac/project-scope-roles-permissions#clustereditor) - - -9. [Cluster Viewer Role](/user-management/palette-rbac/project-scope-roles-permissions#clusterviewer) - - -10. [Cluster Account Admin Role](/user-management/palette-rbac/project-scope-roles-permissions#clusteraccountadmin) - - -11. [Cluster Account Editor Role](/user-management/palette-rbac/project-scope-roles-permissions#clusteraccounteditor) - - -12. [Cluster Account Viewer Role](/user-management/palette-rbac/project-scope-roles-permissions#clusteraccountviewer) - - -13. [Workspace Admin Role](/user-management/palette-rbac/project-scope-roles-permissions#workspaceadmin) - - -14. [Workspace Operator Role](/user-management/palette-rbac/project-scope-roles-permissions#workspaceoperator) - - - -## Assign Palette Specific Roles to Users - -The Default (built-in) roles of Palette can be directly assigned to a user. The roles needs to be assigned based on who needs the access. The roles can be assigned to `Users` or `Teams`. The appropriate role needs to be selected from the list of several built-in roles. If the built-in roles are not meeting the specific needs of your organization, you can [create your own custom roles](/user-management/palette-rbac#customrolesinpalette). - -
- -1. Login to Palette console as `Tenant Admin`. - - -2. Select **Users and Teams** from the left ribbon menu to list the [created users](/user-management#rolesandpermissions). - - -3. From the list of users **select the user** to be assigned with role to open the role addition wizard. - - -4. Make the choice of role category from the top tabs: - * Project Role - * Tenant Role - * Workspace Role - - -5. Once the choice of category is made Click on **+ New Role**. - - -6. In the **Add Roles to User-name** wizard, select the project name from the drop down and select the roles from the list. - - -7. Confirm to complete the wizard. - - -8. The role user association can be edited and deleted from the `kebab menu`. - -
- -## Custom Roles in Palette -Palette enables the users to have custom Roles. These custom roles can be created either under the Tenant Scope or the Project Scope, but not both. These roles need to have unique names for identification. The names are case-insensitive. To create custom role in Palette Platform, we need to understand the components and operations in the Palette Platform enumerated as a `Resource Scope Matrix` as below: - -
-
- -## Resource Scope Matrix - -|Component|Resource Key|Operations|Scope|Usage| -|---------|------------|----------|-----|-----| -|API Key|apiKey|create, get, list, update, delete|Tenant|API Key related operations| -|Appliance|edgehost|create,get,list,update,delete|Project|Edge appliance deployment and management| -|Audit|audit|get, list|Tenant Project|Audit log access| -|Cloud Account|cloudaccount|create, get,list,update,delete|Tenant Project|Cloud account creation and management| -|Cloud Config |cloudconfig|create,update,delete,get,list|Project|Cluster level cloud configuration | -|Cluster|cluster|create,get,list,update,delete|Project|Creation and management of Palette workload clusters| -|Cluster Profile|clusterProfile|update,publish,delete,create,get,list|Tenant Project|Creation and management of Palette cluster profiles| -|DNS Mapping|dnsMapping|create,get,list,update,delete|Project|Domain Name Server mapping services creation and management| -|Location|location|create,get,list,update,delete|Tenant Project| location services related to backup and restore| -|Macro|macro|create,get,list,update,delete|Tenant Project|Key value management for Palette resources | -|Machine|machine|create,get,list,delete,update|Project|Palette node pool management| -|Private Gateway|privateGateway|create,get,list,update,delete|Tenant|PCG creation and maintenance|ack Registry creation and management| -|Registry|packRegistry|create, get, list, update, delete|Tenant|Creation and management of registries| -|Role|role|create,update,delete,get,list|Tenant|creation and management of Palette roles | -|Project|project|create,get,list,delete,update|Project|Creation and management of Palette roles | -|Workspace|workspace|create,list,update,delete,backup,restore,get|Project|Workspace operations including backup and restore|| -|Team|team|create,list,update,delete,get|Tenant|Creation and management of user teams in Palette| -|User|user|create,update,delete,get,list|Tenant|Creation and management of users in Palette| - - - -## Create Custom Role in Palette -To create a custom role, login to the Palette console as `Tenant Admin`: - - -1. Go to **Roles** from the left ribbon menu - - -2. Click **Create Role**, to open the `Add New Role` wizard - - -3. Give a `Role Name` of user choice. - - -4. Clicking on a `Role Name` will show the permissions available under this role. `Default Roles` (built-in into the Palette system) cannot be edited or deleted. Select the scope from the available options: - - * Tenant - * Project - - -5. Make your choice of **Permissions** and **Operations** to create a custom Palette role. After entering the `Role Name`, use the checkboxes to select the permissions. The checkbox list can be expanded to fine-tune the required permissions. - - -6. The created role can be viewed under the `Global Roles` list - - -7. Click on the name of the role to: - - * `View` - * `Edit Role` - * `Delete Role` - -
- -**Example:** - -If the user is creating a role under the Tenant scope for API Key operations, select the `API Key Permissions` and then from the drop-down menu of that permission, check (tick) the required API operations listed under API Key permissions. Similarly, several permissions can be combined to create a **Custom Role**. [The created role can be assigned to an existing or new user.](/user-management#rolesandpermissions) - -
-
-
-### Assign Custom Roles to Users
-
-1. Log in to the Palette console as `Tenant Admin`.
-
-
-2. Select **Users and Teams** from the left ribbon menu to list the [created users](/user-management#rolesandpermissions).
-
-
-3. From the list of users, **select the user** to be assigned a role to open the role addition wizard.
-
-
-4. Make the choice of role category from the top tabs:
-   * Project Role
-   * Tenant Role
-   * Workspace Role
-
-
-5. Once the choice of category is made, click on **+ New Role**.
-
-
-6. In the **Add Roles to User-name** wizard, select the project name from the drop-down and select the roles from the list.
-
-
-7. Confirm to complete the wizard.
-
-
-8. The role-user association can be edited and deleted from the `kebab menu`.
-
-## Example Scenario:
-
-Palette has a number of permissions that you can potentially include in your custom role. Here is an example scenario enumerating the minimum permissions required for a user to **Create a Cluster** in the Palette platform.
-
-
-#### 1. Decide the actions, scopes, and permissions required by the user to create a cluster.
-
-Role creation is done from the `Tenant Admin` console. For this scenario, two roles need to be created, one under the `Project` scope and one under the `Tenant` scope, and attached to the user. A summary of both permission sets is sketched after the steps below.
-
- -#### 2. Identify the Permissions required under `Project Scope`: - - * Add the minimum `Project` management permissions - * project.list - * project.get - - - * Add the minimum permissions required for `Cloud Account` creation - * cloudaccount.create - * cloudaccount.get - * cloudaccount.list - - - * Add the `ClusterProfile` permissions - * clusterProfile.create - * clusterProfile.delete - * clusterProfile.get - * clusterProfile.list - * clusterProfile.publish - * clusterProfile.update - - - * Add the `Cluster` permissions (for creating and listing the cluster) - * cluster.create - * cluster.list - * cluster.get - - - * Add the `Location` permission. - * location.list - - - * Add the `Cloud Configuration` permissions for node pool management - * cloudconfig.create - - -#### 3. Identify the Permissions required under `Tenant Scope`: - -To attach the Packs and Integrations from Palette public repository, add the `Registry Permissions`. -The minimum permission required in this scenario is: - - * packRegistry.get - - -#### 4. Attach Roles to the User and Create the Cluster - -* Once both the roles are created with the above scopes, attach them to the user. - -* Login to Palette console using the user credentials to create the cluster profile and the cluster. - - - -
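-
-The two permission sets described above can be summarized as follows. This is only an illustrative recap of the steps in this scenario, not a file format that Palette imports.
-
-```yml
-# Minimum permissions to create a cluster (illustrative summary)
-project-scope-role:
-  - project.list
-  - project.get
-  - cloudaccount.create
-  - cloudaccount.get
-  - cloudaccount.list
-  - clusterProfile.create
-  - clusterProfile.delete
-  - clusterProfile.get
-  - clusterProfile.list
-  - clusterProfile.publish
-  - clusterProfile.update
-  - cluster.create
-  - cluster.list
-  - cluster.get
-  - location.list
-  - cloudconfig.create
-tenant-scope-role:
-  - packRegistry.get
-```
-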
-
diff --git a/content/docs/08-user-management/2-palette-rbac/1-tenant-scope-roles-permissions.md b/content/docs/08-user-management/2-palette-rbac/1-tenant-scope-roles-permissions.md deleted file mode 100644 index 1dd266e82b..0000000000 --- a/content/docs/08-user-management/2-palette-rbac/1-tenant-scope-roles-permissions.md +++ /dev/null @@ -1,345 +0,0 @@ ---- -title: "Tenant Scope Roles and Permissions" -metaTitle: "Tenant Roles" -metaDescription: "The list of Global Tenant Roles under Tenant Scope" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Global Tenant Scope - -Tenant is an isolated workspace within the Palette Console. Users and teams with specific roles can be associated with the [tenants](/glossary-all#organization) and [projects](/glossary-all#project) you create. - -Each user is assigned a role and permissions, which apply to the scopes, resources, and resourceKey. The Permissions format is `resourceKey.operation`, where resourceKey refers to resource or the API functionality, and Operation refers to the permitted action or activity. - -To view the list of the predefined roles and permissions, ensure you are in the project scope **Tenant**. Next, navigate to the left **Main Menu** and click on **Tenant Settings** > **Roles**, and you will find the list of **Global Roles**. If you need to extend permissions, create a custom role by using the [Create Role](/user-management/palette-rbac#createcustomroleinpalette) option. - -Below is the list of Roles and Permissions that already predefined for the Global Tenant Scope. - -
- - - -All users can view tags assigned to a resource. In technical terms, all users inherit the permission `tag.get` by default. - - - -
- -# Tenants ---------------------------- - -|Role Names | Description | -|---|---| -|Tenant Admin |Allows the user to create and manage projects within the tenant, covering all operations related to projects| -|Tenant Viewer| Provides read-only access to all the project resources| -|Tenant Project Admin|A role with complete access to an existing project| - -The following table lists the resourceKeys and operations predefined for each role under the Global Tenant Scope: - 
-
- - - - - -
- -## Tenant Admin - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **apiKey** | √ | √ | √ | √ | √ | | | | | -| **audit** | | | √ | √ | | | | | | -| **cloudaccount** | √ | √ | √ | √ | √ | | | | | -| **cloudconfig** | √ | √ | √ | √ | √ | | | | | -| **cluster** | √ | √ | √ | √ | √ | √ | | | | -| **clusterProfile** | √ | √ | √ | √ | √ | | √ | | | -| **clusterRbac** | √ | √ | √ | √ | √ | | | | | -| **dnsMapping** | √ | √ | √ | √ | √ | | | | | -| **edgehost** | √ | √ | √ | √ | √ | | | | | -| **location** | √ | √ | √ | √ | √ | | | | | -| **machine** | √ | √ | √ | √ | √ | | | | | -| **macro** | √ | √ | √ | √ | √ | | | | | -| **packRegistry** | √ | √ | √ | √ | √ | | | | | -| **privateGateway** | √ | √ | √ | √ | √ | | | | | -| **project** | √ | √ | √ | √ | √ | | | | | -| **role** | √ | √ | √ | √ | √ | | | | | -| **sshKey** | √ | √ | √ | √ | √ | | | | | -| **team** | √ | √ | √ | √ | √ | | | | | -| **tag** | | | | | √ | | | | | -| **user** | √ | √ | √ | √ | √ | | | | | -| **workspace** | √ | √ | √ | √ | √ | | | √ | √ | - -
-
-
- -
- - - -## Tenant Viewer - -
- - - - - -
resourceKeysOperations
-
- - | | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | - | ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | - | **apiKey** | | | √ | √ | | | | | | - | **audit** | | | √ | √ | | | | | | - | **cloudaccount** | | | √ | √ | | | | | | - | **cloudconfig** | | | √ | √ | | | | | | - | **cluster** | | | √ | √ | | | | | | - | **clusterProfile** | | | √ | √ | | | | | | - | **clusterRbac** | | | √ | √ | | | | | | - | **dnsMapping** | | | √ | √ | | | | | | - | **edgehost** | | | √ | √ | | | | | | - | **location** | | | √ | √ | | | | | | - | **machine** | | | √ | √ | | | | | | - | **macro** | | | √ | √ | | | | | | - | **packRegistry** | | | √ | √ | | | | | | - | **privateGateway** | | | √ | √ | | | | | | - | **project** | | | √ | √ | | | | | | - | **role** | | | √ | √ | | | | | | - | **sshKey** | | | √ | √ | | | | | | - | **team** | | | √ | √ | | | | | | - | **user** | | | √ | √ | | | | | | - | **workspace** | | | √ | √ | | | | | | - - -
- - -
- -## Tenant Project Admin - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Get** | **Delete** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ------- | ---------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **apiKey** | | √ | | √ | | | | | | -| **audit** | | √ | | √ | | | | | | -| **cloudaccount** | √ | √ | √ | √ | √ | | | | | -| **cloudconfig** | √ | √ | √ | √ | √ | | | | | -| **cluster** | √ | √ | √ | √ | √ | √ | | | | -| **clusterProfile** | √ | √ | √ | √ | √ | | √ | | | -| **clusterRbac** | √ | √ | √ | √ | √ | | | | | -| **dnsMapping** | √ | √ | √ | √ | √ | | | | | -| **edgehost** | √ | √ | √ | √ | √ | | | | | -| **location** | √ | √ | √ | √ | √ | | | | | -| **machine** | √ | √ | √ | √ | √ | | | | | -| **macro** | √ | √ | √ | √ | √ | | | | | -| **packRegistry** | √ | √ | √ | √ | √ | | | | | -| **privateGateway** | √ | √ | √ | √ | √ | | | | | -| **project** | √ | √ | √ | √ | √ | | | | | -| **sshKey** | √ | √ | √ | √ | √ | | | | | -| **tag** | | | | | √ | | | | | -| **workspace** | √ | √ | √ | √ | √ | | | √ | √ | - - -
-
- -
-
- -## Cluster Profile - ----------------------------- - -|Role Names | Description | -|---|---| -|Tenant Cluster Profile Admin | A role which has complete access to all the `Cluster Profile` related operations| - -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **clusterProfile** | √ | √ | √ | √ | √ | | √ | | | -| **macro** | √ | √ | √ | √ | √ | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **tag** | | | | | √ | | | | | -
-
-
- -## Tenant Role - ----------------------------- - -|Role Names | Description | -|---|---| -|Tenant Role Admin | A role which has complete access to all the `Role` related operations | - -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| -------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **role** | √ | √ | √ | √ | √ | | | | | - - 
-
-
- -## Tenant Team - ----------------------------- - -|Role Names | Description | -|---|---| -|Tenant Team Admin | A role which has complete access to all the `Team` related operations | - -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ---------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **apiKey** | | | √ | √ | | | | | | -| **audit** | | | √ | √ | | | | | | -| **team** | √ | √ | √ | √ | √ | | | | | -| **user** | | | √ | √ | | | | | | - - -
-
-
- -## Tenant User - ----------------------------- - -|Role Names | Description | -|---|---| -|Tenant User Admin Role|A role which has complete access to all the `User` related operations| - - - -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ---------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **apiKey** | √ | √ | √ | √ | √ | | | | | -| **audit** | | | √ | √ | | | | | | -| **user** | √ | √ | √ | √ | √ | | | | - - -
- - -# Tenants Cluster Group ----------------------------- - -|Role Names | Description | -|---|---| -|Tenants Cluster Group Admin |Allows the user to create and manage cluster groups within the tenant, covering all operations related to cluster groups| -|Tenants Cluster Group Editor|The role can perform edit operations related to a cluster group, but the user is not able to create or delete a cluster group| -|Tenants Cluster Group Viewer|Provides read-only access to all the cluster group resources| - -The following table lists the resourceKeys and operations predefined for each role under the Global Tenant Scope: - 
-
- - - - - -
- -## Tenant Cluster Group Admin - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **cluster** | | | √ | √ | | | | | | -| **clusterGroup** | √ | √ | √ | √ | √ | | | | | -| **tag** | | | | | √ | | | | | - - -
-
-
- -
- - - -## Tenant Cluster Group Editor - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **cluster** | | | √ | √ | | | | | | -| **clusterGroup** | | | √ | √ | √ | | | | | -| **tag** | | | | | √ | | | | | - - - -
- - -
- -## Tenant Cluster Group Viewer - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **cluster** | | | √ | √ | | | | | | -| **clusterGroup** | | | √ | √ | | | | | | - - -
-
- -
- - diff --git a/content/docs/08-user-management/2-palette-rbac/2-project-scope-roles-permissions.md b/content/docs/08-user-management/2-palette-rbac/2-project-scope-roles-permissions.md deleted file mode 100644 index b56078a0f2..0000000000 --- a/content/docs/08-user-management/2-palette-rbac/2-project-scope-roles-permissions.md +++ /dev/null @@ -1,830 +0,0 @@ ---- -title: "Project Scope Roles and Permissions" -metaTitle: "Project Roles" -metaDescription: "The list of Global Project Roles under Project Scope" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Global Project Scope - -The Global Project Scope holds a group of resources, in a logical grouping, to a specific project. Users and Teams with specific Roles can be associated with the Project, Cluster, or Cluster Profile you create. - -Palette has adopted the security principle of least privilege. Each user is assigned Roles and Permissions to the Scopes, Resources, and Components. The Permissions format is `resourceKey.operation`, where **resourceKey** refers to a resource or the API functionality, and *operation* refers to the action or activity allowed. - -To view a list of the predefined roles and permissions, go to **Tenant Settings** > **Roles**, and you will find the list of **Global Roles**. If you need to extend your permissions, use the **Create Role** option. - -Below is the predefined list of Roles and Permissions for the Global Project Scope: - -
- - -# App Deployment --------------------------------- - -|Role Name | Description | -|---|---| -|App Deployment Admin |Provides administrative privilege to perform all the App operations on App resources. | -|App Deployment Editor|Allows the user to perform edit operations on an App but not to create or delete an App.| -|App Deployment Viewer|Allows the user to view all the App resources but not to make modifications.| - -
-
- - - -
- -## App Deployment Admin - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **appDeployment** | √ | √ | √ | √ | √ | | | | | -| **appProfile** | | | √ | √ | | | | | | -| **cloudaccount** | | | √ | √ | | | | | | -| **clusterGroup** | | | √ | √ | | | | | | -| **location** | √ | √ | √ | √ | √ | | | | | -| **machine** | | | √ | √ | | | | | | -| **macro** | √ | √ | √ | √ | √ | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **project** | | | √ | √ | | | | | | -| **sshKey** | √ | √ | √ | √ | √ | | | | | -| **tag** | | | | | √ | | | | | -| **virtualCloudconfig**| √ | √ | √ | √ | √ | | | | | -| **virtualCluster** | √ | √ | √ | √ | √ | | | | | - - - -
- - -
- -## App Deployment Editor - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **appDeployment** | | | √ | √ | √ | | | | | -| **appProfile** | | | √ | √ | | | | | | -| **cloudaccount** | | | √ | √ | | | | | | -| **clusterGroup** | | | √ | √ | | | | | | -| **location** | | | √ | √ | √ | | | | | -| **machine** | | | √ | √ | | | | | | -| **macro** | | | √ | √ | | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **project** | | | √ | √ | | | | | | -| **sshKey** | | | √ | √ | √ | | | | | -| **tag** | | | | | √ | | | | | -| **virtualCloudconfig**| | | √ | √ | √ | | | | | -| **virtualCluster** | | | √ | √ | √ | | | | | - -
- - -
- - -
- -## App Deployment Viewer - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **appDeployment** | | | √ | √ | | | | | | -| **appProfile** | | | √ | √ | | | | | | -| **cloudaccount** | | | √ | √ | | | | | | -| **clusterGroup** | | | √ | √ | | | | | | -| **location** | | | √ | √ | | | | | | -| **machine** | | | √ | √ | | | | | | -| **macro** | | | √ | √ | | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **project** | | | √ | √ | | | | | | -| **sshKey** | | | √ | √ | | | | | | -| **virtualCloudconfig**| | | √ | √ | | | | | | -| **virtualCluster** | | | √ | √ | | | | | | - - -
-
- -
- - -# App Profile --------------------------------- - -|Role Names | Description | -|---|---| -|App Profile Admin |Provides administrative privilege to perform all the App operations on App profile resources. | -|App Profile Editor|Allows the user to perform edit operations on App profiles but not to create or delete an App profile.| -|App Profile Viewer|Allows the user to view all the App profile resources but not to modify them.| - -
-
- - - -
- -## App Profile Admin - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **appProfile** | √ | √ | √ | √ | √ | | | | | -| **macro** | √ | √ | √ | √ | √ | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **project** | | | √ | √ | | | | | | - -
- - -
- -## App Profile Editor - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **appProfile** | | | √ | √ | √ | | | | | -| **macro** | | | √ | √ | √ | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **project** | | | √ | √ | | | | | | - -
- - -
- - -
- -## App Profile Viewer - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **appProfile** | | | √ | √ | | | | | | -| **macro** | | | √ | √ | | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **project** | | | √ | √ | | | | | | - - -
-
- -
- - -# Project -------------------------------- - -|Role Names | Description | -|---|---| -|Project Admin |The Project Admin role covers all project operations and provides administrative privileges over the project resources | -|Project Editor|The Project Editor role can perform edit operations within a project, but the user is not able to create or delete a project| -|Project Viewer|The Project Viewer can view all the resources within a project, but is not privileged to make modifications| - 
-
- - - -
- -## Project Admin - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **audit** | | | √ | √ | | | | | | -| **cloudaccount** | √ | √ | √ | √ | √ | | | | | -| **cloudconfig** | √ | √ | √ | √ | √ | | | | | -| **cluster** | √ | √ | √ | √ | √ | √ | | | | -| **clusterProfile** | √ | √ | √ | √ | √ | | √ | | | -| **clusterRbac** | √ | √ | √ | √ | √ | | | | | -| **dnsMapping** | √ | √ | √ | √ | √ | | | | | -| **edgehost** | √ | √ | √ | √ | √ | | | | | -| **location** | √ | √ | √ | √ | √ | | | | | -| **machine** | √ | √ | √ | √ | √ | | | | | -| **macro** | √ | √ | √ | √ | √ | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **privateGateway** | √ | √ | √ | √ | √ | | | | | -| **project** | | | √ | √ | √ | | | | | -| **sshKey** | √ | √ | √ | √ | √ | | | | | -| **tag** | | | | | √ | | | | | -| **workspace** | √ | √ | √ | √ | √ | | | √ | √ | - -
- - -
- -## Project Editor - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **audit** | | | √ | √ | | | | | | -| **cloudaccount** | | | √ | √ | √ | | | | | -| **cloudconfig** | √ | | √ | √ | √ | | | | | -| **cluster** | | | √ | √ | √ | | | | | -| **clusterProfile** | | | √ | √ | √ | | √ | | | -| **clusterRbac** | | | √ | √ | √ | | | | | -| **dnsMapping** | | | √ | √ | √ | | | | | -| **edgehost** | | | √ | √ | √ | | | | | -| **location** | | | √ | √ | √ | | | | | -| **machine** | | √ | √ | √ | √ | | | | | -| **macro** | | | √ | √ | √ | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **privateGateway** | | | √ | √ | √ | | | | | -| **project** | | | √ | √ | √ | | | | | -| **sshKey** | | | √ | √ | √ | | | | | -| **tag** | | | | | √ | | | | | -| **workspace** | | | √ | √ | √ | | | √ | √ | - -
- - -
- - -
- -## Project Viewer - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **audit** | | | √ | √ | | | | | | -| **cloudaccount** | | | √ | √ | | | | | | -| **cloudconfig** | | | √ | √ | | | | | | -| **cluster** | | | √ | √ | | | | | | -| **clusterProfile** | | | √ | √ | | | | | | -| **dnsMapping** | | | √ | √ | | | | | | -| **edgehost** | | | √ | √ | | | | | | -| **location** | | | √ | √ | | | | | | -| **machine** | | | √ | √ | | | | | | -| **macro** | | | √ | √ | | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **privateGateway** | | | √ | √ | | | | | | -| **project** | | | √ | √ | | | | | | -| **sshKey** | | | √ | √ | | | | | | -| **workspace** | | | √ | √ | | | | | | - - 
-
- -
- - -# Cluster Profile ------------------------------ - -The user with these permissions can manage the Cluster Profiles within a project. - -
- -|Role Names| Description | -|---|---| -|Cluster Profile Admin |Cluster Profile Admin role has admin privileges to all the cluster profile operations| -|Cluster Profile Editor|Cluster Profile Editor role has privileges to edit and list operations on the cluster profile| -|Cluster Profile Viewer|Cluster Profile Viewer role has read-only privileges to cluster profiles| - -
- - - -
- -## Cluster Profile Admin - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **clusterProfile** | √ | √ | √ | √ | √ | | √ | | | -| **macro** | √ | √ | √ | √ | √ | | | | | -| **packRegistry** | √ | √ | | | | | | | | -| **tag** | | | | | √ | | | | | - -
- -
- - -
- -## Cluster Profile Editor - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **clusterProfile** | | | √ | √ | √ | | √ | | | -| **macro** | | | √ | √ | √ | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **tag** | | | | | √ | | | | | - -
- -
- - -
- -## Cluster Profile Viewer - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **clusterProfile** | | | √ | √ | | | | | | -| **macro** | | | √ | √ | | | | | | -| **packRegistry** | | | √ | √ | | | | | | - -
- -
-
- -
- -# Cluster --------------------------------------- -
- -
- -|Role Names| Description | -|---|---| -|Cluster Admin | A cluster admin in Project scope has all the privileges related to cluster operations| -|Cluster Editor | A cluster editor in Project scope has the privileges to update, delete, get, and list cluster resources. This role is not privileged for cluster creation | -|Cluster Viewer | A cluster viewer in Project scope has read-only privileges for cluster operations | - 
- - - - - -
- -## Cluster Admin - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **cloudaccount** | | | √ | √ | | | | | | -| **cloudconfig** | √ | √ | √ | √ | √ | | | | | -| **cluster** | √ | √ | √ | √ | √ | √ | | | | -| **clusterProfile** | √ | √ | | | | | | | | -| **clusterRbac** | √ | √ | √ | √ | √ | | | | | -| **dnsMapping** | √ | √ | √ | √ | √ | | | | | -| **edgehost** | √ | √ | √ | √ | √ | | | | | -| **location** | √ | √ | √ | √ | √ | | | | | -| **machine** | √ | √ | √ | √ | √ | | | | | -| **macro** | √ | √ | √ | √ | √ | | | | | -| **packRegistry** | √ | √ | | | | | | | | -| **privateGateway** | √ | √ | | | | | | | | -| **tag** | | | | | √ | | | | | -| **sshKey** | √ | √ | √ | √ | √ | | | | | - -
- -
- - -
- -## Cluster Editor -
- - - - - -
resourceKeysOperations
-
- - -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **cloudaccount** | | | √ | √ | | | | | | -| **cloudconfig** | | | √ | √ | √ | | | | | -| **cluster** | | | √ | √ | √ | | | | | -| **clusterProfile** | | | √ | √ | | | | | | -| **clusterRbac** | | | √ | √ | √ | | | | | -| **dnsMapping** | | | √ | √ | √ | | | | | -| **edgehost** | | | √ | √ | √ | | | | | -| **location** | | | √ | √ | √ | | | | | -| **machine** | | √ | √ | √ | √ | | | | | -| **macro** | | | √ | √ | √ | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **privateGateway** | | | √ | √ | | | | | | -| **tag** | | | | | √ | | | | | -| **sshKey** | | | √ | √ | √ | | | | | - -
- -
- - -
- -## Cluster Viewer - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **cloudaccount** | | | √ | √ | | | | | | -| **cloudconfig** | | | √ | √ | | | | | | -| **cluster** | | | √ | √ | | | | | | -| **clusterProfile** | | | √ | √ | | | | | | -| **clusterRbac** | | | √ | √ | | | | | | -| **dnsMapping** | | | √ | √ | | | | | | -| **edgehost** | | | √ | √ | | | | | | -| **location** | | | √ | √ | | | | | | -| **machine** | | | √ | √ | | | | | | -| **macro** | | | √ | √ | | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **privateGateway** | | | √ | √ | | | | | | -| **sshKey** | | | √ | √ | | | | | | - -
- -
-
- -
- -# Cloud Account ------------------------------ - -
- -|Role Names| Description | -|---|---| -|Cluster Account Admin | Administrative access to cloud account operations| -|Cluster Account Editor | Editor access to cloud account operations | -|Cluster Account Viewer | A read-only role for cloud account operations | - 
- - - - - -
- -## Cluster Account Admin - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ---------------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **cloudaccount** | √ | √ | √ | √ | √ | | | | | - -
- -
- - -
- -## Cluster Account Editor -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ---------------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **cloudaccount** | | | √ | √ | √ | | | | | - -
- -
- - -
- -## Cluster Account Viewer - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ---------------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **cloudaccount** | | | √ | √ | | | | | | - -
- -
-
- -# Workspace ------------------------------------- - -
- -|Role Names| Description | -|---|---| -|Workspace Admin | Administrator role to workspace operations| -|Workspace Editor | Editor role to workspace operations | - -
- - - - -
- -## Workspace Admin -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **workspace** | √ | √ | √ | √ | √ | | | √ | √ | - - -
- -
- - -
- -## Workspace Operator - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **workspace** | | | √ | √ | | | | √ | √ | - -
-
-
- -
-
- - -# Virtual Cluster --------------------------------- - -|Role Names | Description | -|---|---| -|Virtual Cluster Admin |Provides administrative privilege to perform all virtual cluster operations on App resources.| -|Virtual Cluster Editor|Allows the user to perform edit operations on a virtual cluster but not to create or delete a virtual cluster.| -|Virtual Cluster Viewer|Allows the user to view all the virtual cluster resources but not to modify them.| - -
-
- - - -
- -## Virtual Cluster Admin - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **clusterGroup** | | | √ | √ | | | | | | -| **location** | | | √ | √ | | | | | | -| **macro** | √ | √ | √ | √ | √ | | | | | -| **project** | | | √ | √ | | | | | | -| **tag** | | | | | √ | | | | | -| **virtualCloudconfig**| √ | √ | √ | √ | √ | | | | | -| **virtualCluster** | √ | √ | √ | √ | √ | | | | | - - - -
- - -
- -## Virtual Cluster Editor - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **clusterGroup** | | | √ | √ | | | | | | -| **location** | | | √ | √ | | | | | | -| **macro** | | | √ | √ | √ | | | | | -| **project** | | | √ | √ | | | | | | -| **tag** | | | | | √ | | | | | -| **virtualCloudconfig**| | | √ | √ | √ | | | | | -| **virtualCluster** | | | √ | √ | √ | | | | | - -
- - -
- - -
- -## Virtual Cluster Viewer - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **clusterGroup** | | | √ | √ | | | | | | -| **location** | | | √ | √ | | | | | | -| **macro** | | | √ | √ | | | | | | -| **project** | | | √ | √ | | | | | | -| **virtualCloudconfig**| | | √ | √ | | | | | | -| **virtualCluster** | | | √ | √ | | | | | | - -
- - -
-
- -
- -
-
-
diff --git a/content/docs/08-user-management/2-palette-rbac/3-resource-scope-roles-permissions.md b/content/docs/08-user-management/2-palette-rbac/3-resource-scope-roles-permissions.md deleted file mode 100644 index a5794033f5..0000000000 --- a/content/docs/08-user-management/2-palette-rbac/3-resource-scope-roles-permissions.md +++ /dev/null @@ -1,304 +0,0 @@ ---- -title: "Palette Resource Roles" -metaTitle: "Palette Global and Custom Resource Roles " -metaDescription: "Palette contains global resource roles and supports the ability to create custom resource roles." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Overview - -Palette supports two types of resource roles, global resource roles and custom resource roles: - 
- -* Global Resource Roles are a set of built-in roles available to you. - -* Custom Resource Roles are roles you can create in Palette using a set of permissions and operations. - -To learn how to create a custom role, review the [Create Custom Role](/user-management/palette-rbac/resource-scope-roles-permissions#palettecustomresourceroles) guide. - - -# Palette Global Resource Roles - -Palette provides the following built-in global resource roles: - 
- -* [Cluster](/user-management/palette-rbac/resource-scope-roles-permissions#cluster) - * Resource Cluster Admin - - * Resource Cluster Editor - - * Resource Cluster Viewer - -* [Cluster Profile](/user-management/palette-rbac/resource-scope-roles-permissions#clusterprofile) - - * Resource Cluster Profile Admin - - * Resource Cluster Profile Editor - - * Resource Cluster Profile Viewer - - -
- -## Cluster - -
- -|Role Names| Description | -|---|---| -|Resource Cluster Admin | A cluster admin in Project scope has all the privileges related to cluster operations| -|Resource Cluster Editor | A cluster editor in Project scope has the privileges to update, delete, get, and list cluster resources. This role is not privileged for cluster creation | -|Resource Cluster Viewer | A cluster viewer in Project scope has read-only privileges for cluster operations | - 
- - - - - -
- -### Resource Cluster Admin - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **cloudaccount** | | | √ | √ | | | | | | -| **cloudconfig** | √ | √ | √ | √ | √ | | | | | -| **cluster** | √ | √ | √ | √ | √ | √ | | | | -| **clusterProfile** | √ | √ | | | | | | | | -| **clusterRbac** | √ | √ | √ | √ | √ | | | | | -| **dnsMapping** | √ | √ | √ | √ | √ | | | | | -| **edgehost** | √ | √ | √ | √ | √ | | | | | -| **location** | √ | √ | √ | √ | √ | | | | | -| **machine** | √ | √ | √ | √ | √ | | | | | -| **macro** | √ | √ | √ | √ | √ | | | | | -| **packRegistry** | √ | √ | | | | | | | | -| **privateGateway** | √ | √ | | | | | | | | -| **sshKey** | √ | √ | √ | √ | √ | | | | | - -
- -
- - -
- -### Resource Cluster Editor -
- - - - - -
resourceKeysOperations
-
- - -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **cloudaccount** | | | √ | √ | | | | | | -| **cloudconfig** | | | √ | √ | √ | | | | | -| **cluster** | | | √ | √ | √ | | | | | -| **clusterProfile** | | | √ | √ | | | | | | -| **clusterRbac** | | | √ | √ | √ | | | | | -| **dnsMapping** | | | √ | √ | √ | | | | | -| **edgehost** | | | √ | √ | √ | | | | | -| **location** | | | √ | √ | √ | | | | | -| **machine** | | √ | √ | √ | √ | | | | | -| **macro** | | | √ | √ | √ | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **privateGateway** | | | √ | √ | | | | | | -| **sshKey** | | | √ | √ | √ | | | | | - -
- -
- - -
- -### Resource Cluster Viewer - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **cloudaccount** | | | √ | √ | | | | | | -| **cloudconfig** | | | √ | √ | | | | | | -| **cluster** | | | √ | √ | | | | | | -| **clusterProfile** | | | √ | √ | | | | | | -| **clusterRbac** | | | √ | √ | | | | | | -| **dnsMapping** | | | √ | √ | | | | | | -| **edgehost** | | | √ | √ | | | | | | -| **location** | | | √ | √ | | | | | | -| **machine** | | | √ | √ | | | | | | -| **macro** | | | √ | √ | | | | | | -| **packRegistry** | | | √ | √ | | | | | | -| **privateGateway** | | | √ | √ | | | | | | -| **sshKey** | | | √ | √ | | | | | | - -
- -
-
- -
- - -## Cluster Profile - - -The user with these permissions can manage the Cluster Profiles within a project. - -
- -|Role Names| Description | -|---|---| -|Cluster Profile Admin |Cluster Profile Admin role has admin privileges to all the cluster profile operations| -|Cluster Profile Editor|Cluster Profile Editor role has privileges to edit and list operations on the cluster profile| -|Cluster Profile Viewer|Cluster Profile Viewer role has read-only privileges to cluster profiles| - -
- - - -
- -### Resource Cluster Profile Admin - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **clusterProfile** | √ | √ | √ | √ | √ | | √ | | | -| **macro** | √ | √ | √ | √ | √ | | | | | -| **packRegistry** | √ | √ | | | | | | | | - -
- -
- - -
- -### Resource Cluster Profile Editor - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **clusterProfile** | | | √ | √ | √ | | √ | | | -| **macro** | | | √ | √ | √ | | | | | -| **packRegistry** | | | √ | √ | | | | | | - -
- -
- - -
- -### Resource Cluster Profile Viewer - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **clusterProfile** | | | √ | √ | | | | | | -| **macro** | | | √ | √ | | | | | | -| **packRegistry** | | | √ | √ | | | | | | - -
- -
-
- -
- - - -# Palette Custom Resource Roles - -
- -The following is a list of platform permissions and operations supported by Palette. Use these permissions to [create a custom role](/user-management/new-user#createcustomrole) that controls cluster access. For every **resource key**, the available **operations** can be added as your requirements dictate. - 
- -## List of Custom Permissions - -
- - - - - -
resourceKeysOperations
-
- -| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | -| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | -| **cloudaccount** | | | √ | √ | | | | | | -| **cloudconfig** | | √ | √ | √ | √ | | | | | -| **cluster** | | √ | √ | √ | √ | | | | | -| **clusterProfile** | | √ | √ | √ | √ | | √ | | | -| **dnsMapping** | | | √ | √ | | | | | | -| **location** | | | √ | √ | | | | | | -| **machine** | | | √ | √ | | | | | | -| **macro** | | | √ | √ | | | | | | -| **packRegistry** | | | √ | √ | | | | | | - - -## Resources - -[Resource Scope Matrix](/user-management/palette-rbac#resourcescopematrix) - - diff --git a/content/docs/08-user-management/3-palette-resource-limits.md b/content/docs/08-user-management/3-palette-resource-limits.md deleted file mode 100644 index 26aa187482..0000000000 --- a/content/docs/08-user-management/3-palette-resource-limits.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: "Palette Resource Limits" -metaTitle: "Default Palette Resource Limits" -metaDescription: "Understand the default resource limits for Palette and learn how to set resource limits for your Palette tenant." -icon: "" -hideToC: false -fullWidth: false ---- - -import InfoBox from 'shared/components/InfoBox'; - -# Default Palette Resource Limits - - -Tenant admins can set and update resource limits for Palette. The resource limits determine the maximum number of resources that can be created in Palette. The resource limits are set at the tenant level and apply to all projects in the tenant. - -The following table lists the default resource limits for Palette: - -|**Resources** | **Max Limit** | **Scope** | -|--------------------|----------------------| ---- | -|Users | 300 | Tenant| -|Teams | 100 | Tenant| -|Projects | 50 | Tenant | -|Workspaces | 50 | Tenant | -|Roles | 100 | Tenant | -|Cloud Accounts | 200 | Tenant | -|Cluster Profiles | 200 | Tenant | -|Registries | 50 | Tenant | -|Private Gateway | 50 | Tenant | -|API Keys | 20 | User | -|Backup Locations | 100 | Tenant | -|Certificates | 20 | Tenant | -|Macros | 200 | Project| -|SSH Keys | 300 | Tenant | -|Alerts or Webhook | 100 | Project| -|Clusters | 10,000 | Tenant | -|Edge Hosts | 200 | Tenant | - -# Set Resource Limit - -Use the following steps to set or update resource limits for your Palette tenant. - -## Prerequisites - -* You must have access to the *tenant admin* role. - - -## Update Limits - -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Navigate to the left **Main Menu** and select **Tenant Settings**. - - -3. Select **Resource Limits** from the **Tenant Settings Menu**. - - -4. Set the values for the different Palette resources. - - -5. Save your changes. - - -## Validate - -You can validate the updated resource limits by creating a resource of the same type you updated. For example, you can create five API keys if you updated the **API Key** to five. If you attempt to create a sixth API key, you will receive an error message. - - - -
-
diff --git a/content/docs/08-user-management/4-saml-sso.md b/content/docs/08-user-management/4-saml-sso.md deleted file mode 100644 index 2303277943..0000000000 --- a/content/docs/08-user-management/4-saml-sso.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: "SAML and SSO Setup" -metaTitle: "SAML and SSO Setup" -metaDescription: "Detailed instructions on creating Single Sign-on to log in to Palette using SAML 2.0" -icon: "" -hideToC: false -fullWidth: false ---- -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Overview - -Palette supports Single Sign-On (SSO) with a variety of Identity Providers (IDP). You can enable SSO in Palette by using the following protocols for authentication and authorization. - -
- - -- Security Assertion Markup Language (SAML) - SAML is a standalone protocol that requires a centralized identity provider (IDP) to manage user identities and credentials. SAML supports SSO and is commonly used for enterprise applications. - - -- OpenID Connect (OIDC) - OIDC is a more modern protocol designed for web and mobile applications. OIDC is built on top of [OAuth 2.0](https://www.rfc-editor.org/rfc/rfc6749), a widely used authorization framework. OIDC supports distributed identity providers and social login providers such as Google or GitHub. - -Enable SSO by following our [Enable SSO in Palette](/user-management/saml-sso/enable-saml) guide. - -# Resources - -- [Enable SSO in Palette](/user-management/saml-sso/enable-saml) - - -- [Palette SSO with Azure Active Directory](/user-management/saml-sso/palette-sso-azure-ad) - - -- [Enable SSO with Microsoft Active Directory Federation Service (AD FS)](/user-management/saml-sso/palette-sso-with-adfs) - - -- [Palette SSO with Okta](/user-management/saml-sso/palette-sso-with-okta) diff --git a/content/docs/09-registries-and-packs.md b/content/docs/09-registries-and-packs.md deleted file mode 100644 index d36cf2e9c8..0000000000 --- a/content/docs/09-registries-and-packs.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: "Registries and Packs" -metaTitle: "Registries and Packs" -metaDescription: "Learn about Packs, how to use and combine Packs, and how to create your own Pack." -icon: "nodes" -hideToC: false -fullWidth: false ---- - -import WarningBox from 'shared/components/WarningBox'; -import Tabs from "shared/components/ui/Tabs"; - -# Packs - - -A **Cluster Profile** is made up of preconfigured layers, each of which is called a pack. In other words, **Packs** are the building blocks of a cluster profile, used to create layers such as operating systems, Kubernetes, network, storage, and add-ons. Packs can be broadly categorized into two types: - -- **Core** packs - These packs model the core layers to provision a Kubernetes cluster. These packs include the operating system, Kubernetes, the container network interface (CNI), and the container storage interface (CSI) specifications. Spectro Cloud builds and maintains these core packs and keeps them updated. - - -- **Add-On** packs - These packs model the infrastructure integrations and applications that exist on top of the core packs. Examples of applications are system, authentication, security, monitoring, logging, ingress, load balancer, service mesh, or Helm charts. - -Both the core and add-on packs described above are configurable, and you can define new add-on custom packs from scratch as well. Defining new add-on packs lets you enforce consistent governance across your profile deployments. - - -## Pack Structure - -Palette provides a rich collection of out-of-the-box packs for various integrations and also offers extensibility through custom-built packs. To configure an existing pack (core or add-on) or to define a new add-on custom pack from scratch, it is essential to understand the pack structure. - -Each pack is a collection of files such as manifests, Helm charts, Ansible roles, configuration files, and more. Ansible roles, if provided, are used to customize cluster VM images, whereas Kubernetes manifests and Helm charts are applied to the Kubernetes clusters after deployment. 
The following is a typical pack structure: - - -| **Pack Name** |**Requirement** | **Description** | -|-|-|-| -| `pack.json` | mandatory| Pack metadata.| -| `values.yaml`| mandatory| Pack configuration, parameters exposed from the underlying charts, and templated parameters from Ansible roles. | -| `charts/`| mandatory| Mandatory for Helm chart-based packs. Contains the Helm charts to be deployed for the pack. | -| `manifests/`| mandatory| Mandatory for Manifest-based packs. Contains the manifest files to be deployed for the pack. -| `ansible-roles`| optional| Ansible roles used to install the pack.| -| `logo.png`| optional| Contains the pack logo. | -| `README.md`|optional| The pack description. | - - -Let's look at the examples below to better understand pack structure.

- - - - - - - -The example shows the structure of a Helm chart-based pack, **istio-1.6.2**, which is made up of two charts: *istio-controlplane* and *istio-operator*. Each chart has its **values.yaml** file. In this example, we have a pack-level **values.yaml** file and individual chart-level **values.yaml** files.

- -```bash -. -├── charts/ -│   ├── istio-controlplane.tgz -│   ├── istio-controlplane -│   │   ├── Chart.yaml -│   │   ├── templates/ -│   │   └── values.yaml -│   ├── istio-operator.tgz -│   └── istio-operator -│      ├── Chart.yaml -│      ├── templates/ -│      └── values.yaml -├── logo.png -├── pack.json -└── values.yaml -``` - -
- - - -This example shows the structure of a Manifest-based pack, *kubeflow-1.2.0*, made up of **kubeflow-kfdef.yaml** and **kubeflow-operator.yaml** manifests. - -```bash -. -├── manifests/ -│   ├── kubeflow-kfdef.yaml -│   └── kubeflow-operator.yaml -├── logo.png -├── pack.json -└── values.yaml -``` - - - -
- -# Registries - - -The pack registry is a server-side application to store and serve packs to its clients. Packs from a pack registry are retrieved and presented as options during the creation of a cluster profile. Palette supports the configuration of multiple registries. - -## Default Registry - -The default pack registry is Spectro Cloud's public pack registry. It consists of several packs that make it easy for a user to quickly create a cluster profile and launch a Kubernetes cluster with their choice of integrations. Palette maintains all packs in this pack registry and takes care of upgrading packs in the pack registry whenever required. - -## Custom Pack Registry - -Users can set up a custom pack registry using a Docker image provided by Spectro Cloud to upload and maintain custom packs. Spectro Cloud provides a CLI tool to interact with and manage pack content in the pack registry. Custom registries offer a mechanism of extending the capabilities of a platform by defining additional integrations. - -# Spectro CLI - -The Spectro Cloud Command Line Interface (CLI) is a tool to interact with a Spectro Cloud pack registry. You can use the CLI to upload and download packs. The CLI must authenticate with the pack registry before executing any CLI commands. Review the [Spectro Cloud CLI](/registries-and-packs/spectro-cli-reference) reference page for usage instructions. - -
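If you plan to build a custom add-on pack for your own registry, it can help to scaffold the directory layout described in the Pack Structure section above before authoring any content. The following is a minimal, illustrative sketch only; the pack name and the `pack.json` fields shown are placeholder assumptions, so verify them against the pack requirements of your registry before uploading.

```bash
# Illustrative sketch: scaffold a manifest-based add-on pack skeleton that
# mirrors the pack structure described above. The pack name and the pack.json
# fields are placeholder assumptions, not an authoritative schema.
PACK_DIR=hello-world-1.0.0
mkdir -p "${PACK_DIR}/manifests"

# Minimal pack metadata (verify the expected fields for your registry).
cat <<'EOF' > "${PACK_DIR}/pack.json"
{
  "name": "hello-world",
  "displayName": "Hello World",
  "version": "1.0.0",
  "layer": "addon"
}
EOF

# values.yaml exposes the parameters users can override in a cluster profile.
touch "${PACK_DIR}/values.yaml" "${PACK_DIR}/README.md" "${PACK_DIR}/logo.png"

# Place your Kubernetes manifests under manifests/ before uploading the pack
# with the Spectro CLI.
```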
diff --git a/content/docs/09-registries-and-packs/1-adding-a-custom-registry.md b/content/docs/09-registries-and-packs/1-adding-a-custom-registry.md deleted file mode 100644 index 28e770252c..0000000000 --- a/content/docs/09-registries-and-packs/1-adding-a-custom-registry.md +++ /dev/null @@ -1,308 +0,0 @@ ---- -title: "Add a Custom Registry" -metaTitle: "Add a Custom Registry" -metaDescription: "Learn how to create and use custom-made packs and registries in Spectro Cloud" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Add Custom Registries - -Setting up a custom pack registry is a two-step process. The first step is to deploy a pack registry server using a Docker image provided by us. While deploying a pack registry server, you can employ a TLS certificate from a Certificate Authority (CA) or a self-signed certificate. This guide provides instructions for both methods: CA-issued and self-signed certificates. You can check out the [Advanced Configuration](/registries-and-packs/advanced-configuration) guide to learn about the customization options available while deploying a pack registry server. - -After deploying a pack registry server, the next step is configuring the pack registry server in Palette. Once you finish configuring the pack registry server in Palette, Palette will periodically synchronize the pack contents from the pack registry server. - -# Prerequisites - -* Ensure you have a Docker container runtime installed on the machine. - - -* The HTTP utility *htpasswd* must be installed for user authentication encryption. - - -* The minimum machine compute specifications are 1 vCPU and 2 GB of memory. - - -* Firewall ports 443 and 80 must be open on the machine to allow traffic from the Palette console and the Spectro CLI tool. - -* [OpenSSL](https://www.openssl.org/source/) is required if you are creating a self-signed certificate. Refer to the [Self-Signed Certificates](#self-signedcertificates) section below for more guidance. - - - -Ensure that ports 443 and 80 are exclusively allocated to the registry server and are not in use by other processes. - - - -# Deploy Pack Registry Server with Let's Encrypt - -We provide a Docker image for setting up a pack registry server. Use the following steps to deploy a pack registry server using the designated Docker image and a TLS certificate issued by [Let's Encrypt](https://letsencrypt.org/). - 
- -1. Create a folder to hold the htpasswd file. - 
- -    ```bash -    mkdir spectropaxconfig -    ``` - -2. Create an htpasswd file. - 
- -    ```shell -    htpasswd -Bbn admin "yourPasswordHere" > spectropaxconfig/htpasswd-basic -    ``` - - -3. Create a pack registry configuration file titled **myconfig.yml** in the **spectropaxconfig** directory. The YAML code block below displays the sample content for the **myconfig.yml** file. The current example assumes that your pack registry server will be hosted at `yourhost.companydomain.com` and that the email address for notifications is `you@companydomain.com`. Replace the `host` and `email` attribute values as applicable to your environment. - 
- -    ```yaml -    version: 0.1 -    log: -      level: debug -    storage: inmemory -    http: -      addr: :5000 -      tls: -        letsencrypt: -          cachefile: /etc/spectropaxconfig/le-cache -          email: you@companydomain.com -          hosts: -            - yourhost.companydomain.com -    auth: -      htpasswd: -        realm: basic-realm -        path: /etc/spectropaxconfig/htpasswd-basic -    ``` - -4. Start the container with the following flags. - - 
- -    ```bash -    docker run \ -    --rm \ -    --publish 443:5000 \ -    --name spectro-registry \ -    --volume $(pwd)/spectropaxconfig/:/etc/spectropaxconfig/ \ -    gcr.io/spectro-images-public/release/spectro-registry:4.0.2 \ -    serve /etc/spectropaxconfig/myconfig.yml -    ``` - -You can now access the pack registry at `https://yourhost.companydomain.com/v1/`. -You will be prompted for the user admin and the password you chose. - -# Deploy a Pack Registry Server with Self-Signed Certificates - -Perform the following steps to deploy the pack registry server using self-signed certificates: - -1. Configure the user credentials by using the `htpasswd` utility and store the credentials in a local file. This file will be mounted inside the pack registry Docker container. - 
- -    ```bash -    mkdir -p /root/auth -    ``` - -2. For admin users, the command below has a placeholder where you specify a unique, secure password. - 
- -    ```bash -    htpasswd -Bbn admin "yourPasswordHere" > /root/auth/htpasswd-basic -    ``` - -3. For other users, the following command has a placeholder where you specify a unique, secure password for read-only users. - 
- - ```bash - htpasswd -Bbn spectro "yourPasswordHere" >> /root/auth/htpasswd-basic - ``` - -4. If HTTPS mode is used, create a directory called `certs`. -
- -    ```shell -    mkdir -p /root/certs -    ``` - -5. Copy the **tls.crt** and **tls.key** files from the CA into the **/root/certs** directory. This directory will be mounted inside the registry Docker container. - - -6. Pack contents in a pack registry can be stored locally on the host or on an external file system. -An external file system is recommended so that the pack contents can be mounted on another pack -registry instance in the event of restarts and failures. -Create a directory or mount an external volume to the desired storage location. Example: `/root/data` - - -7. Issue the following command to pull the pack registry server image. The image will help you instantiate a Docker container as a pack registry server. - 
- - ```shell - docker pull gcr.io/spectro-images-public/release/spectro-registry:3.4.0 - ``` - -8. Use the `docker run` command to instantiate a Docker container. If you encounter an error while instantiating the Docker container, below are some common scenarios and troubleshooting tips. - - * The Registry CLI login command fails with the error message `x509: cannot validate certificate for ip_address, because it doesn't contain any IP SANs`. The error occurs when a self-signed certificate is created using an IP address rather than a hostname. To resolve the error, recreate the certificate to include an IP SAN or use a DNS name instead of an IP address. - - * The Registry CLI login command fails with the error message `x509: certificate signed by unknown authority`. The error occurs when the self-signed certificate is invalid. To resolve the error, you must configure the host where CLI is installed to trust the certificate. - - - - - - -```bash -docker run -d \ - -p 443:5000 \ - --restart=always \ - --name spectro-registry \ - --mount type=bind,source=/root/auth,target=/auth,readonly \ - --mount type=bind,source=/root/data,target=/data \ - --mount type=bind,source=/root/certs,target=/certs,readonly \ - -e REGISTRY_LOG_LEVEL=info \ - -e REGISTRY_AUTH=htpasswd \ - -e REGISTRY_AUTH_HTPASSWD_REALM="Registry Realm" \ - -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd-basic \ - -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/tls.crt \ - -e REGISTRY_HTTP_TLS_KEY=/certs/tls.key \ - gcr.io/spectro-images-public/release/spectro-registry:4.0.2 -``` - - - - - - -```shell -docker run -d \ - -p 80:5000 \ - --restart=always \ - --name spectro-registry \ - --mount type=bind,source=/root/auth,target=/auth,readonly \ - --mount type=bind,source=/root/data,target=/data \ - -e REGISTRY_LOG_LEVEL=info \ - -e REGISTRY_AUTH=htpasswd \ - -e REGISTRY_AUTH_HTPASSWD_REALM="Registry Realm" \ - -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd-basic \ - gcr.io/spectro-images-public/release/spectro-registry:4.0.2 - ``` - -
- - - -Registry servers configured in HTTP mode require the `--insecure` CLI flag when using the Spectro Cloud CLI's `login` command. - -
- -```shell -spectro registry login --insecure http://example.com:5000 -``` - -
- -
- -
- -
- - -9. Expose the container host's port publicly to allow the console to interact with the pack registry. - This would be typically done via environment-specific constructs like Security Groups, Firewalls, etc. - - -10. Verify the installation by invoking the pack registry APIs using the curl command. This should result in a 200 response. - - - - - - ```bash - curl --cacert tls.crt -v [REGISTRY_SERVER]/health - curl --cacert tls.crt -v -u [USERNAME] [REGISTRY_SERVER]/v1/_catalog - ``` - - - - - - ```bash - curl -v [REGISTRY_SERVER]/health - curl -v -u [USERNAME] [REGISTRY_SERVER]/v1/_catalog - ``` - - - - - - - -# Configure a Custom Pack Registry in Palette - -Once you deploy the pack registry server, use the following steps to configure the pack registry server in Palette. -
- -1. Log in to Palette, and switch to the tenant admin view. - - -2. Navigate to the **Tenant Settings** > **Registries** > **Pack Registries** section. - - -3. Click on **Add New Pack Registry**. Palette will open a pop-up window asking for the fields to configure a pack registry server, as highlighted in the screenshot below. - - ![A screenshot highlighting the fields to configure a custom pack registry. ](/registries-and-packs_adding-a-custom-registry-tls_certificate.png) - - -4. Provide the pack registry server name, endpoint, and user credentials in the pop-up window. Ensure you use an `https://` prefix in the pack registry server endpoint. - - -5. If you want Palette to establish a secure and encrypted HTTPS connection with your pack registry server, upload the certificate in the **TLS Configuration** section. The certificate file must be in the PEM format and have a complete trust chain. - - If you used a TLS certificate issued by a CA while configuring the pack registry server, check with your CA to obtain a certificate chain. If you used a self-signed certificate, upload the entire certificate trust chain. The file content must have the server, the intermediate, and the root certificates. - - Once you upload the *.pem* certificate file and click the **Validate** button, Palette will perform the TLS verification to affirm the certificate's authenticity before establishing a communication channel. - - -6. Select the **Insecure Skip TLS Verify** checkbox if you do not want an HTTPS connection between Palette and your pack registry server. If you upload a TLS certificate and also select the **Insecure Skip TLS Verify** checkbox, the **Insecure Skip TLS Verify** value will take precedence. - - -7. Click the **Confirm** button to finish configuring the pack registry server. After you finish the configuration, Palette will periodically synchronize with the pack registry server to download pack updates, if any. - - -# Self-Signed Certificates - -For self-signed certificates, use the following command to generate certificates. - 
- - ```bash - openssl req \ - -newkey rsa:4096 -nodes -sha256 -keyout tls.key \ - -x509 -days 1825 -out tls.crt - ``` - -Provide the appropriate values while ensuring that the Common Name matches the registry hostname. - -
- - ```text hideClipboard - Country Name (2 letter code) [XX]: - State or Province Name (full name) []: - Locality Name (eg, city) [Default City]: - Organization Name (eg, company) [Default Company Ltd]: - Organizational Unit Name (eg, section) []: - Common Name (eg, your name or your server's hostname) []:[REGISTRY_HOST_DNS] - Email Address []: - - Example: - REGISTRY_HOST_DNS - registry.com - ``` - - -
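If your registry is reached by IP address, or you want to avoid the `x509` SAN errors mentioned earlier, the certificate can also be generated with a Subject Alternative Name. The following is a minimal sketch that assumes OpenSSL 1.1.1 or newer and a placeholder hostname `registry.example.com`; adjust the SAN entry to your own DNS name, or use an `IP:` entry for an IP-based registry.

```bash
# Example only: self-signed certificate that includes a DNS SAN entry.
openssl req \
  -newkey rsa:4096 -nodes -sha256 -keyout tls.key \
  -x509 -days 1825 -out tls.crt \
  -subj "/CN=registry.example.com" \
  -addext "subjectAltName=DNS:registry.example.com"
```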
diff --git a/content/docs/09-registries-and-packs/2-spectro-cli-reference.md b/content/docs/09-registries-and-packs/2-spectro-cli-reference.md deleted file mode 100644 index b831dac1de..0000000000 --- a/content/docs/09-registries-and-packs/2-spectro-cli-reference.md +++ /dev/null @@ -1,350 +0,0 @@ ---- -title: "Spectro Cloud CLI Tool" -metaTitle: "Spectro Cloud CLI Tool" -metaDescription: "A reference sheet for the Spectro Cloud CLI tool" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -The Spectro CLI tool is a command-line interface for the Spectro Cloud Pack Registry server to upload or download the packs using commands. - -# Prerequisites - -- A custom pack registry server must be up and running. - -# Installation - -The Spectro CLI tool is currently available for OSX and Linux. - -1. Download the CLI binary file: - - - - - - ```bash - wget https://software.spectrocloud.com/spectro-registry/cli/v4.0.1/osx/spectro - ``` - - - - - - ```bash - wget https://software.spectrocloud.com/spectro-registry/cli/v4.0.1/linux/spectro - ``` - - - - - -2. Provide the executable permission to the CLI spectro. - - ```bash - chmod +x spectro - ``` - -# Global Arguments - -List of Arguments available to all the Spectro CLI commands: - -# Global Flags - -* List of Flags available to all the Spectro CLI commands: - - * h, --help - help for each command - -# Commands - - - - - -## LOGIN - -Authenticate user with Spectro Cloud pack registry by using the login command: - - - - - -```bash - spectro registry login [SERVER] -``` - - - - - -```bash - spectro registry login spectro.io:5000 -``` - -```bash - spectro registry login spectro.io:5000 --insecure --default -``` - - - - - -### Args - -SERVER - Spectro Cloud pack registry server in the format [host:port] - - -### Flags - --i, --insecure - Insecure is used when the pack registry is installed in HTTP or HTTPS with self-signed certificates. - --d, --default - Set the server as default Spectro Cloud pack registry for all the CLI commands. - -**Note:** In case of HTTPS, if you have access to the pack registry's CA certificate, there is no need for the insecure flag; simply place the CA certificate at /etc/spectro/certs.d/[SERVER]/ca.crt. - - - - - -## PUSH - -Upload the pack content from the pack source dir to the Spectro Cloud pack registry. - - - - - -```bash - spectro pack push [PACK_SOURCE_DIR] [flags] -``` - - - - - -```bash - spectro pack push /tmp/packs/nginx-1.16.1 -``` - -```bash - spectro pack push /tmp/packs/nginx-1.16.1 --registry-server spectro.io:5000 -``` - -```bash - spectro pack push /tmp/packs/nginx-1.16.1 --force --message "updated nginx pack values" -``` - - - - - -### Args - -PACK_SOURCE_DIR: Directory location where pack content is located. - -### Flags - --r, --registry-server string - To override the default Spectro Cloud pack registry - --f, --force - If a pack with the same tag already exists in the registry, then the *force* option can be used to overwrite the pack contents in the registry. - --m, --message - A short description about the pack changes. It is mandatory to set this flag when the force option is enabled. 
- ---skip-digest-check - By default, the *force* option can push the pack only if the pack content digest is different than the registry pack digest. So the *skip digest* command can be used to skip the comparison of the digests. - - - - - -## LIST - -List all the packs from the Spectro Cloud pack registry: - - - - - -```bash - spectro pack ls [flags] -``` - - - - - -```bash - spectro pack ls spectro.io:5000 -``` - -```bash - spectro pack ls spectro.io:5000 --name ubuntu --registry-server spectro.io:5000 -``` - - - - - -### Flags - --n, --name string - packs can be filtered by pack name - --r, --registry-server string - To override the default Spectro Cloud pack registry - - - - - -## PULL - -Download the packs from the Spectro Cloud pack registry to a pack target location: - - - - - -```bash - spectro pack pull NAME[:TAG|@DIGEST] TARGET_DIR [flags] -``` - - - - - -```bash - spectro pack pull nginx:1.16.1 /tmp/packs -``` - -```bash - spectro pack pull nginx@sha256:5269f073ac8e3c2536270b496ca1cc537e32e44186a5a014b8c48cddca3c6e87 /tmp/packs --registry-server spectro.io:5000 -``` - - - - - -### Args - -PACK_NAME: TAG|@DIGEST - Name of the pack for a particular tag or a sha digest. - -PACK_TARGET_DIR - Directory location where pack content will be pulled. - -### Flags - --r, --registry-server string - To override the default Spectro Cloud pack registry. - - - - - -## ADD (Add a Tag) - -Create a new tag to a pack which is already pushed to the Spectro Cloud pack registry: - - - - - -```bash - spectro pack tag add SOURCE_PACK:TAG TARGET_LABEL [flags] -``` - - - - - -```bash - spectro pack tag add ubuntu:lts__14.4.3 stable -``` - -```bash - spectro pack tag add ubuntu:lts__14.4.3 14.4.3-beta -g lts -r spectro.io:5000 -``` - - - - - -**Note:** Tag is a combination of label and the group name. The label is mandatory whereas the group is optional. - -tag → <group>__<label> - -Ex. lts___14.4.3 : lts → group, 14.4.3 → label - -### Args - -PACK_NAME: TAG - Name of the pack for a particular tag to which a new tag will be created. - -TARGET_LABEL - Target tag label. - -### Flags - --g, --group string - Target tag group. - --r, --registry-server string - To override the default Spectro Cloud pack registry. - - - - - -## DELETE (Delete a tag) - -Delete a tag to a pack that is already pushed to the Spectro Cloud pack registry. - - - - - -```bash - spectro pack tag delete PACK:TAG [flags] -``` - - - - - -```bash - spectro pack tag delete ubuntu:14.4.3 -``` - -```bash - spectro pack tag delete ubuntu:14.4.3 -r spectro.io:5000 -``` - - - - - -**Note:** Parent tags like major version (Ex: 14.x) and minor version (Ex: 14.4.x) can not be deleted as these are auto-generated by the system. So, when no tags are associated with the pack then these are auto-deleted by the system. When a tag (Ex: 14.4.3) is deleted then the major and minor version tags are auto-linked to the remaining tags of a pack. - -### Args - -PACK_NAME: TAG - Pack name and Tag which needs to be deleted. - -### Flags - --r, --registry-server string - To override the default Spectro Cloud pack registry. - - - - - -## VERSION - -Check the version of the Spectro CLI that is currently installed. 
- -```bash - spectro version -Spectro ClI Version 4.0.1 linux/amd64 -``` - - - - diff --git a/content/docs/09-registries-and-packs/3-add-custom-packs.md b/content/docs/09-registries-and-packs/3-add-custom-packs.md deleted file mode 100644 index 9674b900b8..0000000000 --- a/content/docs/09-registries-and-packs/3-add-custom-packs.md +++ /dev/null @@ -1,518 +0,0 @@ ---- -title: "Add a Custom Pack" -metaTitle: "Add a Custom Pack" -metaDescription: "How to create and use custom made packs and registries in Spectro Cloud" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Add a Custom Pack - -Custom packs are built by users and deployed to custom registries using the Spectro Cloud CLI tool. To get started with Spectro Cloud CLI, review the Spectro Cloud CLI installation [instructions](/registries-and-packs/spectro-cli-reference). - -## Prerequsites - -The following items are required to create a custom pack. - -- [Spectro Cloud CLI](/registries-and-packs/spectro-cli-reference) -- A Spectro Cloud [account](https://www.spectrocloud.com/) -- [Custom Pack registry](/registries-and-packs/adding-a-custom-registry) - -## JSON Schema - -Each pack contains a metadata file named `pack.json`. The table below explains in greater detail the JSON schema attributes. - - -| Property Name | Data type | Required | Description | -| --- | --- | --- | --- | -| name | String | True | Name of the pack | -| displayName | String | True | Name of the pack as it is to be displayed on the Palette management console. | -| layer | String | True | Relevant layer that this pack should be part of; such as os, k8s, cni, csi, addon | -| addonType | String | False | Addon-type must be set for packs that have the layer set to Addon. The value must be one of the following: logging, monitoring, load balancer, authentication, ingress, security. Setting a relevant correct addon type ensures packs are organized correctly on the management console making it easy for profile authors to find packs. | -| version | String | True | A Semantic version for the pack. It is recommended that the pack version be the same as the underlying integration it is being created for. For example, the version for the pack that will install Prometheus 2.3.4, should set to 2.3.4. | -| cloudTypes | Array | True | You can provide one or more types for a pack. Supported values are as follows:

**all**, **aws**, **azure**, **gcp**, **tencent**, **vsphere**, **openstack**, **baremetal**, **maas**, **aks**, **eks**, **tke**, **edge**, **edge-native**, **coxedge**, and **libvirt** (virtualized edge). |
| group | String | False | Optional categorization of packs. For example, LTS can be set for Ubuntu OS packs. |
| annotations | Map | False | Optional key-value pairs required during pack installation. Typically, custom packs do not need to set annotations. Some packs, such as OS packs, require annotations that must be set with an image ID. |
| eol | String | False | End-of-life date for the integration. |
| kubeManifests | Array | False | Relative paths to Kubernetes manifest YAML files. |
| ansibleRoles | Array | False | Relative paths to the Ansible role folders. These folders should contain all the artifacts required by Ansible. Refer to the Ansible documentation for more details on how Ansible roles are constructed. |
| | | | In Palette, Ansible roles are used to customize the OS image used for cluster nodes. Typically, these are roles that perform tasks like hardening the OS, installing monitoring agents, etc. |
| charts | Array | False | Relative paths to the Helm chart archives. |

The following is the JSON schema for packs. Review the schema to ensure your JSON configuration is defined correctly.
- - -```json -{ - "type": "object", - "required": [ - "name", - "displayName", - "version", - "layer" - ], - "properties": { - "name": { - "$ref": "#/definitions/nonEmptyString" - }, - "displayName": { - "$ref": "#/definitions/nonEmptyString" - }, - "version": { - "$ref": "#/definitions/nonEmptyString" - }, - "layer": { - "$ref": "#/definitions/layer" - }, - "group": { - "type": "string" - }, - "cloudTypes": { - "type": "array", - "items": { - "type": "string", - "enum": [ - "all", - "aws", - "azure", - "gcp", - "vsphere", - "openstack", - "baremetal", - "maas", - "aks", - "eks", - "tencent", - "tke", - "edge", - "libvirt", - "edge-native", - "coxedge" - ] - } - }, - "cloudType": { - "type": "string", - "enum": [ - "all", - "aws", - "azure", - "gcp", - "vsphere", - "openstack", - "baremetal", - "maas", - "aks", - "eks", - "tencent", - "tke", - "edge", - "libvirt", - "edge-native", - "coxedge" - ] - }, - "eol": { - "type": "string" - }, - "addonType": { - "type": "string" - }, - "addonSubType": { - "type": "string" - }, - "ansibleRoles": { - "type": "array", - "items": { - "type": "string" - } - }, - "charts": { - "type": "array", - "items": { - "type": "string" - } - }, - "kubeManifests": { - "type": "array", - "items": { - "type": "string" - } - }, - "annotations": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "constraints": { - "$ref": "#/definitions/constraints" - } - }, - "definitions": { - "nonEmptyString": { - "type": "string", - "minLength": 1 - }, - "layer": { - "type": "string", - "enum": [ - "kernel", - "os", - "k8s", - "cni", - "csi", - "addon" - ] - }, - "constraints": { - "type": "object", - "properties": { - "dependencies": { - "type": "array", - "items": { - "$ref": "#/definitions/dependency" - } - }, - "resources": { - "type": "array", - "items": { - "$ref": "#/definitions/resource" - } - } - } - }, - "dependency": { - "type": "object", - "required": [ - "packName", - "layer", - "type" - ], - "properties": { - "packName": { - "$ref": "#/definitions/nonEmptyString" - }, - "layer": { - "$ref": "#/definitions/layer" - }, - "minVersion": { - "type": "string" - }, - "maxVersion": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "required", - "optional", - "notSupported", - "upgrade" - ] - } - } - }, - "resource": { - "type": "object", - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "cpu", - "memory", - "diskSize" - ] - }, - "minLimit": { - "type": "number" - }, - "components": { - "type": "array", - "items": { - "$ref": "#/definitions/component" - } - } - } - }, - "component": { - "type": "object", - "required": [ - "scheduleType" - ], - "properties": { - "scheduleType": { - "type": "string", - "enum": [ - "all", - "master", - "worker" - ] - }, - "resourceRequestParamRef": { - "type": "string" - }, - "replicaCountParamRef": { - "type": "string" - } - } - } - } -} -``` - -# Create a Custom Pack - -Follow the steps below to create a custom pack. - -1. Create a directory with a suitable name for all the pack contents. - - Example: `prometheus_1_0` - - -2. Create a metadata file named `pack.json` to describe the pack. - - Example: - - ```json - { - "annotations": { - "name": "value" - }, - "ansibleRoles": [], - "displayName": "", - "eol": "2028-04-30", - "group": "", - "kubeManifests": [ - "manifests/deployment.yaml" - ], - "layer": "", - "name": "", - "version": "" - } - ``` - -3. Create a file named `values.yaml`. 
This file consists of configurable parameters that need to be exposed to the end-users during the creation of a cluster profile. - - - -A values.yaml file is mandatory for every pack. For an OS pack, there are typically no configurable parameters, but an empty file still needs to be added to the OS pack. - - - -Parameters for all charts, manifests, and Ansible roles defined in the pack are defined in the `values.yaml` file. -*Helm* charts natively support values override. Any values defined are merged with those defined within a chart. *Manifests* and *Ansible* roles need to be explicitly templatized if parameter configuration is desired. - -```yaml - pack: - namespace : - charts: - chart1: - - chart2: - - manifests: - manifest1: - - manifest2: - - ansibleRoles: - role1: - - role2: - -``` - -4. A pack must have the logo file named `logo.png` and must be copied into the pack directory. - - -5. Login to the pack registry using the following command: - - ```bash - spectro registry login [REGISTRY_SERVER] - ``` - -6. Push the newly defined pack to the pack registry using the following command: - - ```bash - spectro pack push [PACK_DIR_LOCATION] --registry-server [REGISTRY_SERVER] - ``` - -7. To overwrite contents of a previously deployed pack, use the force option as follows: - - ```bash - spectro pack push [PACK_DIR_LOCATION] -f --registry-server [REGISTRY_SERVER] - ``` - -# Adding an OS Pack - -The OS is one of the Core Layers in a cluster profile. An OS pack can be built to use a custom OS image for cluster nodes. This might be desirable if an organization wants to use an approved hardened OS image for their infrastructure. There are typically the following two scenarios for the OS image: - -
- -1. **Pre-Installed Kubernetes** - The OS image has the desired version of Kubernetes components like kubelet, kubectl, etc installed. - - -2. **Vanilla OS Image** - Kubernetes components are not installed. - -Additionally, for both scenarios additional components or packages may need to be installed at runtime to prepare the final OS image. This can be done by specifying one or more Ansible roles in the pack. The following are a few examples of building custom OS pack to cover the some of these scenarios. - -A few sample pack manifests for building a custom OS pack are shown in the following examples. These are examples for images that do not have Kubernetes components pre-installed. Palette installs these components at the time of provisioning. The version of Kubernetes that gets installed depends on the Kubernetes pack configuration in the cluster profile. If Kubernetes is pre-installed in the image, set the flag `skipK8sInstall` to true. - -# Examples - - - - - -### AWS Custom-OS Pack - -```yaml -{ - "annotations": { - "cloudRegion": "us-east-1", - "imageId": "ami-071f6fc516c53fca1", - "imageOwner": "421085264799", - "osName": "centos", - "os_spectro_version": "0", - "sshUsername": "centos", - "skipK8sInstall": "false" - }, - "ansibleRoles": [ - "harden_os" - ], - "cloudTypes": ["aws"], - "displayName": "CentOS", - "eol": "2024-06-30", - "group": "", - "kubeManifests": [], - "layer": "os", - "name": "golden-centos-aws", - "version": "7.7.1908" -} -``` - - - - - -### Azure Custom OS Pack - -```yaml -{ - "annotations": { - "imageOffer": "CentOS", - "imagePublisher": "OpenLogic", - "imageSKU": "7.7", - "osName": "centos", - "os_spectro_version": "0", - "sshUsername": "centos", - "skipK8sInstall": "true" - }, - "ansibleRoles": [ - "harden_os" - ], - "cloudTypes": ["azure"], - "displayName": "CentOS", - "eol": "2024-06-30", - "group": "", - "kubeManifests": [], - "layer": "os", - "name": "golden-centos-azure", - "version": "7.7.1908" -} -``` - - - - - -### VMware Custom OS Pack - Local Image - -```yaml -{ - "annotations": { - "folder": "spectro-templates", - "imageId": "/Datacenter/vm/spectro-templates/base-images/centos-7-vanilla-with-vm-tools", - "osName": "centos", - "os_spectro_version": "0", - "sshPassword": "password", - "sshUsername": "root", - "skipK8sInstall": "false" - }, - "ansibleRoles": [ - "harden_os" - ], - "cloudTypes": ["vsphere"], - "displayName": "CentOS", - "eol": "2024-06-30", - "group": "", - "kubeManifests": [], - "layer": "os", - "name": "golden-centos-vsphere", - "version": "7.7.1908" -} -``` - -### VMware Custom OS Pack - Remote Image - -```yaml -{ - "annotations": { - "folder": "spectro-templates", - "imageId": "https://cloud-images.ubuntu.com/releases/18.04/release/ubuntu-18.04-server-cloudimg-amd64.ova", - "osName": "ubuntu", - "os_spectro_version": "0", - "sshUsername": "ubuntu", - "skipK8sInstall": "false" - }, - "ansibleRoles": [ - "harden_os" - ], - "cloudTypes": ["vsphere"], - "displayName": "Ubuntu", - "eol": "2028-04-30", - "group": "LTS", - "kubeManifests": [], - "layer": "os", - "name": "golden-ubuntu-vsphere", - "version": "18.04.4" -} -``` - - - - - -# Ansible Roles - -In all the previous examples, additional customization in the form of an Ansible role called `harden_os` is specified in the pack manifest. The tasks and other files for the implementation of this role need to be included in the pack. 
The final directory structure of the pack would be as follows:

```
./pack.json
./logo.png
./values.yaml
./harden_os
./harden_os/tasks
./harden_os/tasks/main.yml
./harden_os/files
./harden_os/files/sec_harden.sh
```

Ansible roles are optional and only needed if additional runtime customization is required. Once an OS pack is constructed, push it to the pack registry using the Spectro CLI tool.



During the image customization phase of a cluster deployment, failures related to missing packages or package version mismatches could occur when using a custom OS pack. These errors are presented on the console. The image needs to be updated to resolve any such issues.


diff --git a/content/docs/09-registries-and-packs/4-adding-add-on-packs.md b/content/docs/09-registries-and-packs/4-adding-add-on-packs.md
deleted file mode 100644
index 9ef769b13a..0000000000
--- a/content/docs/09-registries-and-packs/4-adding-add-on-packs.md
+++ /dev/null
@@ -1,429 +0,0 @@
---
title: 'Add an Add-on Pack'
metaTitle: 'Add an Add-on Pack'
metaDescription: 'How to create custom made packs using Helm Charts and Manifests in Spectro Cloud'
icon: ''
hideToC: true
fullWidth: false
---

import Tabs from 'shared/components/ui/Tabs';
import WarningBox from 'shared/components/WarningBox';
import InfoBox from 'shared/components/InfoBox';
import PointsOfInterest from 'shared/components/common/PointOfInterest';
import Tooltip from "shared/components/ui/Tooltip";


# Add-on Packs

An Add-on Pack defines the deployment specifics of a Kubernetes application to be installed on a running Kubernetes cluster. Palette provides several Add-on packs out of the box for various layers of the Kubernetes stack. For example:


* **Logging** - Elasticsearch, Fluentd


* **Monitoring** - Kubernetes Dashboard, Prometheus


* **Load Balancers** - Citrix


* **Security** - Dex, Vault, Permissions manager


* **Service Mesh** - Istio


Custom Add-on packs can be built to extend the list of integrations. Two different methods are used in the following examples to create custom add-on packs.
- - - - - -The following example shows how to build the Prometheus-Grafana monitoring pack and push to a pack registry server using the Spectro Cloud CLI: - -1. Create the pack directory named *prometheus-grafana*. - - -2. Create the metadata file named `pack.json`. - - ```json - { - "addonType": "monitoring", - "annotations": { - }, - "ansibleRoles": [ - ], - "cloudTypes": ["all"], - "displayName": "Prometheus-Grafana", - "eol": " ", - "group": " ", - "kubeManifests": [ - ], - "charts": [ - "charts/prometheus-grafana.tgz" - ], - "layer":"addon", - "name": "prometheus-grafana", - "version": "9.7.2" - } - ``` - -3. Download the desired version of the Prometheus-Grafana Helm charts archive. - - -4. Create a subdirectory called **charts** and copy the downloaded Helm chart archive to this directory. Refer to the relative location of this archive in the pack manifest file, `pack.json` as shown in step 2. - - -5. Create a file called `values.yaml` for configurable chart parameters. This can be a subset of the `values.yaml` file shipped within the chart. Copy the entire file as is, if all chart parameters need to be made configurable. For the Prometheus-Grafana pack, the `values.yaml` could look like this: - - ```yaml - pack: - #The namespace (on the target cluster) to install this chart - #When not found, a new namespace will be created - namespace: "monitoring" - - charts: - prometheus-operator: - - # Default values for prometheus-operator. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. - - ## Provide a name in place of prometheus-operator for `app:` labels - ## - nameOverride: "" - - ## Provide a name to substitute for the full names of resources - ## - fullnameOverride: "prometheus-operator" - - ## Labels to apply to all resources - ## - commonLabels: {} - # scmhash: abc123 - # myLabel: aakkmd - - ## Create default rules for monitoring the cluster - ## - defaultRules: - create: true - rules: - alertmanager: true - etcd: true - general: true - k8s: true - kubeApiserver: true - kubePrometheusNodeAlerting: true - kubePrometheusNodeRecording: true - kubernetesAbsent: true - kubernetesApps: true - kubernetesResources: true - kubernetesStorage: true - kubernetesSystem: true - kubeScheduler: true - network: true - node: true - prometheus: true - prometheusOperator: true - time: true - - ## Labels for default rules - labels: {} - ## Annotations for default rules - annotations: {} - - ## Provide custom recording or alerting rules to be deployed into the cluster. - ## - additionalPrometheusRules: [] - # - name: my-rule-file - # groups: - # - name: my_group - # rules: - # - record: my_record - # expr: 100 * my_record - - ## - global: - rbac: - create: true - pspEnabled: true - - ## Reference to one or more secrets to be used when pulling images - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - imagePullSecrets: [] - # - name: "image-pull-secret" - - ## Configuration for alertmanager - ## ref: https://prometheus.io/docs/alerting/alertmanager/ - ## - alertmanager: - - ## Deploy alertmanager - ## - enabled: true - - ## Service account for Alertmanager to use. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - create: true - name: "" - - ## Configure pod disruption budgets for Alertmanager - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget - ## This configuration is immutable once created and will require the PDB to be deleted to be changed - ## https://github.com/kubernetes/kubernetes/issues/45398 - ## - podDisruptionBudget: - enabled: false - minAvailable: 1 - maxUnavailable: "" - - ## Alertmanager configuration directives - ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file - ## https://prometheus.io/webtools/alerting/routing-tree-editor/ - ## - config: - global: - resolve_timeout: 5m - route: - group_by: ['job'] - group_wait: 30s - group_interval: 5m - repeat_interval: 12h - receiver: 'null' - routes: - - match: - alertname: Watchdog - receiver: 'null' - receivers: - - name: 'null' - - ## Pass the Alertmanager configuration directives through Helm's templating - ## engine. If the Alertmanager configuration contains Alertmanager templates, - ## they'll need to be properly escaped so that they are not interpreted by - ## Helm - ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function - ## https://prometheus.io/docs/alerting/configuration/#%3Ctmpl_string%3E - ## https://prometheus.io/docs/alerting/notifications/ - ## https://prometheus.io/docs/alerting/notification_examples/ - tplConfig: false - - ## Alertmanager template files to format alerts - ## ref: https://prometheus.io/docs/alerting/notifications/ - ## https://prometheus.io/docs/alerting/notification_examples/ - ## - templateFiles: {} - # - ## An example template: - # template_1.tmpl: |- - # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }} - # - # {{ define "slack.myorg.text" }} - # {{- $root := . -}} - # {{ range .Alerts }} - # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}` - # *Cluster:* {{ template "cluster" $root }} - # *Description:* {{ .Annotations.description }} - # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:> - # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:> - # *Details:* - # {{ range .Labels.SortedPairs }} * *{{ .Name }}:* `{{ .Value }}` - # {{ end }} - - ingress: - enabled: false - ... - ``` - -6. Log in to the pack registry using the following command: - - ```bash - spectro registry login [REGISTRY_SERVER] - ``` - -7. Using the Spectro CLI, push the newly built pack to the pack registry: - - ```bash - spectro pack push prometheus-grafana --registry-server [REGISTRY-SERVER] - ``` - -
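If you want to confirm that the chart-based pack was uploaded, you can reuse the `ls` command described in the CLI reference. This is an optional check and assumes the same registry server placeholder used above.

```bash
# Optional check: list the packs available in the target registry.
spectro pack ls --registry-server [REGISTRY-SERVER]
```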
- -
- - - -Add-on packs can be built using Kubernetes manifests as well. These manifests contain deployment specifications for Kubernetes objects like pods, services, deployments, namespaces, or secrets. - -The example below shows how to build the Permission Manager auth pack and push to the pack registry server using the Spectro Cloud CLI. - -1. Create the pack directory named **permission-manager**. - - -2. Create the metadata file named `pack.json`. - - ```json - { - "addonType":"authentication", - "cloudTypes": ["all"], - "displayName": "Permission Manager", - "kubeManifests": [ - "manifests/permission-manager.yaml" - ], - "layer": "addon", - "name": "permission-manager", - "version": "1.1.0" - } - ``` - -3. Create a sub-directory called **manifests**. - - -4. Copy the desired manifest files to the **manifests** directory and reference them in `pack.json` as shown in step 2. If the configurability of the manifest is desired, then the manifest files must be templatized to introduce parameters. For example, _{{.Values.namespace}}_. These parameters are defined with default values in the `values.yaml` file and can be overridden in the cluster profile. - - **permission-manager.yaml (partial)** - ```yaml - apiVersion: v1 - kind: Namespace - metadata: - name: {{ .Values.namespace | quote }} - - --- - - apiVersion: v1 - kind: Secret - metadata: - name: auth-password-secret - namespace: {{ .Values.namespace | quote }} - type: Opaque - stringData: - password: {{ .Values.authPassword }} - - --- - - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: template-namespaced-resources___operator - rules: - - apiGroups: - - "*" - resources: - - "*" - verbs: - - "*" - - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: template-namespaced-resources___developer - rules: - - apiGroups: - - "*" - resources: - # - "bindings" - - "configmaps" - - "endpoints" - # - "limitranges" - - "persistentvolumeclaims" - - "pods" - - "pods/log" - - "pods/portforward" - - "podtemplates" - - "replicationcontrollers" - - "resourcequotas" - - "secrets" - # - "serviceaccounts" - - "services" - # - "controllerrevisions" - # - "statefulsets" - # - "localsubjectaccessreviews" - # - "horizontalpodautoscalers" - # - "cronjobs" - # - "jobs" - # - "leases" - - "events" - - "daemonsets" - - "deployments" - - "replicasets" - - "ingresses" - - "networkpolicies" - - "poddisruptionbudgets" - # - "rolebindings" - # - "roles" - verbs: - - "*" - - --- - - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: template-namespaced-resources___read-only - rules: - - apiGroups: - - "*" - resources: - - "configmaps" - - "endpoints" - - "persistentvolumeclaims" - - "pods" - - "pods/log" - - "pods/portforward" - - "podtemplates" - - "replicationcontrollers" - - "resourcequotas" - - "secrets" - - "services" - - "statefulsets" - - "cronjobs" - - "jobs" - - "events" - - "daemonsets" - - "deployments" - - "replicasets" - - "ingresses" - - "networkpolicies" - - "poddisruptionbudgets" - verbs: ["get", "list", "watch"] - - --- - ... - ``` - -5. Create a file called `values.yaml` to provide configurable manifest parameters. - - **values.yaml:** - - ```yaml - manifests: - permission-manager: - - #Namespace under which permission-manager will be deployed - namespace: "permission-manager" - - #Log in password for permission-manager - authPassword: "welcome123" - ``` - -6. 
Log in to the pack registry using the following command: - - ```bash - spectro registry login [REGISTRY_SERVER] - ``` - -7. Using Spectro Cloud CLI push the newly built pack to the pack registry: - - ```bash - spectro pack push permission-manager --registry-server [REGISTRY-SERVER] - ``` - - - -
- diff --git a/content/docs/09-registries-and-packs/4.5-deploy-pack.md b/content/docs/09-registries-and-packs/4.5-deploy-pack.md deleted file mode 100644 index e4547162f8..0000000000 --- a/content/docs/09-registries-and-packs/4.5-deploy-pack.md +++ /dev/null @@ -1,947 +0,0 @@ ---- -title: 'Deploy an Add-On Pack' -metaTitle: 'Deploy an Add-On Pack' -metaDescription: 'How to create and deploy an add-on pack using the manifest files or Helm charts in Spectro Cloud.' -icon: '' -hideToC: true -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Create and Deploy a Custom Add-On Pack -Custom add-on packs allow you to deploy Kubernetes applications in clusters and reuse them in multiple deployments. This ensures uniformity across your clusters. The primary use cases for creating custom packs are: - -- Aggregated configuration and application dependencies simplify deployment and consumption. - -- Open-source contributors can add new Kubernetes applications to a custom add-on pack for the community. - -- Enterprises can add proprietary Kubernetes applications to a custom add-on pack. - -In this tutorial, you will create a custom add-on pack to package a sample Kubernetes application, [Hello Universe](https://github.com/spectrocloud/hello-universe#hello-universe), and deploy that application to a cluster. You will learn to create the pack in two ways, using manifest files and Helm charts. - -After defining the custom pack, you will set up a registry server, publish the pack to that registry, and configure the registry server in Palette. Lastly, you will create a cluster profile that contains your custom pack and apply the profile to a cluster using either Palette or Terraform. - - -# Prerequisites -To complete the tutorial, you will need the following items: -
- -1. A Spectro Cloud account. Visit [https://console.spectrocloud.com](https://console.spectrocloud.com) to create an account. - - -2. Tenant admin access to Palette for the purpose of adding a new registry server. - - -3. A cloud account, such as AWS, Azure, or GCP, added to your Palette project settings. - - -4. An SSH key created in the region where you will deploy the cluster. - - -5. [Docker Desktop](https://docs.docker.com/get-docker/) installed on your local machine to start the tutorials container. - - -6. Basic knowledge of Docker containers and Kubernetes manifest file attributes. - - - - -# Set Up the Tutorial Environment -You will work in a Docker container pre-configured with the necessary tools for this tutorial. However, you can practice this tutorial in any `linux/amd64` or `x86_64` environment by installing the [necessary tools](https://github.com/spectrocloud/tutorials/blob/main/docs/docker.md#docker) and cloning the [GitHub repository](https://github.com/spectrocloud/tutorials/) that contains the tutorial files. Here are the steps to start the tutorials container. -
- -Start the Docker Desktop on your local machine and ensure the daemon is available by issuing a command to list the currently active containers. - -
- -```bash -docker ps -``` - -Download the `ghcr.io/spectrocloud/tutorials:1.0.4` image to your local machine. The Docker image includes the necessary tools. -
- -```bash -docker pull ghcr.io/spectrocloud/tutorials:1.0.4 -``` - -Next, start the container, and open a bash session into it. -
- -```bash -docker run --name tutorialContainer --publish 7000:5000 --interactive --tty ghcr.io/spectrocloud/tutorials:1.0.4 bash -``` - -If port 7000 on your local machine is unavailable, you can use any other port of your choice. -
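For example, the sketch below maps host port 7001 instead, assuming that port is free on your machine; the container port stays 5000.

```bash
# Example only: use a different host port if 7000 is already taken.
docker run --name tutorialContainer --publish 7001:5000 --interactive --tty ghcr.io/spectrocloud/tutorials:1.0.4 bash
```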
- - - -Wait to exit the container until the tutorial is complete. Otherwise, you may lose your progress. - - - - - -## Tools and Starter Code -After opening a bash session in the active container, verify that the tools necessary for this tutorial are installed. -
- -Check the Spectro CLI version. -
- -```bash -spectro version -``` - -Check the Spectro registry server version. -
- -```bash -registry --version -``` - -Check the Terraform version. -
- -```bash -terraform --version -``` - -In addition to these tools, the tutorials container has other tools, such as `ngrok`, `git`, and `nano`. - -Examine the directories that pertain to the current tutorial in the **root** directory. -
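If you want to confirm these folders are present before exploring them, a quick listing works; the reference layout follows.

```bash
# Optional check: list the tutorial directories at the container's root.
ls /packs /terraform
```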
- -```bash -. -├── packs -│ └── hello-universe-pack # Contains the pack files -└── terraform - └── pack-tf # Contains the .tf files for creating Spectro Cloud resources -``` -The **packs** directory contains the pack files. The **terraform** directory contains the Terraform files used to create Spectro Cloud resources, which you will use later in this tutorial. - - -# Build a Pack - -Building a custom pack requires defining specific files. -As outlined in the [Adding Add-on Packs](/registries-and-packs/adding-add-on-packs) guide, you can define a custom pack in two ways: using manifest files or Helm charts. The file structure varies for manifest-based packs and Helm chart-based packs. Below is the reference file structure for each: -
- - - - - -
- -```bash -. -├── pack.json # Mandatory -├── values.yaml # Mandatory -├── manifests # Mandatory - ├── manifest-1.yaml - ├── manifest-2.yaml -│ └── manifest-3.yaml -├── logo.png # Mandatory -└── README.md # Optional -``` - -
- - - -
- -```bash -. -├── pack.json # Mandatory -├── values.yaml # Mandatory. Pack-level values.yaml file. -├── charts # Mandatory -│ ├── chart-1 # Can have nested charts -│ │ ├── Chart.yaml -│ │ ├── templates -│ │ │ ├── template-1.yaml -│ │ │ └── template-2.yaml -│ │ └── values.yaml # Chart-level values.yaml file. -│ ├── chart-1.tgz -│ ├── chart-2 -│ │ ├── Chart.yaml -│ │ ├── templates -│ │ │ ├── template-1.yaml -│ │ │ └── template-2.yaml -│ │ └── values.yaml # Chart-level values.yaml file. -│ └── chart-2.tgz -├── logo.png # Mandatory -└── README.md # Optional -``` - -
- -
- -
- -To simplify this tutorial, we provide you with the manifest file for the *Hello Universe* application in the **packs/hello-universe-pack** folder. Change the directory to the **packs/hello-universe-pack** folder. -
- -```bash -cd /packs/hello-universe-pack -``` -Ensure you have the following files in the current directory. -
- -```bash -. -├── pack.json # Mandatory -├── values.yaml # Mandatory -├── manifests # Mandatory -│ └── hello-universe.yaml -├── logo.png # Mandatory -└── README.md # Optional -``` -
- -## Pack File Structure - -Go ahead and review each of the following five files in the pack. -
- -* **pack.json** - This file contains the pack metadata such as `addonType`, `cloudTypes`, and the `kubeManifests` array that contains the list of manifest files: `layer`, `name`, and `version`. Refer to the [JSON Schema](/registries-and-packs/add-custom-packs#jsonschema) for a list of attributes and respective data types. The schema validation will happen when you push a pack to the registry. - -
- - ```json - { - "addonType":"app services", - "cloudTypes": [ "all" ], - "displayName": "Hello Universe", - "kubeManifests": [ - "manifests/hello-universe.yaml" - ], - "layer": "addon", - "name": "hellouniverse", - "version": "1.0.0" - } - ``` - -
- - - -* **values.yaml** - This file contains configurable parameters you can define while adding the current pack to a cluster profile. In the **values.yaml** file for this tutorial, the `pack/namespace` attribute specifies the namespace on the target cluster to deploy the pack. If the **values.yaml** specifies a namespace value, then Palette first checks to see if the namespace has been created. If so, Palette uses the existing namespace. If the namespace has not been created, Palette creates a new one using the value specified in the YAML file. - - If the **values.yaml** does not specify a namespace value, Palette deploys the application to the default namespace. - - The `manifests` section exposes the configurable parameters for each manifest file listed in the **manifests** directory. For example, in the sample code snippet below, the `hello-universe` attribute exposes the `registry`, `repository`, and `tag` parameters. -
- - ```yaml - pack: - namespace: "hello-universe" - manifests: - hello-universe: - registry: ghcr.io - repository: spectrocloud/hello-universe - tag: 1.0.12 - ``` - -
- - You can optionally define *presets*, which are predefined values to use in the **values.yaml**. You define presets in a separate **presets.yaml** file. The presets become available when you create the cluster profile. Presets facilitate configuring the profile and avoid errors that can happen by manually editing the **values.yaml** file. Refer [Pack Presets](/registries-and-packs/pack-constraints#packpresets) for details and examples of how to define presets. - - The example below shows the parameters you can configure in the **values.yaml** for the `hello-universe` manifest when you create the cluster profile. - -
- - ![Screenshot of the configurable parameters in the values.yaml file.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-values-yaml.png ) - -
- -* **manifests** - This directory contains the manifest files for your Kubernetes application. This tutorial has only one file, **hello-universe.yaml**. Note that the **values.yaml** file has a corresponding `manifests/hello-universe` element with the same name as the YAML file. -
- -* **logo.png** - This file contains a logo that displays when you create a cluster profile. -
- - -* **README.md** - This file may contain the pack description, purpose, authors, and other relevant information. The README in the current example introduces the application used in the pack. -
- - -After finalizing all files in the pack directory, the next step is to set up a registry server and publish the pack to that registry, where you can access it directly from Palette. - -
- -# Set Up the Registry Server - -The tutorials environment already has the Spectro registry service and other necessary tools available. The following sections will guide you to start the registry server, expose the service to the external world using [Ngrok](https://ngrok.com/) reverse proxy, and log in to the registry server to push your custom add-on pack to it. - -## Start and Expose the Registry Server -Start the registry server by issuing the following command from the bash session you opened into the tutorials container. -
- -```bash -registry serve /etc/spectro/config.yml > /var/log/registry.log 2>&1 & -``` - -The registry server will start in HTTP mode (not HTTPS). Refer to the [Add a Custom Registry](/registries-and-packs/adding-a-custom-registry) guide to learn more about deploying an HTTPS registry server. - - -Next, expose the registry server to the public so that you can configure it later in Palette. Use Ngrok reverse proxy to expose the registry server listening on port 5000 via an HTTP tunnel using the following command. -
- -```bash -ngrok http 5000 --log-level debug -``` - -The command above will reserve the current bash session and display the status of each HTTP request made to the Ngrok server later in this tutorial. The screenshot below shows the registry server successfully exposed via Ngrok. - -
- -![Screenshot of registry server exposed via ngrok](/tutorials/deploy-pack/registries-and-packs_deploy-pack_ngrok-start.png ) - -
- -Verify the registry server is accessible from outside the tutorials container by visiting the `/health` endpoint. Access the *https://Your-URL-Here/health* in your host browser. Replace the base URL with the Ngrok URL output you received. You should receive a `{"status":"UP"}` response. - -
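You can also run the same check from a terminal. The snippet below is an example only; replace the placeholder with the forwarding URL that Ngrok printed for your session.

```bash
# Example only: query the health endpoint through the Ngrok tunnel.
curl https://[YOUR-NGROK-URL]/health
# Expected response: {"status":"UP"}
```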
- -## Log in to the Registry Server -Once the registry server's `/health` endpoint shows `UP` status, the next step is to log in and then push the pack to it. The pack you will push is in the tutorials container. Open another bash session into the tutorials container from your local terminal. -
- -```bash -docker exec -it tutorialContainer bash -``` - -Log in to the registry server using Ngrok's public URL assigned to you. Issue the command below, but replace the URL with your Ngrok URL. The command below uses these credentials to log in to the registry server: `{username: admin, password: admin}`. -
- -```bash -spectro registry login --insecure --default --username admin --password admin \ -f59e-49-36-220-143.ngrok-free.app -``` - - - -Do not use https:// or http:// keyword in the Ngrok URL. Using either of these keywords will result in an authorization issue. - - - - -You will receive a `Login Succeeded` response upon successful login. -
- -```bash -# Output condensed for readability -WARNING! Your password will be stored unencrypted in /root/.spectro/config.json. -Login Succeeded -``` -
- - -## Push the Pack to the Registry Server -When you are logged in, push the pack to the registry server using the following command. -
- -```bash -spectro pack push /packs/hello-universe-pack/ -``` - -You can verify that the pack is in the registry by using the `ls` command. This command lists all packs in the registry. -
- -```bash -spectro pack ls -``` - -Verify the pack you pushed is listed, as shown in the screenshot below. - -
- -![Screenshot of spectro pack ls](/tutorials/deploy-pack/registries-and-packs_deploy-pack_pack-push.png) - -
- -If you need help with the Spectro CLI commands, such as deleting a pack, refer to the [Spectro CLI commands](/registries-and-packs/spectro-cli-reference#commands) guide. -
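For instance, if you later need to remove the tutorial pack from the registry, the tag delete command covered in the CLI reference applies. The sketch below assumes the pack name and version defined in this tutorial's **pack.json**; per the reference, a pack with no remaining tags is removed automatically.

```bash
# Example only: delete the tutorial pack's tag from the default registry.
spectro pack tag delete hellouniverse:1.0.0
```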
- -## Configure the Registry Server in Palette -After you push the pack to the registry server, log in to Palette and configure the registry service so that you can access it when you create your cluster profile. - - -Log in to [Palette](https://console.spectrocloud.com), and switch to the tenant admin view. -
- -![Screenshot of Palette tenant settings.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_tenant-admin.png) - -
Navigate to the **Tenant Settings** > **Registries** > **Pack Registries** section. Click on **Add New Pack Registry**. Palette will open a pop-up window asking for the fields to configure a custom pack registry, as highlighted in the screenshot below.

![A screenshot highlighting the fields to configure a custom pack registry. ](/registries-and-packs_adding-a-custom-registry-tls_certificate.png)


Provide the pack registry name, endpoint, and user credentials in the pop-up window. For a consistent experience in this tutorial, we suggest using the name **private-pack-registry**. Use your Ngrok URL as the pack registry endpoint. Make sure to use an "https://" prefix in the pack registry endpoint.

In the **TLS Configuration** section, select the **Insecure Skip TLS Verify** checkbox. This tutorial does not establish a secure HTTPS connection between Palette and your pack registry server. Therefore, you can skip the TLS verification. Instead, this tutorial uses an unencrypted HTTP connection. However, in a production environment, you can upload your certificate in the **TLS Configuration** section if you need Palette to have a secure HTTPS connection while communicating with the pack registry server.

Click on **Validate** to ensure the URL and credentials are correct, then click on **Confirm** to finish configuring the registry server.
- -![Screenshot of registry server edit option in Palette tenant settings.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-edit.png) - -
- - -Palette syncs the registry server periodically. However, you can sync it manually the first time you add a server by clicking the **three-dot Menu** next to the registry server name and selecting **Sync**. - -
- -![Screenshot of registry server sync in Palette](/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-sync.png) - -
- - -# Create a Cluster Profile and Deploy a Cluster - -This tutorial guides you to create a cluster profile for AWS. However, you can choose any other cloud service provider, provided you configure the following two items: -
- -* **Cloud account**: A cloud account added to your Palette project settings. - - The AWS cloud account name in this tutorial example is **spectro-cloud**. You can choose another name if desired. The screenshot below shows how to add and verify the AWS cloud account with your project. Navigate to **Project Settings** > **Cloud Accounts** > **AWS** > **Add AWS Account** in Palette. Check out the [Register and Manage AWS Accounts](/clusters/public-cloud/aws/add-aws-accounts) guide for additional help. - -
- - ![Screenshot of Cloud Accounts in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_palette-cloud-account.png) - -
- - -* **SSH key**: An SSH key created in the region where you will deploy the cluster. - - This tutorial example will deploy the cluster in the **us-east-2** region, and the SSH key name used in this example is **aws_key_sk_us_east_2**. You must choose the desired region and the available SSH key name from your AWS account. - -
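If you want to verify that a key pair exists in your chosen region before deploying, the AWS CLI can list them. This is an optional check that assumes the AWS CLI is installed and configured with credentials for the same account.

```bash
# Optional check: list the EC2 key pairs available in the target region.
aws ec2 describe-key-pairs --region us-east-2
```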
- -Create a cluster profile and deploy it to a cluster using either Palette or Terraform code. -
- - - - - - -## Create a Cluster Profile -Switch to the **Default** project scope for creating a cluster profile. -
- -![Screenshot of the Palette Default scope.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_default-scope.png) - -
- -Select the **Profile** section in the left **Main Menu** to create a cluster profile that will combine the core infrastructure and add-on layers. Click on the **Add Cluster Profile** button, and provide the details in the wizard that follows. The wizard displays the following sections. -
- -### Basic Information -Use the following values in the **Basic Information** section. - -|**Field**|**Value**| -|---|---| -|Name|pack-tutorial-profile| -|Version|`1.0.0`| -|Description|Cluster profile as part of the pack tutorial.| -|Type|Full| -|Tags|`spectro-cloud-education, app:hello-universe, terraform_managed:true`| - -Click on **Next** to continue. -
- -### Cloud Type -In the **Cloud Type** section, choose AWS as the infrastructure provider for this tutorial, and click on **Next** at the bottom to move on to the next section. -
- - - -If you choose a different cloud service provider, the core infrastructure layers options, as outlined in the **Profile Layers** section below, will differ from this tutorial. - - - -
- -### Profile Layers -In the **Profile Layers** section, add the following core infrastructure layers if you have chosen the AWS cloud service provider. To deploy your resource to Azure or Google Cloud, use the core infrastructure layers outlined in [Cloud Service Provider Configurations](https://github.com/spectrocloud/tutorials/tree/main/terraform/pack-tf/README.md#cloud-service-provider-configurations). - -|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| -|---|---|---|---| -|OS|Public Repo|Ubuntu|`LTS__20.4.x`| -|Kubernetes|Public Repo|Kubernetes|`1.24.x`| -|Network|Public Repo|Calico|`3.25.x`| -|Storage|Public Repo|Amazon EBS CSI|`1.16.x`| - -As you add each layer, click on the **Next layer** button. After you add the **Storage** layer, click on the **Confirm** button to complete the core infrastructure stack. Palette displays the newly created infrastructure profile as a layered diagram. You can select any layer to make further edits or change the version if desired. - -Now you are ready to add the add-on layers. Click the **Add New Pack** button. - -Add the Spectro Proxy pack to enable a reverse proxy to connect to the cluster's API. Adding this pack is *optional*, but it will help connect your local machine to the cluster's API for debugging. -Refer to the [Spectro Proxy](/integrations/frp/) guide for more details. - -|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| -|---|---|---|---| -|Authentication | Public Repo| Spectro Proxy | `1.3.x`| - -Click on the **Confirm & Create** button to finish adding the Spectro Proxy pack. Also, add the following certificate Subject Alternative Name (SAN) value to the Kubernetes pack under the `apiServer` parameter section to configure the Spectro Proxy pack. -
- -```yaml -certSANs: - - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" -``` -
- -![Screenshot of the certificate Subject Alternative Name.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-certsan.png) - -
- -Next, add the following **Hello Universe** pack. This is the custom add-on pack you defined and pushed to the **private-pack-registry** earlier in this tutorial. - -|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| -|---|---|---|---| -|App Services | private-pack-registry | Hello Universe | `1.0.x` | - - -Click on the **Confirm & Create** button to finish adding the Hello Universe pack. - - -If there are no errors or compatibility issues, Palette displays the newly created full cluster profile. Verify the layers you added, and click **Next**. - - -
- -![Screenshot of the Profile Layers success.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-layer.png) - -

- - -### Review -Review once more and click **Finish Configuration** to create the cluster profile. -
### Review
Review once more and click **Finish Configuration** to create the cluster profile.

## Create a Cluster
From the **Profile** page, click on the newly created cluster profile to view its details page. Palette displays all the layers and allows you to edit any of them.

Click the **Deploy** button to deploy a new cluster. The cluster deployment wizard will display the following sections.
- -### Basic Information -Use the following values in the first section, **Basic Information**. - -|**Field**|**Value**| -|---|---| -|Cluster name| pack-tutorial-cluster | -|Description| Cluster as part of the pack tutorial.| -|Tags|`spectro-cloud-education, app:hello-universe, terraform_managed:true`| -|Cloud Account|spectro-cloud| - -Note that the AWS cloud account name in this tutorial example is **spectro-cloud**. If you used a different cloud account name, choose the name configured in your Palette's project settings. - -Click **Next** to continue. - -
- -### Parameters -The **Parameters** section allows you to change the profile configurations. For example, clicking on the **Hello Universe 1.0.x** layer allows you to configure the `registry`, `repository`, and `tag` parameters defined in the **values.yaml** file. -
- -![Screenshot of the Cluster layers.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-layers.png) - -
- -Keep the default values and click **Next**. - -
- -### Cluster config -In the **Cluster config** section, ensure the **Static Placement** field is unchecked. If checked, the **Static Placement** will deploy the cluster in an existing VPC, and you will need the [Amazon Resource Names](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html) (ARNs) for the existing subnets, roles, and other resources. For this tutorial, we will use dynamic placement, where Palette creates a new VPC and all other resources needed for the cluster. - -For the **Region** field, select the region of your choice. The tutorial example will deploy the cluster in the **us-east-2** region. For the **SSH Key Pair Name** field, choose the SSH key pair name from the selected region. You must have an SSH key created already in the AWS region where you will deploy the cluster. - -Click **Next** to continue. - -
### Nodes config
In the **Nodes config** section, provide the details for the master and the worker pools. For this tutorial, you can use the following minimal configuration:

|**Field** | **Value for the master-pool**| **Value for the worker-pool**|
|---| --- | ---|
|Node pool name| master-pool | worker-pool |
|Number of nodes in the pool| `1` | `1` |
|Allow worker capability| Checked | Not applicable |
|Enable Autoscaler | Not applicable | No |
|Rolling update | Not applicable | Expand First. Launch a new node first, then shut down the old one. |

Keep the **Cloud Configuration** the same for the master and worker pools.

|**Field** | **Value**|
|---| --- |
|Instance Type | General purpose `m4.xlarge`. A minimum allocation of four CPU cores is required for the master node. |
|Availability zones | Choose any *one* availability zone. This tutorial example will deploy to the `us-east-2a` availability zone. |
|Disk size | 60 GiB |

Click **Next** to continue.
- -### Settings -The **Settings** section displays options for OS patching, scheduled scans, scheduled backups, and cluster role binding. Use the default values, and click on the **Validate** button. - -
- -### Review -Review all configurations in this section. The **Review** page displays the cluster name, tags, cloud account name, node pools, layers, and an estimated hourly cost. If everything looks good, click on the **Finish Configuration** button to finish deploying the cluster. Deployment may take up to *20 minutes* to finish. - -While deployment is in progress, Palette displays the cluster status as **Provisioning**. While you wait for the cluster to finish deploying, you can explore the various tabs on the cluster details page, such as **Overview**, **Workloads**, and **Events**. - -
- -
- - - -The [Spectro Cloud Terraform provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) allows you to create and manage Palette resources using Infrastructure as Code (IaC). This offers such advantages as automating infrastructure, facilitating collaboration, documenting infrastructure, and keeping all infrastructure in a single source of truth. - -## Starter Code -Navigate back to your tutorials container bash session to locate the starter Terraform files. If you have closed the terminal session, you can reopen another bash session in the tutorials container using the following command. -
- -```bash -docker exec -it tutorialContainer bash -``` - -Switch to the **/terraform/pack-tf** directory, which contains the Terraform code for this tutorial. -
- -```bash -cd /terraform/pack-tf -``` - -## Set Up the Spectro Cloud API Key - -To get started with the Terraform code, you need a Spectro Cloud API key to authenticate and interact with the Palette API endpoint. To add a new API key, log in to Palette, click on the **User Menu** at the top right, and select **My API Keys**, as shown in the screenshot below. - -
- -![Screenshot of generating an API key in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_generate-api-key.png ) - -
- -Below are the steps to add and export an API key: - - -1. Fill in the required fields, such as the API key name and expiration date, and confirm your changes. - - - -2. Copy the key value to your clipboard, and switch back to the tutorials container environment. - - - -3. Export the API key as an environment variable in the tutorials container bash session so the Terraform code can authenticate with Palette API. -
- - ```bash - export SPECTROCLOUD_APIKEY= - ``` -
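You can optionally confirm that the variable is set in your current shell before moving on. This is only a convenience check and assumes a POSIX-compatible shell.

```bash
# Prints the value of the variable; an empty result means the export did not take effect.
printenv SPECTROCLOUD_APIKEY
```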
- -## Review Terraform Files -Ensure you have the following files in the current working directory. -
- -```bash -. -├── profile.tf # Cluster profile resource -├── cluster.tf # Cluster resource -├── data.tf # Spectro Cloud data resources -├── inputs.tf # Input variables -├── terraform.tfvars # Variable definitions file -├── outputs.tf # Output variables -└── provider.tf # Spectro Cloud Terraform provider -``` - -Note that the Terraform code will deploy the resources to **AWS**. - -We recommend you explore all Terraform files. Below is a high-level overview of each file: -
- -- **profile.tf** - contains the configuration for the `spectrocloud_cluster_profile` resource. Review the core infrastructure layers that make up the `spectrocloud_cluster_profile` resource. - - - -- **cluster.tf** - contains the configuration for the `spectrocloud_cluster_aws` resource. The cluster resource depends upon the `spectrocloud_cluster_profile` resource. - - - -- **data.tf** - contains the configuration for the resources to retrieve data from Palette dynamically. The table below lists the pack details required for each pack layer in order to deploy the `spectrocloud_cluster_profile` resource. - - |**Pack Type**|**Registry**|**Pack Name**|**Tag**| **Version** | - |---|---|---|---|---| - |OS|Public Repo|`ubuntu-aws`|`LTS__20.4.x`| `20.04`| - |Kubernetes|Public Repo|`kubernetes`|`1.24.x`| `1.24.10` | - |Network|Public Repo|`cni-calico`|`3.25.x`|`3.25.0`| - |Storage|Public Repo|`csi-aws-ebs`|`1.16.x`|`1.16.0`| - - Note that using this Terraform code will deploy the resources to AWS. To deploy your resources to Azure or Google Cloud, use the layer details outlined in [Cloud Service Provider Configurations](https://github.com/spectrocloud/tutorials/tree/main/terraform/pack-tf/README.md#cloud-service-provider-configurations). - - - -- **inputs.tf** - contains the variables used in the tutorial, such as the names of the cluster profile, cluster, cloud account, SSH key name, AWS region, pack name, and registry server. - - Some variables have a default value, but you *must* provide the values for the `cluster_cloud_account_aws_name`, `aws_region_name`, `ssh_key_name`, and `private_pack_registry` variables. You will find a `#ToDo` tag next to each variable you must update. Provide the values for these variables in a separate file, **terraform.tfvars**. Use default values for the remaining variables. - - - -- **terraform.tfvars** - contains the variable definitions. The list of variables is outlined in the code block below. You *must* specify the values for all variables that are marked `"REPLACE ME"`. Read the inline comments below to understand each variable. - - - For example, the value for `cluster_cloud_account_aws_name` will be the name of the cloud account added to your Palette project settings. In this tutorial example, the cloud account name is **spectro-cloud**. - - - For `aws_region_name`, you can choose any [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) for your deployment. This tutorial example uses the **us-east-2** region. - - - The value for `ssh_key_name` will be the name of the SSH key available in the region where you will deploy the cluster. The SSH key name used in this example is **aws_key_sk_us_east_2**. - - - Lastly, provide your registry server name for the `private_pack_registry` variable. You can provide **private-pack-registry** as the value if you have followed the same naming convention as this tutorial. -
- - ```bash - cluster_cloud_account_aws_name = "REPLACE ME" # Name of the cloud account added to your Palette project settings - aws_region_name = "REPLACE ME" # Use "us-east-2" or any other AWS region - ssh_key_name = "REPLACE ME" # Name of the SSH key available in the region where you will deploy the cluster - private_pack_registry = "REPLACE ME" # Your registry server name. This tutorial uses "private-pack-registry". - ``` - - - -- **outputs.tf** - contains the output variables to expose information. - - - -- **provider.tf** - contains the provider configuration and version. - -
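For orientation, a minimal **provider.tf** for this kind of setup might look like the sketch below. The version constraint and project name are placeholders rather than the tutorial's actual values, and the API key is supplied through the `SPECTROCLOUD_APIKEY` environment variable you exported earlier instead of being hard-coded.

```hcl
terraform {
  required_providers {
    spectrocloud = {
      source  = "spectrocloud/spectrocloud"
      # Placeholder constraint. Use the version pinned in the tutorial repository.
      version = ">= 0.13.0"
    }
  }
}

provider "spectrocloud" {
  # Placeholder. Set this to the Palette project that should own the resources.
  project_name = "Default"
}
```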
- -## Deploy Terraform -After you update the **terraform.tfvars** file and carefully review the other files, initialize the Terraform provider. -
- -```bash -terraform init -``` - -The `init` command downloads the plugins and providers declared in the **provider.tf** file. Next, preview the resources Terraform will create. -
- -```bash -terraform plan -``` - -The output displays the resources Terraform will create when you apply the plan. -
- -```bash -# Output condensed for readability -Plan: 2 to add, 0 to change, 0 to destroy. -``` - -Finish creating all the resources. -
- -```bash -terraform apply -auto-approve -``` - -It can take up to 20 minutes to provision the cluster. When cluster provisioning completes, the following message displays. -
- -```bash -# Output condensed for readability -Apply complete! Resources: 2 added, 0 changed, 0 destroyed. -``` - -You can observe the cluster deployment progress by navigating back to Palette. -
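If you prefer to stay in the terminal, you can also inspect what Terraform created. The command below prints the output variables defined in **outputs.tf**; the exact names and values depend on the tutorial code.

```bash
terraform output
```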
- - -## Check the In-Progress Deployment -Log in to [Palette](https://console.spectrocloud.com/), and navigate to the **Profiles** section in the left **Main Menu**. If the Terraform deployment is successful, the newly created cluster profile is displayed as shown in the screenshot below. - -
- -![Screenshot of the successful Profile in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_verify-profile.png) - -
- -
- -
- -
- -# Validate -In Palette, navigate to the left **Main Menu** and select **Clusters**. Next, select your cluster to display the cluster Overview page and monitor cluster provisioning progress. - -
- -![Screenshot of the cluster health.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-health.png) - -
- -When the cluster status displays **Running** and **Healthy**, you can access the application from the exposed service URL on the displayed port. For the Hello Universe application, port 8080 is exposed. Click on the URL to access the application. -
- - - -We recommend waiting to click on the service URL, as it takes one to three minutes for DNS to properly resolve the public load balancer URL. This prevents the browser from caching an unresolved DNS request. - - - -
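If you want to verify the service from the command line instead of the browser, you can send a quick request once DNS has resolved. Replace the placeholder with the service URL shown in Palette.

```bash
# The placeholder <SERVICE_URL> is illustrative. Use the URL displayed on the cluster Overview page.
curl --head "http://<SERVICE_URL>:8080"
```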
- -![Screenshot of successfully accessing the application using the load balancer URL.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_success.png) - -
- -You can also look at real-time metrics, such as CPU and memory consumption, in the cluster's **Overview** tab in Palette. - -
- -![Screenshot of the cluster metrics.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-metrics.png) - -
- -You have successfully deployed the Hello Universe application to the cluster using your custom pack. - -
- -# Cleanup -Delete the cluster, cluster profile, and registry server, and remove the registry service configuration from Palette's settings. - -The following steps will guide you in cleaning up your environment. Follow the steps for Palette if you used Palette to deploy the cluster. Use Terraform commands to delete the cluster if you used Terraform for deployment. - -
- - - - - -
- -## Delete the Cluster and Profile using Palette -Navigate to the **Clusters** section in Palette's left **Main Menu**, and view the details page of the **pack-tutorial-cluster**. To delete the cluster, click on the **Settings** button to expand the **drop-down Menu**, and select the **Delete Cluster** option. Palette prompts you to enter the cluster name and confirm the delete action. Type the cluster name to proceed with the deletion. - -
- -![Screenshot of deleting the cluster in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_delete-cluster.png) - -
- -The cluster status displays **Deleting**. Deletion takes up to 10 minutes. -
- - - -If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for force deletion. Navigate to the cluster's details page and click on **Settings**. Select **Force Delete Cluster**. Palette automatically removes clusters that are stuck in the cluster deletion phase for over 24 hours. - - -
- -After you delete the cluster, go ahead and delete the profile. From the left **Main Menu**, click **Profiles** and select the profile to delete. Choose the **Delete** option in the **three-dot Menu**. - -
- -![Screenshot of deleting the profile in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_delete-profile.png) - -
- -Wait for the resources to clean up and ensure they are successfully deleted. - -
- -
- - - -
- -## Delete the Cluster and Profile using Terraform -If you've used Terraform to deploy the cluster, switch back to the tutorials container, and issue the following command from within the **/terraform/pack-tf** directory: -
- -```bash -terraform destroy -auto-approve -``` - -Wait for the resources to clean up. Deleting the Terraform resources may take up to 10 minutes. -
- -```bash -# Output condensed for readability -Destroy complete! Resources: 2 destroyed. -``` - -
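To double-check that nothing is left behind, you can list what remains in the Terraform state. An empty result indicates that all tracked resources were destroyed.

```bash
terraform state list
```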
- -
- -
- -
- -## Delete the Registry Server -After deleting the cluster and cluster profile, navigate to **Tenant Settings** > **Registries** > **Pack Registries** to delete the registry service configuration from Palette. -
- -![Screenshot of registry server delete in Palette](/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-delete.png) - -
- -Stop the registry server by closing the tutorials container bash session that serves the Ngrok reverse proxy server. At this point, you can close all the bash sessions. To remove the container and the image from the local machine, issue the following commands: -
- -```bash -docker container rm --force tutorialContainer -docker image rm --force ghcr.io/spectrocloud/tutorials:1.0.3 -``` - -
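If you want to confirm the cleanup, you can list any remaining matches for the container and image. Both commands should return no entries once the removal has completed.

```bash
docker container ls --all --filter name=tutorialContainer
docker image ls ghcr.io/spectrocloud/tutorials
```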
- - -# Wrap-Up - -In this tutorial, you learned how to create a custom pack using manifest files. You packaged up an application in a custom pack that you pushed to a private registry server and added to Palette. - -Next, you created a cluster profile that included all the core infrastructure layers, such as the OS, Kubernetes distribution, and more. You also added your custom pack to the cluster profile so your application could be deployed to a Kubernetes cluster. - -Packs are the building blocks of cluster profiles, allowing you to customize your Kubernetes clusters. Palette enables you to use different packs to create multiple cluster profiles, each for specific purposes. As a result, you can ensure all Kubernetes deployments contain all the required dependencies and applications without developing complicated deployment scripts. All you need to do is maintain the cluster profiles. - -To learn more about packs in Palette, we encourage you to check out the reference resources below. -
- -- [Custom OS Pack](/registries-and-packs/add-custom-packs#addinganospack) - - -- [Add-on Packs](/registries-and-packs/adding-add-on-packs) - - -- [Pack Constraints](/registries-and-packs/pack-constraints) - -

\ No newline at end of file diff --git a/content/docs/09-registries-and-packs/5-pack-constraints.md b/content/docs/09-registries-and-packs/5-pack-constraints.md deleted file mode 100644 index f298477954..0000000000 --- a/content/docs/09-registries-and-packs/5-pack-constraints.md +++ /dev/null @@ -1,681 +0,0 @@ ---- -title: "Pack Constraints" -metaTitle: "Pack Constraints" -metaDescription: "Description of pack constraints and their usages within Spectro Cloud" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -Pack constraints are a set of rules defined at the pack level to validate the packs for a Profile or a Cluster *before* it gets created or updated. Packs must be validated before the cluster is submitted to ensure a successful deployment. - -
- - - -You can find information about the JSON schema for the pack metadata file in the [JSON schema](/registries-and-packs/add-custom-packs#jsonschema) section of the documentation. - - - - -# Pack Values Constraints - -A Spectro Pack currently supports various configurations through a configuration file called `values.yaml`. The values defined in the config file are applied while deploying the Kubernetes cluster. The values defined in the pack are default values and can be overridden in the Cluster Profile or during the Cluster deployment. - -Since the default pack values can be overridden, users may inadvertently set incorrect values leading to cluster deployment failure. These failures can occur at any point during the cluster deployment process. If the system is capable of detecting invalid pack values before the cluster is submitted for deployment, then deployment failures can be overcome to some extent. - -Pack value constraints are additional information provided through a template file called `schema.yaml` in the pack. They define the schema format of the pack values. The pack constraints framework auto-checks for any schema constraints defined in the pack and validates the pack values. This checking occurs while creating or updating Cluster Profiles and Clusters. - -## Schema Constraints - -Every schema constraint consists of a key name and the schema template. The key name must be the complete path of the parameter which is defined in the config file. - -**Required** - -Defines whether the pack value is optional or required. - -```bash -registry.hostname: - schema: '{{ required }}' -``` - -**Readonly** - -The pack value is not editable if marked as readonly. - -```bash -registry.hostname: - schema: '{{ readonly }}' -``` - - -**Format** - -Defines the pack value format: the value is valid only when the value format matches the format defined in the pack. - -**Format Syntax** - -A format template consists of one or more format types along with the optional regex and number range values. - -```bash -registry.hostname: - schema: '{{ required | format "${FORMAT_TYPE:/REGEX/ OR [NUMBER RANGE] OR [LIST_OPTIONS]}" }}' -``` - - - -The syntax of the regex accepted is the same general syntax used by Perl, Python, and other languages. More precisely, it is the syntax accepted by RE2 and described [here](https://golang.org/s/re2syntax). - - - -**Format Types** - - - - - -The string format type checks if the input value is a string and supports the regex in the template. If regex is specified in the template then the input value must match the regex. - -```bash -registry.hostname: - schema: '{{ format "${string}" }}' -registry.hostname: - schema: '{{ format "${string:/^([a-z0-9].*)$/}" }}' -``` - - - - - -The number format type checks if the input value is a number, and supports the regex and the number range in the template. - -```bash -registry.port: - schema: '{{ format "${number}" }}' -registry.port: - schema: '{{ format "${number:[5000-5005]}" }}' -registry.port: - schema: '{{ format "${number:/^(500[0-5])$/}" }}' -``` - - - - - -The bool format type checks if the input value is true or false. - -```bash -registry.private: - schema: '{{ format "${boolean}" }}' -``` - - - - - -The password format is a string type with masked values in the pack parameters of Cluster profiles and Clusters. 
- -```bash -registry.password: - schema: '{{ format "${password}" }}' -registry.password: - schema: '{{ format "${password:/^([a-z0-9].*)$/}" }}' -``` - - - - - -The list format checks if the input value matches with any of the options specified in the template. - -```bash -registry.type: - schema: '{{ format "${list:[PACK,GIT,CHART]}" }}' -``` - - - - - -The ipv4 format type checks if the input value is a valid ipv4. - -```bash -registry.hostIp: - schema: '{{ format "${ipv4}" }}' -``` - - - - - -The version format type checks if the input value is a semantic version. - -```bash -registry.version: - schema: '{{ required | format "${version}" }}' -``` - - - - - -Hints are optional short descriptions of the parameter. If defined in the schema template, these descriptions are visible in the UI while configuring the pack parameters in the Profile or the Cluster. One or more descriptions can be combined by using the pipe(|) separator. - -```bash -registry.type: - schema: '{{ hints "description A" "description B" }}' -``` - - - - - - - -**Examples** - -Schema constraints can be combined to support multiple validations using a single template. - - - - - -```bash -registry.addresses.$[]: - schema: '{{ required | format "${ipv4} - ${ipv4}" | hints "ip pool range"}}' -``` - -`registry.addresses.$[]` is an array data type in the config file. The schema template defines that the value is required and the format must match - `${ipv4} - ${ipv4}` - -**Examples**: - -10.10.10.10 - 10.10.10.255 → valid - -10.10.10.10 → invalid - -10.10.10.10-10.10.10.255 → invalid - - - - - -```bash -storageType: - schema: '{{ required | format "${string}, ${string:/size=\d+/}" }}' -``` - -**Examples**: - -type-zeroedthick, size=150 → valid - -type-zeroedthick, size=150 → invalid - -type-zeroedthick, size=s → invalid - - - - - -# Pack Dependency Constraints - -Spectro Cloud provides the flexibility to choose any pack of any version in the profile. Clusters are deployed based on the packs selected in the profile. While this works for most of the cases, it is sometimes required to select a minimum or maximum pack version, or to have dependencies between the packs to ensure the Kubernetes cluster is deployed successfully as desired. - -Pack dependency constraints are the rules defined in the pack metadata file `pack.json`. They are used to define the minimum and maximum supported versions, and also to specify which pack is required or not supported. The pack constraints framework auto-checks for any schema constraints defined in the pack and validates the pack values. This checking occurs while creating or updating Cluster Profiles and Clusters. - -## Pack metadata JSON - -Pack dependency constraints must be defined in the `pack.json` file. The sample pack metadata shown below defines the dependencies under `constraints` key. - -```json -{ - "addonType": "system app", - "cloudTypes": [ - "all" - ], - "displayName": "Test Pack", - "kubeManifests": [], - "layer": "addon", - "name": "pack-constraints-test", - "version": "1.0.0", - "constraints": { - "dependencies": [ - { - "packName": "vault", - "minVersion": "0.6.0", - "maxVersion": "", - "type": "optional" - }, - { - "packName": "csi-vsphere-volume", - "minVersion": "1.0.0", - "maxVersion": "", - "type": "notSupported" - }, - { - "packName": "kubernetes", - "minVersion": "1.17.0", - "maxVersion": "1.18.6", - "type": "required" - } - ] - } -} -``` - - - - - -If the minimum and maximum versions are not mentioned, the validation is skipped. 
- - - -## Pack Dependency Attributes - - - - - -Name of the dependent pack. - -**Example**: In the example, the three dependent packs are identified by unique pack names such as `vault`, `csi-vsphere-volume`, and `kubernetes`. - - - - - -Minimum supported dependent pack version, any version below the minimum version is not valid. - -**Example**: pack `pack-constraints-test` must require pack `vault` of min version `0.6.0`. - - - - - -Maximum supported dependent pack version, any version above the maximum version is not valid. - -**Example**: pack `pack-constraints-test` must require pack `kubernetes` of min version `1.18.6`. - - - - - - - - - -The dependent pack is optional but validates minimum or maximum versions if the pack is selected. - -**Example**: `vault` pack is optional. - - - - - -The dependent pack is mandatory and must contain a version within the minimum or maximum supported versions, if defined. - -**Example**: `kubernetes` pack must be required of min version `1.17.0` and max version `1.18.6`. Any Kubernetes version below `1.17.0` and above `1.18.6` is not valid. - - - - - -Pack versions within the range of the mentioned minimum and maximum (including the minimum and maximum) are not supported. - -**Example**: `csi-vsphere-volume` pack is not supported if the version selected falls within the min and max versions. - - - -If the minimum and maximum versions are not mentioned, the validation is skipped and all versions are allowed. - - - - - - - - - - - -# Pack Resource Constraints - -A successful Kubernetes Cluster deployment is possible only when the cluster has sufficient hardware requirements. We consider the CPU, Memory, and Disk size as the hardware requirements. The minimum resource requests can be varied depending on the workload to be deployed in the cluster. Spectro Cloud users are allowed to select the desired instance type, and the disk size while configuring the machine pool in the Cluster deployment procedure. If the user selects the instance type which does not satisfy the minimum CPU or Memory or Disk size requirements, then there is a high probability that the cluster deployment may not succeed due to insufficient CPU or Memory or Disk size. - -Pack Resource Constraints are a set of rules defined in the pack metadata `pack.json` to specify the minimum CPU, Memory, and Disk size requirements. The pack constraints framework auto-checks the resource constraints and validates the user-selected instance type specifications before the cluster is submitted for deployment. The total input resource capacity is evaluated against the machine pool size with the actual hardware specifications of a selected instance type. - -## Pack metadata JSON - -Pack resource constraints must be defined in the `pack.json` file. The sample pack metadata is shown below to define the `resources` under `constraints` key. 
- -```json -{ - "addonType": "system app", - "cloudTypes": [ - "all" - ], - "displayName": "Test Pack", - "kubeManifests": [], - "layer": "addon", - "name": "pack-constraints-test", - "version": "1.0.0", - "constraints": { - "resources": [ - { - "type": "cpu", - "minLimit": 2000, - "components": [ - { - "resourceRequestParamRef": "requests.cpu", - "replicaCountParamRef": "replicas", - "scheduleType": "all" - } - ] - }, - { - "type": "memory", - "minLimit": 2048, - "components": [ - { - "resourceRequestParamRef": "requests.memory", - "replicaCountParamRef": "replicas", - "scheduleType": "worker" - } - ] - }, - { - "type": "diskSize", - "minLimit": 10 - } - ] - } -} -``` - -## Pack Resources Attributes - - - - - -The type of resource - -* cpu -* memory -* diskSize - - - - - -The minimum limit of the resource will be considered during the machine pool validation. The resource limit value is required to have the below unit depending on the resource type. Any change of unit will cause inaccurate computation of the total minimum requirement. - -* cpu - millicore (m) -* memory - Mibibyte (Mi) -* diskSize - Gigabyte (GB) - - - - - -The minLimit is the minimum resource requirement for each worker pool in the cluster. This value is sufficient for the basic resource validation, but in some cases where the pack contains one or more associated components, then each component can define its CPU or memory resource requests in the config file `values.yaml`. In this case, a single `minLimit` value is not sufficient as the minimum requirements can be different for each component. - - - -If the components are defined then `minLimit` is ignored during resource validation. - - - -The `components` field is an array of the component which consists of these attributes. - - - - - -Resource requests and limits can be defined in the pack `values.yaml`. It is required for the pack constraints framework to know the parameter name from where the resource request value can be read during the resource validation. So, the `resourceRequestParamRef` is the configuration parameter name of the resource request defined in the `values.yaml`. - - - - - -The Kubernetes pod can run in one or more replicas based on the replica count configured in the `values.yaml` file. The resource request values defined in `values.yaml` are for one replica, and the requests must be multiplied by the number of replicas which gives the actual minimum requirement. So, the `replicaCountParamRef` is the configuration parameter name of the replica count defined in the `values.yaml` - - - - - -Kubernetes provides a way to schedule the pods on master/worker nodes or both. Pack Constraints framework must know where the pods are scheduled because the resource validation validates only the master machine pool when the pods are scheduled on master nodes. Similarily, if the pods are scheduled on worker nodes, then only the worker machine pool will be validated. In the case of daemon sets, the pods are scheduled in both master and worker nodes, and the framework validates both master and worker machine pool configurations before the cluster is submitted for deployment. - -* master - pods are scheduled only on master nodes -* worker - pods are scheduled only on worker nodes -* all - pods are scheduled on both master and worker nodes - - - - - - - - - -# Pack Presets - -Pack Presets are the predefined values in a file called `presets.yaml` in the pack. 
It contains an array of the presets for the pack, and is visible in the pack parameters of the Cluster profile and the Cluster. Users can select any preset from the available pack presets, and the predefined values of a selected preset are applied automatically by the Spectro Cloud UI. Presets make pack configuration much easier as multiple pack values are updated at a time and the user does not need to understand all the configuration parameters which get changed depending on various factors. - -## Presets Metadata YAML - -This `presets.yaml` shows two presets - -* `privatePackRegistry` -* `publicPackRegistry` - -with a different set of pre-defined values. - -```bash -presets: -- name: "privatePackRegistry" - displayName: "Private Registry" - group: "registry" - remove: ["registry.ingress.host"] - add: | - registry: - private: true - type: "PACK" - host: - ip: "127.0.0.1" - port: 5000 -- name: "publicPackRegistry" - displayName: "Public Registry" - group: "registry" - remove: ["registry.ingress.host"] - add: | - registry: - private: false - type: "PACK" - host: - ip: "13.233.2.255" - port: 80 -``` - -## Preset Attributes - - - - - -*Name of the preset.* It must be unique. - - - - - -*Name of the preset.* It is visible in the parameters configuration - - - - - -*An array of parameter names.* These are removed from the pack values when a preset is selected. - - - - - -*A set of values in YAML format.* These are added/updated in the pack values when a preset is selected. - - - - - -One or more presets can be categorized into a common group, but only one preset can be selected from the same group of presets. - - - - - -# Pack Macros - -Pack macros are the variables defined in the Cluster profile or in Cluster pack values, and these variables are resolved only at the cluster deployment time. - -## Types of Macros - - - - - -System macros are variables defined by the system. Users are allowed to use these variables and the system is capable of resolving all the variables to values at the time of cluster deployment. - - - - - -```bash -user: - name: "{{ .spectro.system.[VARIABLE_NAME] }}" -``` - -### Supported Variables - - -| Macro | Description | -|-------|-------------| -| `{{.spectro.system.user.name}}`| The name of the user currently logged in. | -| `{{.spectro.system.user.uid}}` | The unique identifier of the user currently logged in. | -| `{{.spectro.system.user.email}}` | The email address of the user currently logged in. | -| `{{.spectro.system.tenant.uid}}` | The unique identifier of the current tenant. | -| `{{.spectro.system.project.name}}` | The name of the project. | -| `{{.spectro.system.project.uid}}` | The unique identifier of the project. | -| `{{.spectro.system.clusterprofile.name}}`| The name of the cluster profile associated with the current project. | -| `{{.spectro.system.clusterprofile.uid}}` | The unique identifier of the cluster profile the pack is part of. | -| `{{.spectro.system.clusterprofile.version}}`| The current version of the cluster profile the pack is part of.| -| `{{.spectro.system.cluster.name}}` | The name of the cluster. | -| `{{.spectro.system.cluster.uid}}` | The unique identifier of the cluster. | -| `{{.spectro.system.cloudaccount.name}}` | The name of the cloud account associated with the current project. | -| `{{.spectro.system.cloudaccount.uid}}` | The unique identifier of the cloud account associated with the current project. | -| `{{.spectro.system.kubernetes.version}}` | The version of Kubernetes currently running on the cluster. 
| -| `{{.spectro.system.reverseproxy.server}}` | The hostname of the reverse proxy server. | -| `{{.spectro.system.reverseproxy.port}}` | The port number of the reverse proxy server. | -| `{{.spectro.system.reverseproxy.protocol}}` | The protocol used by the reverse proxy server, either HTTP or HTTPS. | -| `{{.spectro.system.reverseproxy.vhostport}}` | The port number used by the virtual host on the reverse proxy server. | -| `{{.spectro.system.cloud.type }}` | The type of cloud provider being used, such as AWS, GCP, Azure or other providers. | -| `{{.spectro.system.cloud.region }}` | The region where the cloud resources are located. | -| `{{.spectro.system.clusterprofile.infra.name}}` | The name of the cluster profile. | -| `{{.spectro.system.clusterprofile.infra.uid}}` | The unique identifier of the cluster profile. | -| `{{.spectro.system.clusterprofile.infra.version}}` | The version of the cluster profile. | -| `{{.spectro.system.cluster.kubevip}}`| The IP address of the virtual IP (VIP) assigned to the cluster and load balancer for the control plane. This macro is only available for Edge and vSphere cluster deployments. | - - - - - -```bash -user: - name: "{{ .spectro.system.user.name }}" - uid: "{{ .spectro.system.user.uid}}" -``` - - - - - - - - - -Pack reference macros are custom variables that must be defined in a pack and then can be used as a variable in any pack. If the variable is not defined with a value, then the default value is applied, if specified. If the default value is not specified, then the variable will be resolved to an empty value. - - - - - -```bash -k8s: - version: "{{ .spectro.pack.[PACK_NAME].[VARIABLE_NAME] }}" -``` - -`PACK_NAME` - the name of the pack where the variable is defined - -`VARIABLE_NAME` - the fully qualified name of the variable defined in the pack - - - - - -Referencing Kubernetes pack variable version in CentOS pack values: - -centos values.yaml - -```bash -k8s: - version: "{{ .spectro.pack.kubernetes.version }}" -``` - -kubernetes values.yaml - -```bash -version: 1.18.0 -``` - - - - - - - - - -## Additional Capabilities - -### Sprig Template Functions - -Users are allowed to use the [sprig template functions](http://masterminds.github.io/sprig/) to modify the resolved variable value. - -**Examples** - -```bash -user: - name: "{{ .spectro.system.user.name | upper }}" -``` - -`upper` - sprig template function which converts resolved user name to upper case - -### How to set the default value? - -```bash -k8s: - version: "{{ .spectro.pack.kubernetes.version | default \"1.19.0\"}}" -``` - - - -If the variable `version` is not defined in the pack `kubernetes`, then the default value `1.19.0` will be applied at deployment. In case the default value is not specified then the empty value will be applied. 
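As a combined reference, a hypothetical add-on pack `values.yaml` could mix the macro types described above. The key names and the `1.19.0` fallback are illustrative only.

```yaml
app:
  # System macro, resolved to the current project name at deployment time.
  project: "{{ .spectro.system.project.name }}"
  # Pack reference macro with a sprig default. Falls back to 1.19.0 if the
  # kubernetes pack does not define a version variable.
  kubernetesVersion: "{{ .spectro.pack.kubernetes.version | default \"1.19.0\" }}"
  # Sprig function applied to a system macro.
  owner: "{{ .spectro.system.user.name | lower }}"
```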
- - diff --git a/content/docs/09-registries-and-packs/6-helm-charts.md b/content/docs/09-registries-and-packs/6-helm-charts.md deleted file mode 100644 index 9e5f063c20..0000000000 --- a/content/docs/09-registries-and-packs/6-helm-charts.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: "Helm Registries" -metaTitle: "Helm Registries" -metaDescription: "Learn how to add your own Helm Registries to Palette" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - - -# Palette Helm Registry -Helm Charts are a collection of Kubernetes resource files capable of deploying services of varying complexity. Palette provides some stable default Helm charts in its public Helm Chart Registry. - -Palette also supports creating custom Helm registries. You can add your own public or private Helm registries to Palette's Helm registry. - -The Helm Chart registry synchronizes charts with Palette, so you can use them when you create cluster profiles. - -# Prerequisite -For security, Palette requires Helm OCI version 3.7.0 and higher. - -# Add a Helm Chart Registry to Palette - -To add your private Helm Chart registry to Palette: - -1. Log in to Palette as a Tenant administrator. - -2. From the **Main Menu** navigate to **Tenant Settings > Registries**. - -3. From the **Helm Registries** tab, click **Add New Helm Registry** and type the registry name and endpoint. If the registries list is long, you may need to scroll down to see the Add link. - -4. Type the name of your registry and its endpoint. - -5. Choose **Protected** mode based on whether your network is private or public: - - - * Toggle **Protected** mode to *on* if your Helm registry is deployed in a private network. Palette downloads and deploys charts from protected chart registries instead of scanning a private network for them. - - When your registry is protected: - - * Charts are not synchronized with Palette, and you must type Helm chart names and versions when you create Cluster Profiles. - * The **Last Synced** column in the **Helm Registries** tab displays *n/a*. -
-
- - * Leave **Protected** mode toggled *off* if your Helm registry is deployed in a public network. We refer to Helm registries with this option disabled as being unprotected. - - When your registry is unprotected: - - * Palette synchronizes Helm charts with the console so you can select charts and versions from drop-down menus. - * The **Last Synced** column in the **Helm Registries** tab displays the date that charts were last synchronized in Palette. -
- -6. If you haven’t set up credentials for your registry, leave **No Authentication** toggled *on*. - - If your registry has credentials, toggle **No Authentication** to *off* and type the registry **Username** and **Password**. -
- -7. Confirm your changes. - -Your chart is now deployed in Palette's Helm Chart registry and is ready to model in cluster profiles. - -# Validate - -You can find your Helm registry listed in the **Helm Registries** tab in **Tenant Settings > Registries**. Use charts from Helm registries in your cluster profiles. - -The following applies when adding Helm charts to cluster profiles. - -* When using charts from protected registries, you must type the chart name and version you want to use. These must match the name and version hosted in the Helm registry. -* For unprotected Helm registries, charts are synchronized in Palette, which allows you to select them from lists and dropdowns. - -# Resources - -[Create Cluster Profiles](https://docs.spectrocloud.com/cluster-profiles/task-define-profile) - -
- diff --git a/content/docs/09-registries-and-packs/6-oci-registry.md b/content/docs/09-registries-and-packs/6-oci-registry.md deleted file mode 100644 index d052cdeafb..0000000000 --- a/content/docs/09-registries-and-packs/6-oci-registry.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -title: "OCI Registry" -metaTitle: "Spectro Cloud OCI Registry" -metaDescription: "creation and usages of OCI Registry within Spectro Cloud" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Overview -Palette supports OCI registries to serve the “filesystem bundle” unpacked on disk as helm registries. Helm charts hosted in OCI registries can be added to cluster profiles and deployed to Kubernetes clusters. We support all OCI complaint registries. - -# Setup OCI Registry: - -* Login as **Tenant Admin**. - - -* Click **Registries** to open **Manage Registries**. - - -* Select the **OCI Registries** tab and click **Add New OCI Registry** button. - - -* Provide the following information to the Add OCI registry wizard: - * Name: An unique registry name - * Provide registry endpoint - * OCI Authentication type: Basic or ECR - * Provide authentication details based on the authentication type selected - - -* Click on **Confirm** to complete the registry creation process. - - -* Once the registry is created, and charts are added, they can be [attached as part of an add-on cluster profile](/registries-and-packs/oci-registry#useyourociregistry). - -# BASIC Authentication of Azure Container Registry - -Palette supports basic authentication for [Azure Container Registry](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-get-started-portal?tabs=azure-cli). Azure Container Registry is a private registry service for building, storing, and managing container images and related artifacts. - -## Pre-requisite - -In Azure portal: - - * Create Azure Container Registry. - - - * Go to Azure Container Registry, select AccessKeys and enable AdminUser to generate the password. - -## How to authenticate: - - * Go to Palette Console,Create OCI Registry providing the following details: - - * EndPoint : Azure Container Registry Details - Login server End Point - * Username : Azure Container Registry UserName - * Password : Azure Container Registry Password - * Authentication type : Basic - -# Amazon ECR Authentication - -Choose among one of the following ECR protection modes: -* Un-protected Mode: No credentials required. - - -* Protected Mode: Toggle the “protected” button for protected registry creation and authenticate the AWS account using credentials or STS. - * For the credentials method of authentication, use the Access Key and Secret Access Key of the role created and validate the credentials. - * For STS, use the unique ARN of the AWS role and validate. - - -To provision ECR based OCI Authentication make sure that the User's STS Role has the ECR policy configured. 
- - -## ECR Policy: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "ecr-public:DescribeRegistries", - "ecr:DescribeImageReplicationStatus", - "ecr:ListTagsForResource", - "ecr:ListImages", - "ecr:DescribeRepositories", - "ecr:BatchCheckLayerAvailability", - "ecr:GetLifecyclePolicy", - "ecr-public:DescribeImageTags", - "ecr-public:DescribeImages", - "ecr:GetRegistryPolicy", - "ecr-public:GetAuthorizationToken", - "ecr:DescribeImageScanFindings", - "ecr:GetLifecyclePolicyPreview", - "ecr:GetDownloadUrlForLayer", - "ecr-public:GetRepositoryCatalogData", - "ecr:DescribeRegistry", - "ecr:GetAuthorizationToken", - "ecr-public:GetRepositoryPolicy", - "ecr-public:DescribeRepositories", - "ecr:BatchGetImage", - "ecr:DescribeImages", - "ecr-public:GetRegistryCatalogData", - "ecr-public:ListTagsForResource", - "ecr-public:BatchCheckLayerAvailability", - "ecr:GetRepositoryPolicy" - ], - "Resource": "*" - } - ] -} -``` -## Multi-Region Support for AWS ECR registries: - -Palette supports the parameterization of AWS ECR registry endpoint to support cross-region replicated registries. For performance considerations, Helm chart content may be replicated across multiple AWS regions and served to the clusters from within the region of cluster deployment. To support this, the variable “{{.spectro.system.aws.region}}” can be used in the registry endpoint. This variable is substituted at the time of cluster deployment with the region selected for deployment. - - -**Region Parameter:** - -```json -{{.spectro.system.aws.region}} -``` -**Endpoint format:** - -```json -.dkr.ecr.{{.spectro.system.aws.region}}.amazonaws.com -``` -**A sample Endpoint:** - -```json -214575254960.dkr.ecr.{{.spectro.system.aws.region}}.amazonaws.com -``` -Specify a default region to fall back to when the deployment region does not contain the requested helm chart. -(Eg:, Default region: us-west-1) - -# Use Your OCI Registry -Charts from the OCI registry can be used in your **Add on** cluster profiles as follows: -* From the Repository menu, select the desired OCI registry. - - -* Key in the required chart name and version. The name and version should exactly match the chart name and version hosted in the OCI registry. - - -* Click done to get your OCI-helm layer added to the cluster profile. - diff --git a/content/docs/09-registries-and-packs/7-advanced-configuration.md b/content/docs/09-registries-and-packs/7-advanced-configuration.md deleted file mode 100644 index 2ebfccbd99..0000000000 --- a/content/docs/09-registries-and-packs/7-advanced-configuration.md +++ /dev/null @@ -1,240 +0,0 @@ ---- -title: 'Advanced Configuration' -metaTitle: 'Advanced Configuration' -metaDescription: 'Learn how to apply advanced concepts by customizing the deployments of the Packs registry.' -icon: '' -hideToC: true -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Advanced Configuration - -You can modify the deployment of the pack registry by providing a YAML configuration file. You can also override default configuration options through the usage of environment variables. - -The configuration file is divided into keys and values. The following is an example of a YAML configuration. - -
- -```yaml -version: 0.1 -log: - level: info - fields: - environment: test -``` - - -The `version` key holds a numeric value. The `log` key holds nested keys, which can in turn contain further keys. - -To override the value of `log.level`, you can specify an environment variable named `REGISTRY_LOG_LEVEL`. - -
- -```shell -export REGISTRY_LOG_LEVEL=debug -``` - -
- -## Default Configuration - -The docker image for the registry contains the following default configuration values. - -
- -```yaml -version: 0.1 -log: - fields: - service: registry -storage: - cache: - blobdescriptor: inmemory - filesystem: - rootdirectory: /data/.spectro-server -http: - addr: :5000 - headers: - X-Content-Type-Options: [ nosniff ] - Strict-Transport-Security: [ max-age=63072000; includeSubdomains; preload ] - Content-Security-Policy: [ img-src 'self'; script-src 'self'; style-src 'self' ] - X-Frame-Options: [ DENY ] - X-XSS-Protection: [ 1; mode=block ] - Referrer-Policy: [ same-origin ] -auth: - htpasswd: - realm: basic-realm - path: /auth/htpasswd-basic -``` - -The server is started with the command `registry serve /etc/spectro/config.yml`. -You can override the default values with specific values through environment -variables, or you can use your own configuration file. - -For example, you can start the docker container image with environment variables that override the basic auth realm and logging level. In the following example, the `-e` flag is used to provide environment variables to the container. -
- -```bash -docker run -d \ - --rm \ - --publish 443:5000 \ - --name spectro-registry \ - --volume $(pwd)/spectropaxconfig/:/etc/spectropaxconfig/ \ - -e REGISTRY_LOG_LEVEL=debug \ - -e REGISTRY_AUTH=htpasswd \ - -e REGISTRY_AUTH_HTPASSWD_REALM="My Enterprise Realm" \ - gcr.io/spectro-images-public/release/spectro-registry:3.4.0 -``` - -Alternatively, you can start the container by mounting a directory with a new configuration file and pointing the server command to the configuration file. -
- -```shell -docker run -d \ - --rm \ - --publish 443:5000 \ - --name spectro-registry \ - --volume $(pwd)/myconfig.yml:/etc/spectropaxconfig/myconfig.yml \ - gcr.io/spectro-images-public/release/spectro-registry:3.4.0 \ - serve /etc/spectropaxconfig/myconfig.yml -``` -## Storage Backend - -The pack registry can store data on a file system through a mounted -volume, or you can specify object storage such as AWS S3. - -The following is an example of a configuration using a file system backend. - -```yaml -storage: - cache: - blobdescriptor: inmemory - filesystem: - rootdirectory: /tmp/registry/data/.spectro-server -``` - -If you are using S3 storage, ensure you specify the required S3 parameters. -
- -```yaml -storage: - cache: - blobdescriptor: inmemory - s3: - region: us-east-1 - bucket: my-bucket - rootdirectory: /registry - encrypt: true|false - secure: false|true - accesskey: SAMPLEACCESSKEY - secretkey: SUPERSECRET - host: OPTIONAL_MINIO_HOST_IF_USING - port: OPTIONAL_MINIO_PORT_IF_USING -``` - -You can also use ephemeral storage. We recommend using ephemeral storage for testing purposes. Production environments should use object storage or a file system. - -
- -```yaml -storage: inmemory -``` - -## Authentication - -You can configure basic HTTP Auth. Basic Auth requires providing the pack registry server with an htpasswd file containing the credentials. -
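If you do not already have an htpasswd file, one way to create it is with the `htpasswd` utility from `apache2-utils`. The username, password, and output path below are examples only; the registry expects bcrypt-hashed entries, which the `-B` flag produces.

```bash
# -c creates the file, -B uses bcrypt, -b reads the password from the command line.
htpasswd -Bbc ./htpasswd-basic admin 'MyS3curePassword'
```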
- -```yaml -auth: - htpasswd: - realm: basic-realm - path: /auth/htpasswd-basic -``` - -## HTTP - -The following options are available for modifying HTTP transport: - -### Server and Port - -For serving content on all interfaces on port 5000: - -
- -```yaml -http: - addr: :5000 -``` -Alternatively, the server can bind to a single IP and different port: - -
- -```yaml -http: - addr: 192.168.122.77:25000 -``` -### HTTP Headers - -The following headers are the default, and can be overridden: - -
- -```yaml -http: - headers: - X-Content-Type-Options: [ nosniff ] - Strict-Transport-Security: [ max-age=63072000; includeSubdomains; preload ] - Content-Security-Policy: [ img-src 'self'; script-src 'self'; style-src 'self ] - X-Frame-Options: [ DENY ] - X-XSS-Protection: [ 1; mode=block ] - Referrer-Policy: [ same-origin ] -``` -### TLS - -TLS can be configured using [Let's Encrypt](https://letsencrypt.org) or custom TLS certificates: - -When using Let's Encrypt, your registry server must be assigned to a public IP address accessible for HTTP-based validation by the Let's Encrypt services. Check out the [Deploy Pack Registry Server with Let's Encrypt](/registries-and-packs/adding-a-custom-registry#deploypackregistryserverwithletsencrypt) guide to learn more. - -
- -```yaml -http: - addr: :5000 - tls: - letsencrypt: - cachefile: le-cache - email: oz@spectrocloud.com - hosts: - - pax-registry.spectrocloud.com -``` - -Let's Encrypt limits the number of free certificates issued for each domain for a set time. -We recommend you mount a volume where the certificates are permanently stored. Use the -option `cachefile` to enable this behavior. - -You can specify custom certificates by providing the file path to the certificate files. - -
- -```yaml -http: - tls: - certificate: /path/to/x509/certificate/file - key: /pat/to/x509/key/file/which contains/private key/for x509 certificate above - clientcas: /path/to/file/with one or more/CA certificates encoded as PEM - minimumtls: minimum tls version to use -``` diff --git a/content/docs/09.5-security.md b/content/docs/09.5-security.md deleted file mode 100644 index dfef7e41ba..0000000000 --- a/content/docs/09.5-security.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: "Security" -metaTitle: "Spectro Cloud Security" -metaDescription: "Get an overview of Palette's security controls, security-aware culture, and where you can report any security issues." -icon: "lock" -hideToC: false -fullWidth: false ---- - -# Overview - -We view security as more than a feature. It is a fundamental aspect of our business and culture through transparency, continuous learning, and a security-first mindset. - -By instilling a sense of collective responsibility for security, everyone at Spectro Cloud contributes to our overall security posture. Our dedication to security helps protect your interests and enhances the quality and reliability of our software and services. - -
- -## Security-Aware Culture - -All Spectro Cloud employees receive mandatory security training. Our developers receive additional training focused on software security, as they are the first line of defense through secure coding practices, and they complete Open Worldwide Application Security Project (OWASP) Top 10 training to understand, identify and mitigate the most critical security risks and vulnerabilities that affect web applications. - -
- -## Product Security - -Palette uses a micro services-based architecture, and we take steps to ensure each service is secured. Product functionality is broken down logically into isolated services within containers. Containers are deployed in a Kubernetes cluster, called a management cluster, that Palette hosts and manages in SaaS mode or that users can host and manage in a self-hosted environment. Learn more by reviewing [Secure Product Architecture](/security/product-architecture). - -
- -## Compliance & Standards - -We believe adherence to industry standards and regulations is critical to maintaining the highest levels of security for our customers. We ensure our software complies with all relevant laws and regulations, and we continuously evaluate and update our compliance efforts to stay current with emerging regulations and requirements. To learn about our product certifications, check out the [Compliance](/compliance) reference. - -
- -## Transparency - -We list any Common Vulnerabilities and Exposure (CVE) issues that affect Palette or any part of its infrastructure in our [Security Bulletins](/security/security-bulletins) along with the fix applied and any workarounds. - -
- -## Report Security Issues - -Please contact our Security team at security@spectrocloud.com to report any security issues. - - -
- -# Resources - - -- [Core Principles](/security/core-principles) - - -- [Lifecycle](/security/lifecycle) - - -- [Secure Development](/security/lifecycle/secure-development) - - -- [Release Process](/security/lifecycle/release-process) - - -- [Secure Product Architecture](/security/product-architecture) - - -- [Platform Security](/security/product-architecture/platform-security) - - -- [Data Encryption](/security/product-architecture/data-encryption) - - -- [SaaS Operation](/security/product-architecture/saas-operation) - - -- [Self-Hosted Operation](/security/product-architecture/self-hosted-operation) - - -- [Tenant Cluster Security](/security/product-architecture/tenant-cluster) - - -- [Security Bulletins](/security/security-bulletins) - - -
- -
- -
- -
- diff --git a/content/docs/09.5-security/05-core-principles.md b/content/docs/09.5-security/05-core-principles.md deleted file mode 100644 index b73a47b941..0000000000 --- a/content/docs/09.5-security/05-core-principles.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: "Core Principles" -metaTitle: "Core Principles" -metaDescription: "Learn about Spectro Cloud security principles for Palette." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -Security is about controlling who can interact with your information, what they can do with it, and when they can interact with it. - -We use the Confidentiality, Integrity, and Availability (CIA) Triad as the framework that guides the security and controls we provide in our products and services. This framework is often extended with Authentication, Authorization, and Auditing. Components of the CIA Triad are described below. -
- -- **Confidentiality**: Preserve authorized restrictions on information access and disclosure to protect personal privacy and proprietary data. This includes confirming identity and access rights to resources. - - -- **Integrity**: Protect data against unauthorized modification - either accidental or deliberate. - - -- **Availability**: Protect the services that provide data access. - - -- **Authorization**: Apply access rights through privileges or access levels to secure resources such as data, services, files, applications, and more. - - -- **Authentication**: Confirm the identity of the entity that wants access to a secure system. - - -- **Auditing**: Track implementation-level and domain-level events to ensure certain actions have been performed in the product. - - -# Core Principles - -Our security philosophy is grounded in the following core principles that we apply to our decision-making and product. - -
- -## Secure by Design - -Your data security is a core business requirement, not just a technical feature. We apply this principle during the design phase of our product feature development lifecycle to dramatically reduce the number of exploitable flaws and prevent them from being introduced in a release. - -
- -## Secure by Default - -We believe that security should be the default setting for all of our systems and software. Our products are secure to use out-of-the-box with little or no configuration needed and at no additional cost – such as audit logs and access control for sensitive information. Palette also supports Multi-Factor authentication (MFA) using external Identify Providers (IDP), such as Okta. - -
- -## Never Rely Just on Obscurity - -We believe that using security through obscurity by itself is the absence of a security strategy. While some organizations use this method as their main security method, it puts their network at risk if an attacker gains access to obscure resources. - -Determined attackers use various methods to discover the hidden details of a system, and discovery eventually happens - either accidentally or deliberately. We believe that while obscurity alone is not a robust security strategy, it can be layered with security policies and controls. This is the principle of Defense in Depth. - -
- -## Defense in Depth - -We believe security should be layered and redundant with multiple defenses in place to protect against different types of attack. The intent is to provide redundancy in the event a security control fails or a vulnerability is exploited. - -
- -## Least Privilege - -This principle encourages system designers and implementers to allow runtime code with only the permissions needed to complete the required tasks and no more. - -We use the principle of least privilege to ensure that all users have only the necessary access rights to fulfill their job roles. To ensure the security of our users and systems, we use mechanisms such as defined access rights, regular reviews, restricted privileges, and system monitoring. - -
- -## Secrets Handling - -We use the following methods for secrets handling, which contribute to a robust and resilient security infrastructure. - -
- -- Secure password manager. - - -- Dynamic secret retrieval, which automates the secret rotation process to reduce the risk of unauthorized access and limit sensitive data exposure. - - -- MFA and Single Sign-On (SSO). - - - -## Continuous Improvement - -We believe security is an ongoing process, and we are committed to constantly improving our security posture through regular assessment and testing. - -We review and audit our internal setup regularly to ensure our employees have access to the tools they need while maintaining strong security standards. - -
- -
- diff --git a/content/docs/09.5-security/08-lifecycle.md b/content/docs/09.5-security/08-lifecycle.md deleted file mode 100644 index ffcf358088..0000000000 --- a/content/docs/09.5-security/08-lifecycle.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "Lifecycle" -metaTitle: "Spectro Cloud Secure Development Lifecycle" -metaDescription: "Learn how Spectro Cloud applies security throughout its development lifecycle." -icon: "" -hideToC: false -fullWidth: false ---- - - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -Our comprehensive approach to security is ingrained in each stage of our development lifecycle. From initial design and coding to testing and deployment, our processes are designed to identify, prevent, and mitigate security risks to ensure we deliver reliable and secure solutions. - -![Secure development flow from feature definition and design to development and release](/security_dev_lifecycle.png) - -Our Security team reviews early in the design process and identifies real and potential issues, which are logged in a ticketing system. Throughout development and before release we conduct more reviews and automated scans for common vulnerabilities. Scans go beyond our own code to include third-party libraries and container images. Should any vulnerabilities be found, we block the release and apply remediations. Our Security team must approve all our releases. - - - -# Resources - -- [Secure Development](/security/lifecycle/secure-development) - - -- [Release Process](/security/lifecycle/release-process) - - -
- - -
- - -
- - -
\ No newline at end of file diff --git a/content/docs/09.5-security/10-lifecycle/12-secure-development.md b/content/docs/09.5-security/10-lifecycle/12-secure-development.md deleted file mode 100644 index 0422987f2f..0000000000 --- a/content/docs/09.5-security/10-lifecycle/12-secure-development.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "Secure Development" -metaTitle: "Secure Development" -metaDescription: "Learn about Spectro Cloud's secure product development." -icon: "" -hideToC: false -fullWidth: false ---- - - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Secure Development - -Our proactive *shift left* approach to making security an integral part of our development process ensures we detect and address vulnerabilities and design flaws early in the development cycle. By integrating security measures into the development process, we improve our software quality and reduce vulnerabilities while minimizing manual intervention and potential human error. - -We integrate security measures with our Continuous Integration/Continuous Delivery (CI/CD) pipeline. By regularly monitoring and improving our software to ensure product security, we are able to deliver high-quality solutions more quickly to you. - -We also employ a comprehensive suite of security scans that cover various aspects of our software: containers, container images, Kubernetes, source code, operating systems, and configurations. This extensive coverage enables us to identify and address a wide range of potential security issues before they can impact our end users. - -We connect the results of our security scans directly to our ticketing system. This seamless integration ensures we promptly address any identified vulnerabilities or issues and validate them during our release checklist activities. -In addition, we continually evaluate and adopt new security tools and practices to stay ahead of evolving threats. Our investment in security automation and tools demonstrates our commitment to safeguarding your data and maintaining the highest standards of software quality and security. - -
- -
- -
diff --git a/content/docs/09.5-security/10-lifecycle/15-release-process.md b/content/docs/09.5-security/10-lifecycle/15-release-process.md deleted file mode 100644 index f891376cf1..0000000000 --- a/content/docs/09.5-security/10-lifecycle/15-release-process.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: "Release Process" -metaTitle: "Release Process" -metaDescription: "Learn about Spectro Cloud's release process for Palette." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Release Process - -We use semantic versioning for releases, where release versions follow the *Major.Minor.Patch* numbering pattern across all components, and we utilize and maintain *Integration*, *Stage*, and *Production* environments. - -
- -## Checklist - -Our release process includes a checklist of the features planned for release to ensure their completion or to ensure a completion plan is in place. - -When all pre-deployment checklist items are complete, stakeholders review the checklist to make an informed decision about the state of the release and do the following: - -
- -- Identify any steps that have not been completed. - - -- Request additional information. - - -- Follow up as needed. - - -## Signoff - -A new version deployment will not proceed until all stakeholders have signed off. - -
- -## Backup - -We back up the current release before starting a new one. Should a rollback be required and patching is not an option, a rollback request is submitted to the Spectro Cloud DevOps team. The DevOps team will restore from the backup and revert the production SaaS instance to the prior version. - -
- -
- -
- -
diff --git a/content/docs/09.5-security/10-product-architecture.md b/content/docs/09.5-security/10-product-architecture.md deleted file mode 100644 index 1849528462..0000000000 --- a/content/docs/09.5-security/10-product-architecture.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: "Secure Product Architecture" -metaTitle: "Secure Product Architecture" -metaDescription: "Learn about the integrity of Palette's secure architecture." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -In addition to the security principles we adhere to and our secure development lifecycle, we provide a cohesive security architecture for Palette. - -## Secure Product Architecture - -Palette uses a microservices-based architecture, and we take steps to ensure each service is secured. Product functionality is broken down logically into isolated services within containers. Containers are deployed in a Kubernetes cluster, called a management cluster, which Palette hosts and manages in Software as a Service (SaaS) mode or that users can host and manage in a self-hosted environment. - -Palette supports three architecture models: multi-tenant SaaS, dedicated SaaS, and self-hosted, which includes support for air-gapped environments. These flexible deployment models allow us to adapt to existing requirements in terms of separating responsibilities and network restrictions. - -
- -- **Multi-tenant SaaS**: The management plane is hosted in AWS across three regions that we manage: us-east-1, us-west-1, and us-west-2. Each customer occupies a tenant in our multi-tenant cloud environment. Our Operations team controls when to upgrade the management plane. - - -- **Dedicated SaaS**: The management plane is hosted in a cloud or region that you specify in our Spectro Cloud cloud account with a dedicated instance that we manage. In this scenario, you decide when to upgrade the management plane. - - -- **Self-hosted**: The management plane is hosted in your environment. It can be on-prem VMware vSphere, OpenStack, bare metal, in a public cloud that manages your compute instances, or a managed Kubernetes cluster such as Amazon Elastic Kubernetes Service (EKS), Azure Kubernetes Service (AKS), and Google Kubernetes Engine (GKE). - - -
- -![A diagram of Palette deployment models](/architecture_architecture-overview-deployment-models.png) - -
- - -Palette’s robust security measures safeguard your data and ensure the integrity of our services. We adhere to industry-leading standards and continuously refine our practices to provide the highest level of security. Palette infrastructure safeguards data in your Kubernetes production environment with its zero-trust architecture, granular Role-Based Access Control (RBAC), immutable Linux distributions ([Kairos](https://kairos.io/)), and hardened clusters and Kubernetes packs. - -Palette's security controls ensure data protection in SaaS operation at the management platform level and the [tenant](/glossary-all#tenant) cluster level. To learn more, refer to [SaaS Operation](/security/product-architecture/saas-operation). In self-hosted operation, you must ensure security controls in your environment. Find out more about self-hosted deployment in [Self-Hosted Operation](/security/product-architecture/self-hosted-operation). - - -## Multi-tenancy - -Palette is a multi-tenant SaaS platform in which every tenant represents a customer. We ensure tenant isolation through the following design principles and techniques: - -
- -- **Network isolation**: Tenant clusters are created in the tenant’s public cloud accounts or in private data centers. Customers cannot intercept network traffic in other tenant clusters. Access to tenant cluster APIs through the cluster’s kubeconfig file is restricted to the tenant. - - -- **Data isolation**: Palette applies a tenant filter to every operation to ensure users' access is restricted to their own tenant. - - -- **Tenant Data Encryption**: Tenant data is encrypted, and all message communication uses tenant-specific channels. - - -- **Audit Policies**: We record all actions taken on the platform and provide a comprehensive report for tracking purposes. - - -- **Noisy Neighbor Prevention**: In the SaaS deployment model, we use AWS Load Balancers and AWS CloudFront with a web application firewall (WAF) for all our public-facing services. These services benefit from the protections of AWS Shield Standard, which defends against the most common and frequently occurring network and transport layer Distributed Denial-of-Service (DDoS) attacks that target applications. This ensures that excessive calls from a tenant do not adversely affect other tenants' use of the platform. - - -## Palette Authentication & Authorization - -Palette fully supports RBAC and two authentication modes: - -
- -- *Local authentication* and *password policy*
- - With local authentication, a user email serves as the ID, and a password is compared with the one-way hash stored in the database to authenticate users to a tenant. The platform administrator can set a password policy to control the requirements for password length, rules, and expiration. - - -- *Single Sign-On (SSO)* and *Multi-Factor Authentication (MFA)*
- - In these modes, the tenant is configured to have Security Assertion Markup Language (SAML) 2.0 Identity Provider (IDP) integrations. If the IDP requires MFA, you are redirected to the IDP’s authentication page. SSO can also automatically map a user to one or more user groups in the tenant. - - -## API Security - -Palette uses JSON Web Token (JWT)-based authentication and authorization for Representational State Transfer (REST) API access over HTTPS. - -The authentication token is valid for a limited time. If the token is about to expire, you can request a token refresh before making other API calls. - -Palette has a common API gateway validation service that ensures there are no incorrect parameter values or potential vulnerabilities, such as Structured Query Language (SQL) injection or cross-site scripting. - -You can use the gateway validation service log to trace APIs with a unique ID, Tenant UID, or Session UID. To avoid revealing unnecessary information, all UIDs are 48-bit random hex strings. - -Refer to the [API Authentication](https://docs.spectrocloud.com/api/v1/auth/) guide for details. diff --git a/content/docs/09.5-security/11-product-architecture/01-platform-security.md b/content/docs/09.5-security/11-product-architecture/01-platform-security.md deleted file mode 100644 index 7821587fdf..0000000000 --- a/content/docs/09.5-security/11-product-architecture/01-platform-security.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: "Platform Security" -metaTitle: "Platform Security" -metaDescription: "Learn how Palette provides platform infrastructure security." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -Based on the deployment model, Palette is composed of a multi-layer deployment stack: Cloud > VM > OS > Container Runtime > Kubernetes > Pods. - -To ensure maximum security, we follow defense-in-depth principles that prescribe securing each layer in a multi-layer deployment stack. - -For SaaS deployment models, cloud and Virtual Machine (VM) security are handled by SaaS platform operation security controls. For on-prem deployment models, the customer’s data center infrastructure team typically handles cloud and VM security. - -## Operating Systems - -The operating system that Palette uses for its Kubernetes management cluster is Ubuntu 20.04 LTS. We follow CIS standards to harden the operating system. - -These hardened images are used to launch control planes and worker nodes for the Kubernetes cluster hosting Palette. Additionally, all OS images are scanned for vulnerabilities prior to being published to a repository. - -## Container Security - -Spectro Cloud uses Containerd as its container runtime. Containerd is an industry-standard container runtime that emphasizes simplicity, robustness, and portability in managing the complete container lifecycle. It runs as a daemon on Ubuntu instances. - -Container images for various application services are built using distroless images, which have significantly fewer packages and improve security by reducing the attack surface. - -All container images are scanned for vulnerabilities prior to being published to a repository or deployed to the SaaS platform. - -## Kubernetes Hardening - -We secure Palette's Kubernetes version based on Center for Internet Security (CIS) Kubernetes benchmarks. 
Several additional rules are also enforced on components such as the API Server, Controller Manager, Scheduler, and Kubelet. diff --git a/content/docs/09.5-security/11-product-architecture/02-data-encryption.md b/content/docs/09.5-security/11-product-architecture/02-data-encryption.md deleted file mode 100644 index fbe6cb4d6d..0000000000 --- a/content/docs/09.5-security/11-product-architecture/02-data-encryption.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: "Data Encryption" -metaTitle: "Data Encryption" -metaDescription: "Learn about Palette security controls for data and communications." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -Palette has security controls for the operating system, containers, and Kubernetes. Data is protected with secure keys, encryption, and secure communication, standard authentication and authorization, and API security. Audit logs record actions taken on the platform. Review the [Audit Logs](/audit-logs) guide to learn how to access and use them. - - -
- -## Data At Rest Encryption - -Tenant data is encrypted using a 64-bit cryptographically secure tenant key. A unique tenant key is generated for each tenant. The tenant key is encrypted using the system root key and is stored in the database. The system root key is stored in the cluster’s etcd key-value store. All message communication uses tenant-specific channels. - -The following secure keys are unique and generated for each installation: - -
- -- **Root Key**: Encrypts the tenant-specific encryption key. - - -- **JSON Web Token (JWT) signature key**: Used to sign the JWT token. - - -- **Hash Salt**: Used to hash the user password and email ID. - - -- **Tenant key**: A 64-bit cryptographically secure tenant key encrypts tenant data stored in the management cluster, such as user account name, user email ID, and tenant cloud credentials. - - -In self-managed deployments, secure keys are generated during installation and stored as secrets in the management cluster’s etcd key-value store. - -
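The pattern described above - a root key that encrypts (wraps) a per-tenant key, which in turn encrypts tenant data - is commonly called envelope encryption. The following is a minimal, generic sketch of that pattern using `openssl`; the key sizes, cipher, and file names are illustrative assumptions and do not reflect Palette's actual implementation.

```shell
# Generate a root key and a per-tenant key (sizes are illustrative).
openssl rand -hex 32 > root.key
openssl rand -hex 32 > tenant.key

# Wrap the tenant key with the root key; only the wrapped copy needs to be stored.
openssl enc -aes-256-cbc -pbkdf2 -pass file:root.key -in tenant.key -out tenant.key.enc

# Encrypt tenant data, such as a cloud credential, with the tenant key.
echo "cloudCredential: example" | \
  openssl enc -aes-256-cbc -pbkdf2 -pass file:tenant.key -out tenant-data.enc
```

One benefit of this layering is that rotating the root key only requires re-wrapping the tenant keys, not re-encrypting all tenant data.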
- -## Data In Transit Encryption - -Palette secures data in motion using an encrypted Transport Layer Security (TLS) communication channel for all internal and external interactions.

- -- **End User Communication**: Public certificates are created using a cert-manager for external API/UI communication. In self-hosted deployments, you can import an optional certificate and private key to match the Fully Qualified Domain Name (FQDN) management cluster. - - -- **Inter-Service Communication**: Services in the management cluster communicate over HTTPS with self-signed certificates and an Rivest–Shamir–Adleman (RSA) 2048-bit key. - - -- **Database Communication**: The database connection between Palette internal services that are active in the management cluster and MongoDB is protected by TLS with Authentication enabled. - - -- **Message Bus**: A Secure Network Address Translation (NATS) message bus is used for asynchronous communication between Palette management clusters and tenant clusters. NATS messages are exchanged using TLS protocol, and each tenant cluster uses dedicated credentials to connect to the message bus. Authentication and authorization policies are enforced in the NATS deployment to ensure message and data isolation across tenants. - diff --git a/content/docs/09.5-security/11-product-architecture/03-saas-operation.md b/content/docs/09.5-security/11-product-architecture/03-saas-operation.md deleted file mode 100644 index fc83c30d40..0000000000 --- a/content/docs/09.5-security/11-product-architecture/03-saas-operation.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: "SaaS Operation" -metaTitle: "SaaS Operation" -metaDescription: "Learn about Palette security in a SaaS deployment." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -Palette can be deployed as a multi-tenant SaaS system in which each tenant represents a customer. Palette SaaS infrastructure is hosted in the public cloud within a logically isolated virtual network that has a private and a public subnet. The [control plane and worker nodes](/security/product-architecture/saas-operation#controlplaneandworkernodes) for the Kubernetes cluster are launched in the private network. - -
- -## Cloud Infrastructure Security - -In public cloud environments such as AWS, Azure, and GCP, Palette interacts directly with a cloud provider’s API endpoint for access using cloud credentials specified in the tenant. The tenant clusters can be deployed in a virtual private cloud (VPC), as described in [Tenant Cluster Security](/security/product-architecture/tenant-cluster). - -This allows the SaaS controller to do the following: - -
- -- Dynamically query cloud resources. - - -- Act as an orchestrator to initiate SaaS controller requests for deployments. - -When a Palette SaaS deployment must connect to on-prem environments, such as VMware or MAAS, to deploy target Kubernetes clusters, a Private Cloud Gateway (PCG) component is deployed in the self-hosted environment as a virtual appliance (OVA). The PCG is Palette's on-prem component that enables support for isolated, private cloud or data center environments. - -The PCG pairs automatically with a tenant based on a randomly generated pairing code, similar to the Bluetooth pairing process, and acts as a proxy between Palette SaaS and private cloud endpoints, such as vCenter. The PCG uses an outgoing internet connection to the SaaS platform using static Network Address Translation (NAT) with Transport Layer Security (TLS). Refer to the [System Private Gateway](/clusters/data-center/maas/architecture#systemprivategateway) reference page to learn more. - -
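Because the PCG only needs outbound connectivity, a quick way to confirm that the environment it runs in can reach the SaaS endpoint is to test port 443 and inspect the TLS handshake. The commands below are a generic sketch; `api.spectrocloud.com` is the endpoint referenced elsewhere in this documentation, and your proxy or firewall rules may differ.

```shell
# Verify outbound TCP connectivity on port 443 from the PCG environment.
nc -zvw5 api.spectrocloud.com 443

# Inspect the TLS handshake and certificate chain presented by the endpoint.
openssl s_client -connect api.spectrocloud.com:443 -brief </dev/null
```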
- -## Control Plane and Worker Nodes - -Control plane nodes and worker nodes in the Kubernetes cluster hosting Palette SaaS are launched in private subnets. All ports on the nodes are protected from external access. - -In self-hosted Palette installations, customers manage their own SSH public keys unless an agreement is in place for Spectro Cloud to maintain their environment. - -
- -# Resources - -[Tenant Cluster Security](/security/product-architecture/tenant-cluster) - -
- -
- -
- -
- diff --git a/content/docs/09.5-security/11-product-architecture/04-self-hosted-operation.md b/content/docs/09.5-security/11-product-architecture/04-self-hosted-operation.md deleted file mode 100644 index 42e4cae62f..0000000000 --- a/content/docs/09.5-security/11-product-architecture/04-self-hosted-operation.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: "Self-Hosted Operation" -metaTitle: "Self-Hosted Operation" -metaDescription: "Learn about Palette security in a self-Hosted deployment." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Self-Hosted Operation - -In self-hosted operation, where Palette is typically deployed on-prem behind a firewall, you must ensure your environment has security controls. Palette automatically generates security keys at installation and stores them in the management cluster. You can import an optional certificate and private key to match the Fully Qualified Domain Name (FQDN) management cluster. Palette supports enabling disk encryption policies for management cluster virtual machines (VMs) if required. For information about deploying Palette in a self-hosted environment, review the [Self-Hosted Installation](/enterprise-version) guide. - -In self-hosted deployments, the Open Virtualization Appliance (OVA) can operate in stand-alone mode for quick Proof of Concept (POC) or in enterprise mode, which launches a three-node High Availability (HA) cluster as the Palette management cluster. The management cluster provides a browser-based web interface that allows you to set up a tenant and provision and manage tenant clusters. You can also deploy Palette to a Kubernetes cluster by using the Palette Helm Chart. To learn more, review the [Install Using Helm Chart](/enterprise-version/deploying-palette-with-helm) guide. - - -The following points apply to self-hosted deployments: - -
- -- In deployments that require a proxy internet connection, both the Private Cloud Gateway (PCG) component and the management agent support SOCKS5 or HTTPS proxy. - - -- You manage your own SSH public keys unless an agreement is in place for Spectro Cloud to maintain your environment. - - -- Self-hosted Palette does not connect to Palette SaaS or send telemetry or customer data back to the Palette SaaS platform. - - - -
- -
- diff --git a/content/docs/09.5-security/11-product-architecture/05-tenant-cluster.md b/content/docs/09.5-security/11-product-architecture/05-tenant-cluster.md deleted file mode 100644 index 12efeea862..0000000000 --- a/content/docs/09.5-security/11-product-architecture/05-tenant-cluster.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -title: "Tenant Cluster Security" -metaTitle: "Tenant Cluster Security" -metaDescription: "Lorem ipsum dolor" -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -Tenant clusters are Kubernetes clusters that Palette deploys for customers. Tenant clusters can be launched in the customer's choice of public or private cloud or bare metal environment. Palette offers complete flexibility and control in designing these tenant clusters through a construct called [Cluster Profiles](/cluster-profiles). - -[Cluster profiles](/cluster-profiles) are cluster construction templates. Palette deploys a Kubernetes cluster based on what the profile specifies. - -A cluster profile consists of core layers that consist of an Operating System (OS), a Kubernetes distribution, networking, and storage, and any add-on layers such as monitoring, logging, and more. Palette offers several out-of-the-box choices for each profile layer in the form of packs and the flexibility for you to bring your own pack for use in Palette cluster profiles. - -Palette's flexibility and extensibility make the security of tenant clusters a shared responsibility, as listed in the table. - -|Layer |Out of the box Pack | Custom Pack| -|:---------------|:---------|:--------------| -|Operating System |Spectro Cloud Responsibility|Customer Responsibility| -|Kubernetes|Spectro Cloud Responsibility|Customer Responsibility| -|Storage|Spectro Cloud Responsibility|Customer Responsibility| -|Networking|Spectro Cloud Responsibility|Customer Responsibility| -|Add-Ons|Spectro Cloud & Customer Responsibility|Customer Responsibility| - -We ensure our out-of-the-box core layer packs are secure. You ensure security for custom packs and add-on packs you bring to Palette. Palette provides defaults for its out-of-the-box add-on layers based on third-party best practices. You have the flexibility to tune the configuration to fit your needs, making security a shared responsibility. - -
- -## Cloud Infrastructure Security - -In a public cloud, Kubernetes nodes in tenant clusters are deployed within a logically isolated virtual network that has private and public subnets. The control plane and worker nodes for the Kubernetes cluster are launched in a private network. All ports on the nodes are protected from external access. - -Each tenant cluster has a management agent that is deployed as a pod. This agent has an outbound internet connection to Palette using static Network Address Translation (NAT) with Transport Layer Security (TLS) protocol v1.2 or higher and a hardened cipher suite. The agent periodically reports health, heartbeat, and statistics and connects to Palette's public repository over HTTPS for any out-of-the-box integration packs. - -In a self-hosted environment, where Palette is typically deployed on-prem behind a firewall, you must ensure security controls in your environment. Palette automatically generates security keys at installation and stores them in the management cluster. You can import an optional certificate and private key to match the management cluster Fully Qualified Domain Name (FQDN). Palette supports enabling disk encryption policies for management cluster Virtual Machines (VMs) if required. - -
- -## Hardened Operating System - -Palette provides Ubuntu or CentOS images for supported cloud environments. Images that are hardened according to Center for Internet Security (CIS) Benchmarks are used to launch control planes and worker nodes for the Kubernetes cluster. - -Palette's OS hardening utility performs the following tasks: - -
- -- Applies the latest available security updates. - - -- Hardens SSH server parameters, network parameters (sysctl), and system files by ensuring proper file permissions are set. - - -- Removes legacy services and Graphical User Interface (GUI) packages. - -Palette allows you to set up OS patching policies. You can patch the base OS when you deploy the cluster. Refer to [OS Patching](/clusters/cluster-management/os-patching) to learn more. - -
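As a rough illustration of the kinds of steps the hardening utility performs, the following sketch applies updates, tightens a few SSH and sysctl parameters, and fixes file permissions on an Ubuntu host. These are generic CIS-style examples, not Palette's actual hardening utility, and the specific settings shown are assumptions.

```shell
# Apply the latest available security updates.
sudo apt-get update && sudo apt-get upgrade -y

# Harden SSH server parameters.
sudo sed -i 's/^#\?PermitRootLogin.*/PermitRootLogin no/' /etc/ssh/sshd_config
sudo sed -i 's/^#\?PasswordAuthentication.*/PasswordAuthentication no/' /etc/ssh/sshd_config
sudo systemctl restart ssh

# Tighten a network parameter through sysctl.
echo "net.ipv4.conf.all.accept_redirects = 0" | sudo tee /etc/sysctl.d/99-hardening.conf
sudo sysctl --system

# Ensure proper permissions are set on sensitive system files.
sudo chmod 600 /etc/ssh/sshd_config
```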
- -## Hardened Containers - -Container images for various application services are built using distroless images, which have significantly fewer packages and improve security by reducing the attack surface. - -All container images are scanned for vulnerabilities using Palo Alto Networks Prisma Cloud (Twistlock) Defender before being published to a repository or deployed to the SaaS platform. - -
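Palette's internal scanning uses Prisma Cloud (Twistlock) Defender. As a generic illustration of the same idea - scanning an image for known CVEs before it is published - an open-source scanner such as Trivy can be pointed at any image; the image name below is a placeholder.

```shell
# Scan a container image for HIGH and CRITICAL vulnerabilities before publishing it.
trivy image --severity HIGH,CRITICAL registry.example.com/app/service:1.2.3
```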
- -## Hardened Kubernetes - -Spectro Cloud has a fully automated Kubernetes verification system that adds the newest patch version of Kubernetes to its public repository. All Kubernetes packs are hardened according to CIS Benchmarks. - -We assess major Kubernetes versions based on the extent of changes. - -Kubernetes run-time security support is achieved through a variety of add-on packages, such as Sysdig Falco and Twistlock. - -You can set a schedule to start Kubernetes conformance and compliance tests using kube-bench, kube-hunter, and Sonobuoy. These tests ensure tenant clusters are secure, compliant, and up to date. - -
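The scheduled scans mentioned above use kube-bench, kube-hunter, and Sonobuoy. If you want to run similar checks manually against a cluster you can reach with `kubectl`, a minimal sketch looks like the following; the kube-bench Job manifest URL points at the upstream example and may change.

```shell
# Run the CIS Kubernetes benchmark checks with kube-bench as a one-off Job.
kubectl apply -f https://raw.githubusercontent.com/aquasecurity/kube-bench/main/job.yaml
kubectl wait --for=condition=complete job/kube-bench --timeout=120s
kubectl logs job/kube-bench

# Run a quick Sonobuoy conformance pass, download the results, and clean up.
sonobuoy run --mode quick --wait
sonobuoy retrieve
sonobuoy delete --wait
```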
- -## Kubernetes Authentication & Authorization - -Kubernetes cluster authentication can optionally be integrated with kubelogin for OpenID Connect (OIDC)-based authentication and authorization against an external Identity Provider (IDP). This enables group membership-based access control on different namespaces within the tenant Kubernetes cluster. Our Terraform provider also supports automatically setting role bindings on namespaces by user or group. - -
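As a hedged sketch of what the kubelogin integration looks like from a workstation, the commands below register the kubelogin `kubectl` plugin as an exec credential provider and then use it against the cluster. The issuer URL, client ID, user name, and namespace are placeholders - use the values from your IDP and cluster configuration.

```shell
# Register kubelogin (the kubectl oidc-login plugin) as an exec credential provider.
kubectl config set-credentials oidc-user \
  --exec-api-version=client.authentication.k8s.io/v1beta1 \
  --exec-command=kubectl \
  --exec-arg=oidc-login \
  --exec-arg=get-token \
  --exec-arg=--oidc-issuer-url=https://idp.example.com \
  --exec-arg=--oidc-client-id=tenant-cluster

# Authenticate through the IDP and access a namespace allowed by your group's role binding.
kubectl --user=oidc-user get pods --namespace team-a
```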
- -## Compliance & Security Scans - -You can initiate multiple scans on tenant clusters. These scans ensure clusters adhere to specific compliance and security standards. The scans also perform penetration tests to detect potential vulnerabilities. - -Palette supports four types of scans: compliance, security, conformance, and Software Bill of Materials (SBOM). Each scan generates reports with scan-specific details. You can initiate multiple scans of each type over time. In addition, Palette keeps a history of previous scans for comparison purposes. \ No newline at end of file diff --git a/content/docs/09.5-security/20-security-bulletins.md b/content/docs/09.5-security/20-security-bulletins.md deleted file mode 100644 index ea438015e9..0000000000 --- a/content/docs/09.5-security/20-security-bulletins.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: "Security Bulletins" -metaTitle: "Security Bulletins" -metaDescription: "Palette Security bulletins for Common Vulnerabilities and Exposures (CVEs)." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -The following are security advisories for Palette and other Spectro Cloud-related resources. - -Our security advisories follow the [CVSS standards](https://www.first.org/cvss/v3.1/specification-document#Qualitative-Severity-Rating-Scale). - -| Rating | CVSS Score | -|----------|------------| -| None | 0.0 | -| Low | 0.1 - 3.9 | -| Medium | 4.0 - 6.9 | -| High | 7.0 - 8.9 | -| Critical | 9.0 - 10.0 | - - -You can review Common Vulnerabilities and Exposures (CVE) for Palette in [CVE Reports](/security/security-bulletins/cve-reports). An index of all Palette-related CVEs is available in the [CVE Index](/security/security-bulletins/index). - - -# Resources - - -- [CVE Reports](/security/security-bulletins/cve-reports) - - -- [CVE Index](/security/security-bulletins/index) - 
\ No newline at end of file diff --git a/content/docs/09.5-security/20-security-bulletins/10-cve-reports.md b/content/docs/09.5-security/20-security-bulletins/10-cve-reports.md deleted file mode 100644 index 62c27193aa..0000000000 --- a/content/docs/09.5-security/20-security-bulletins/10-cve-reports.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: "CVE Reports" -metaTitle: "CVE Reports" -metaDescription: "Security bulletins for Common Vulnerabilities and Exposures (CVEs) related to Palette" -icon: "" -hideToC: false -fullWidth: false ---- - -# Security Bulletins - - - - - - - -## March 20, 2023 - CVE-2023-22809 Sudo Vulnerability in Palette - 7.8 CVSS - -A security vulnerability in `sudo -e` option (aka *sudoedit*) allows a malicious user with sudoedit privileges to edit arbitrary files. The Palette container `palette-controller-manager:mold-manager` incorporates a sudo version affected by sudoers policy bypass in sudo when using sudoedit. - -All versions of Palette before v2.6.70 are affected. - -
- -#### Impact - -A local user with permission to edit files can use this flaw to change a file not permitted by the security policy, resulting in privilege escalation. - -
- -#### Resolution - -* For Palette SaaS, this has been addressed and requires no user action. -* For Palette self-hosted deployments, upgrade to version v2.6.70 or later to address the reported vulnerability. - -
- -#### Workarounds - -None. - -
- -#### References - -* [CVE-2023-22809](https://nvd.nist.gov/vuln/detail/cve-2023-22809) - - -## August 4, 2022 - CVE-2022-1292 c_rehash script vulnerability in vSphere CSI pack - 9.8 CVSS - -On May 3, 2022, OpenSSL published a security advisory disclosing a command injection vulnerability in the `c_rehash` script included with the OpenSSL library. Some operating systems automatically execute this script as a part of normal operations, which could allow an attacker to execute arbitrary commands with elevated privileges. - -Palette is not directly affected by this vulnerability. However, if your cluster profile uses the vSphere CSI pack version v2.3 or earlier, it contains a vulnerable version of the `c_rehash` script. - - -
- -#### Impact - -The `c_rehash` script does not sanitize shell metacharacters properly to prevent command injection. This script is distributed by some operating systems, and by extension, in container images, in a manner where it is automatically executed. On such operating systems, an attacker could execute arbitrary commands with the privileges of the script. - -
- -#### Resolution - -This vulnerability has been addressed in vSphere CSI pack version v2.6 and later. - -
- -#### Workarounds - -Update cluster profiles using the vSphere CSI pack to version v2.6 or greater. Apply the updated cluster profile changes to all clusters consuming the cluster profile. - -
- -#### References - -- [CVE-2022-1292](https://nvd.nist.gov/vuln/detail/CVE-2022-1292) - - -
-
\ No newline at end of file diff --git a/content/docs/09.5-security/20-security-bulletins/20-index.md b/content/docs/09.5-security/20-security-bulletins/20-index.md deleted file mode 100644 index 0fb2f011b9..0000000000 --- a/content/docs/09.5-security/20-security-bulletins/20-index.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: "CVE Index" -metaTitle: "CVE Index" -metaDescription: "Security bulletins for Common Vulnerabilities and Exposures (CVEs) related to Palette" -icon: "" -hideToC: false -fullWidth: false ---- -# Overview - -The following is an index of all Palette-related CVEs and their disclosure year. Click on a CVE report to learn more. - -# 2023 - -- [March 20, 2023 - CVE-2023-22809 Sudo Vulnerability in Palette - 7.8 CVSS](/security/security-bulletins/cve-reports#march20,2023-cve-2023-22809sudovulnerabilityinpalette-7.8cvss) - - -# 2022 - -- [August 4, 2022 - CVE-2022-1292 c_rehash script vulnerability in vSphere CSI pack - 9.8 CVSS](/security/security-bulletins/cve-reports#august4,2022-cve-2022-1292c_rehashscriptvulnerabilityinvspherecsipack-9.8cvss) - - -
\ No newline at end of file diff --git a/content/docs/10-audit-logs.md b/content/docs/10-audit-logs.md deleted file mode 100644 index 87f9517b2d..0000000000 --- a/content/docs/10-audit-logs.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: "Audit Logs" -metaTitle: "Spectro Cloud user audit logs" -metaDescription: "Spectro Cloud logs for every event occurring under a user for every Kubernetes cluster" -icon: "admin" -hideToC: false -fullWidth: false ---- - -import WarningBox from 'shared/components/WarningBox'; - -# About Audit Logs - -The Spectro Cloud management platform application captures audit logs to track the user interaction with the application resources along with the timeline. For certain resources, the system-level modifications are also captured in the audit logs. - -The audit log contains information about the resource and the user who performed the action. The user or the system action on the resource is classified as *Create*, *Update*, and *Delete*. Every resource is categorized as a type that helps the user to scope down the audit logs. - -Audit logs are retained for the last one year. - -# Accessing Audit Logs - -Audits can be accessed for the tenant scope and the project scope. The tenant scope audits show all the activity logs across all projects and tenant actions. The project scope audits show the activity logs for the specific project. - -* The tenant scope audit logs can be accessed in the Spectro Cloud console under the **Admin > Audit Logs**. The user should have the *Tenant Admin* role or at least the `audit.get` and `audit.list` permissions at the tenant scope to access the audit logs. -* The project scope audit logs can be accessed under the **Project** *selection* > **Audit Logs**. The user should have at least the *Project Viewer* role with `audit.get` and `audit.list` permissions for the selected project to access the audit logs. -* Tenant admins (or users with appropriate permissions) can download the audit logs as a *.csv file. - -# Filtering Audit Logs - -The audit logs can be filtered based on user and resource attributes. The following attributes can be used to filter the audit logs: - -* Type - The action type on the resource. -* Resource Type - The resource type. (The resources are grouped based on the type). -* Start Date and End Date - Period range for the audit logs. - -# Adding Update Note - -For certain resources like the Cluster Profile, users can associate a custom update note in addition to the generic audit event log. On a successful save of the Cluster Profile, the user will be prompted to provide an update note about the changes made on the profile. This message will be shown when the user selects an audit log from the list. - -# Pushing the Audit Log to the AWS Cloud Trail - -Spectro Cloud users can now push the compliance, management, operational, and risk audit logs to the AWS CloudTrail. This enables continuous monitoring, security analysis, resource tracking, and troubleshooting of the workload cluster using the event history. - - -An AWS account with cloud trail created is the prerequisite. - -The permissions listed need to be enabled for CloudWatch. 
- - -## Permission List - -Ensure that the IAM user or the ROOT user role created should have the following IAM policy included for Amazon CloudWatch: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "logs:DescribeLogGroups", - "logs:CreateLogGroup", - "logs:CreateLogStream", - "logs:PutLogEvents", - "logs:DeleteLogStream", - "logs:DescribeLogStreams" - ], - "Resource": [ - ";" - ] - } - ] -} -``` -## Instructions to Push Cluster Audit Logs to AWS Trails - -* Go to Admin Settings and select Audit Trails. -* Select the wizard ‘Add new Audit Trail’ and fill in the following details: - - * Audit Name: Custom name to identify the logs - * Type: Choice of monitoring service (currently set to AWS Cloud Watch) - * Group: The log group name obtained from cloud watch logs of AWS cloud trail creation - * Region: The region of the AWS account - * Method of verification: - * Credentials: -Use the AWS Access Key and Secret Access Key to validate the AWS account for pushing the Audit log trails from Spectro Cloud console. - * STS: -Use Amazon’s unique resource identifier- ARN, to validate the AWS account for pushing the Audit log trails from Spectro Cloud console. - -* Stream Optional. -* Confirm the information to complete the audit trail creation wizard. -* The audit trail could be edited and deleted using the kebab menu. - - diff --git a/content/docs/10-audit-logs/05-kube-api-audit-logging.md b/content/docs/10-audit-logs/05-kube-api-audit-logging.md deleted file mode 100644 index f417f880d6..0000000000 --- a/content/docs/10-audit-logs/05-kube-api-audit-logging.md +++ /dev/null @@ -1,213 +0,0 @@ ---- -title: "Enable Audit Logging" -metaTitle: "Enable API Audit Logging" -metaDescription: "Learn how to configure the kube-apiserver audit logging feature for Palette." -icon: "" -hideToC: true -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Enable Kubernetes API Audit Logging - -Kubernetes auditing is a feature of the Kubernetes cluster management system that allows administrators to track and log events within the cluster. Administrators can review actions taken by users and applications and changes to the cluster's configuration or state. By enabling auditing, organizations and system administrators can better understand their users' actions and behaviors. The audit log answers common questions about what, where, when, and who. - -You can also meet internal security control requirements by enabling audit logging. Many security controls require the following capabilities. - -
- -- Ensuring administrators can trace the actions of individual users back to a specific person. - - -- Debugging issues where an unknown application is modifying resources. - -The guidance on this page is based on the upstream Kubernetes documentation and `kube-apiserver` source code. Follow the steps below to enable audit logging for the Kubernetes API server. - -
- - - -Enabling audit logging causes the API server to consume more memory, as it needs to store additional context for each request to facilitate auditing. Memory consumption depends on the audit logging configuration. - - - -# Prerequisites - -- Access to a Kubernetes cluster node. - - -- Write access to the file system. - - -- Remote access to the Kubernetes cluster master nodes. - - - -# Enable Auditing - -The Kubernetes API Audit policies define the rules for capturing events and specifying the level of detail to include. -The audit policy you create will capture all requests at the *metadata* level. To learn more about the various audit levels, visit the Kubernetes API [Audit Policy](https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/#audit-policy) documentation. - -
- -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Identify one of your cluster control-plane nodes. You can find a cluster node by navigating to the left **Main Menu** and selecting **Clusters**. Click on your cluster to access the details page and click on the **Nodes** tab. The tab contains information about each pool. Select a node from the **Master Pool** to view its IP address. - - -3. SSH into one of your control-plane nodes using its IP address and the SSH key you specified during the cluster creation process. - - - -4. From a control-plane node in the target cluster, issue the following command to create your audit policy file. - -
- - ```bash - cat << EOF > /etc/kubernetes/audit-policy.yaml - apiVersion: audit.k8s.io/v1 - kind: Policy - rules: - - level: Metadata - EOF - ``` - The audit log output will be written to a file located at **/var/log/kubernetes/audit/audit.log**. In production environments, you should ensure this log file is ingested by a logging and monitoring application. - - The **/var/log/kubernetes/audit/** directory should be backed by persistent storage to ensure that any unshipped audit logs will not be lost during an unexpected outage of the node. - -
- -5. Next, you will update the Kubernetes API server manifest file. The manifest file is located in the **/etc/kubernetes/manifests** folder. -Before you modify the manifest file, create a backup copy. - -
- - ```shell - cp /etc/kubernetes/manifests/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.backup - ``` - -6. Now that you have a backup copy of the manifest file go ahead and open up the file **/etc/kubernetes/manifests/kube-apiserver.yaml** in a text editor such as Vi or Nano. - -
- - ```shell - vi /etc/kubernetes/manifests/kube-apiserver.yaml - ``` - - Append the following YAML configuration to your kube-apiserver manifest. - -
- - ```yaml - volumeMounts: - - mountPath: /etc/kubernetes/audit-policy.yaml - name: audit - readOnly: true - - mountPath: /var/log/kubernetes/audit/ - name: audit-log - readOnly: false - volumes: - - name: audit - hostPath: - path: /etc/kubernetes/audit-policy.yaml - type: File - - name: audit-log - hostPath: - path: /var/log/kubernetes/audit/ - type: DirectoryOrCreate - ``` - -7. The next step is to update the Kubernetes API parameters with audit settings. -The top of the file contains the Kubernetes API parameters. Refer to the code snippet below to determine where to place these parameters. - -
- - ```yaml - spec: - containers: - - command: - - kube-apiserver - - --advertise-address=172.18.0.2 - - --allow-privileged=true - - --authorization-mode=Node,RBAC - ``` - -8. Go ahead and add the following audit parameters under the `- kube-apiserver` line. - -
- - ```shell - - --audit-policy-file=/etc/kubernetes/audit-policy.yaml - - --audit-log-path=/var/log/kubernetes/audit/audit.log - - --audit-log-batch-max-size=5 - - --audit-log-compress - - --audit-log-format=json - - --audit-log-maxage=30 - - --audit-log-maxbackup=100 - - --audit-log-maxsize=50 - - --audit-log-mode=batch - - --audit-log-truncate-enabled - - --audit-log-truncate-max-batch-size=10485760 - - --audit-log-truncate-max-event-size=102400 - - --audit-log-version=audit.k8s.io/v1 - ``` - -9. Save your changes and exit the file. When you exit the file, the changes will automatically get picked up by the Kubelet process and applied. - -You can also add the following Kubernetes API parameters to fine-tune the audit logging. - -| Parameter | Type | Description | -|-----------------------------------|----------|-------------------------------------------------------------------------------------------------------------------------------| -| `--audit-log-batch-max-wait` | duration | The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. Ex: `"5s"` | -| `--audit-log-batch-throttle-enable` | boolean | Whether batching throttling is enabled. Only used in batch mode. | -| `--audit-log-batch-throttle-qps` | float | The maximum average number of batches per second. Only used in batch mode | - - -To learn more about each of the Kubernetes API server flags, visit the Kubernetes API parameter [documentation page](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/). - -# Validate - -You can validate that audit logs are captured by navigating to the specified audit folder in the `--audit-log-path` parameter. - -
- -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Identify one of your cluster control-plane nodes. You can find a cluster node by navigating to the left **Main Menu** and selecting **Clusters**. Click on your cluster to access the details page and click on the **Nodes** tab. The tab contains information about each pool. Select a node from the **Master Pool** to view its IP address. - - -3. SSH into one of your control-plane nodes using its IP address and the SSH key you specified during the cluster creation process. - - - -4. From a control-plane node in the target cluster, you can validate that audit logs are captured by reviewing the audit log file in the folder you specified in the `--audit-log-path` parameter. - - -5. Display the audit file content by using the following command. Replace the file path with the audit folder you specified in the `--audit-log-path` parameter. - -
- - ```shell - cat /var/log/kubernetes/audit/audit.log - ``` - - Example Output. - ```shell - {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"3cb20ec3-e944-4059-873c-078342b38fec","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/cluster-63a1ee9100663777ef2f75c8/leases/kubeadm-bootstrap-manager-leader-election-capi","verb":"update","user":{"username":"system:serviceaccount:cluster-63a1ee9100663777ef2f75c8:palette-manager","uid":"e728f219-d5e8-4a44-92c4-5ddcf22ce476","groups":["system:serviceaccounts","system:serviceaccounts:cluster-63a1ee9100663777ef2f75c8","system:authenticated"],"extra":{"authentication.kubernetes.io/pod-name":["capi-kubeadm-bootstrap-controller-manager-688596bc4b-pxmmh"],"authentication.kubernetes.io/pod-uid":["a0e9a0fd-0812-434e-a1a4-b8af9bb98a87"]}},"sourceIPs":["192.168.161.18"],"userAgent":"manager/v0.0.0 (linux/amd64) kubernetes/$Format/leader-election","objectRef":{"resource":"leases","namespace":"cluster-63a1ee9100663777ef2f75c8","name":"kubeadm-bootstrap-manager-leader-election-capi","uid":"8e70db1f-a26c-4af5-a558-78e860ae9903","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"13660827"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2023-01-18T20:35:29.755649Z","stageTimestamp":"2023-01-18T20:35:29.760586Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding "palette-manager-admin-rolebinding" of ClusterRole "cluster-admin" to ServiceAccount "palette-manager/cluster-63a1ee9100663777ef2f75c8""}} - ``` - -# Resources - -- [Kubernetes API parameters](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/) - - -- [Kubernetes Auditing Documentation](https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/) - -
diff --git a/content/docs/12-enterprise-version.md b/content/docs/12-enterprise-version.md deleted file mode 100644 index 8fb39318e8..0000000000 --- a/content/docs/12-enterprise-version.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: "Self-Hosted Installation" -metaTitle: "Self-Hosted Installation" -metaDescription: "Understanding, installing and operating Spectro Cloud's Enterprise Self-Hosted variant." -icon: "warehouse" -hideToC: false -fullWidth: false ---- - -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; - -# Self-Hosted Installation - -Palette is available as a self-hosted platform offering. You can install the self-hosted version of Palette in your data centers or public cloud providers to manage Kubernetes clusters. You can install Palette by using the following four methods: - -
- - - - -Starting with Palette 4.0.0, the Palette CLI, and the Helm Chart, are the only supported methods for installing Palette. The Palette OVA installation method is only available for versions 3.4 and earlier. Refer to the [Install Enterprise Cluster](/enterprise-version/deploying-an-enterprise-cluster), or the [Kubernetes Install Helm Chart](/enterprise-version#kubernetesinstallhelmchart) guides for additional guidance on how to install Palette. - - - -
- -- [VMware Quick Start](/enterprise-version#vmwarequickstart) - - -- [VMware Enterprise](/enterprise-version#vmwareenterprise) - - -- [Kubernetes Install Helm Chart](/enterprise-version#kubernetesinstallhelmchart) - - -- [AirGap Install](/enterprise-version#airgapinstall) - -## VMware Quick Start - -A single-node Palette installation that is ideal for Proof of Concept (PoC) environments. Refer to the [Quick Start Installation](/enterprise-version/deploying-the-platform-installer) guide for more details. - -## VMware Enterprise - -A highly available multi-node Palette installation that is typically used for production purposes. Check out the [Enterprise Mode](/enterprise-version/deploying-an-enterprise-cluster) guide to get started. - -## Kubernetes Install Helm Chart - -Install Palette onto a Kubernetes cluster using a Helm Chart. Review the [Helm Chart Mode](/enterprise-version/deploying-palette-with-helm) guide to learn more. - - -## Airgap Install - -Palette can be installed in a VMware environment without internet access, known as an air gap installation, requiring pre-download of platform manifests, required platform packages, container images for core components, third-party dependencies, and Palette Packs, all sourced from a private rather than the default public Palette repository. - -# Download Palette Installer - -To request the Palette Self-hosted installer image, please contact our support team by sending an email to support@spectrocloud.com. Kindly provide the following information in your email: - -- Your full name -- Organization name (if applicable) -- Email address -- Phone number (optional) -- A brief description of your intended use for the Palette Self-host installer image. - -Our dedicated support team will promptly get in touch with you to provide the necessary assistance and share the installer image. - -If you have any questions or concerns, please feel free to contact support@spectrocloud.com. - - -# Upgrade Notes - -Review the [Upgrade Notes](/enterprise-version/upgrade) before attempting to upgrade Palette. - - -
- - ---- - -# Resources - - -* [System Requirements](/enterprise-version/on-prem-system-requirements) - - -* [Quick Start Mode](/enterprise-version/deploying-the-platform-installer) - - -* [Enterprise Mode](/enterprise-version/deploying-an-enterprise-cluster) - - -* [Helm Chart Mode](/enterprise-version/deploying-palette-with-helm) - - -* [System Console Dashboard](/enterprise-version/system-console-dashboard) - - -* [Creating a VMware Cloud Gateway](/clusters/data-center/vmware#creatingavmwarecloudgateway) - - -* [Create VMware Cloud Account](/clusters/data-center/vmware#creatingavmwarecloudaccount) - - -* [Deploy a VMware Cluster](/clusters/data-center/vmware#deployingavmwarecluster) - - -* [Troubleshooting](/clusters/data-center/vmware#troubleshooting) - - -* [Upgrade Notes](/enterprise-version/upgrade) - - -
- -
- diff --git a/content/docs/12-enterprise-version/00-on-prem-system-requirements.md b/content/docs/12-enterprise-version/00-on-prem-system-requirements.md deleted file mode 100644 index 5162bf61b8..0000000000 --- a/content/docs/12-enterprise-version/00-on-prem-system-requirements.md +++ /dev/null @@ -1,847 +0,0 @@ ---- -title: "System Requirements" -metaTitle: "System Requirements" -metaDescription: "An overview of the self-hosted Palette system requirements." -icon: "" -hideToC: true -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# System Requirements - -Palette is available as a self-hosted application that you install in your environment. The self-hosted version is a dedicated Palette environment hosted on VMware instances or in an existing Kubernetes cluster. Self-hosted Palette is available in the following three modes: - -| **Self-Hosted Modes** | **Description** | -| --------------------- | --------------------------------------------------------------------------------- | -| **VMWare Enterprise Mode** | A multi-node, highly available version for production purposes. | -| **VMWare Quick Start Mode** | A single VM deployment of the platform that is ideal for use in Proofs of Concept (PoCs). | -| **Helm Chart Mode** | Install Palette in an existing Kubernetes cluster using a Helm Chart. | - -The next sections describe specific requirements for all modes. - -
- -## Prerequisites - -The following are prerequisites for deploying a Kubernetes cluster in VMware: -* vSphere version 7.0 or above. vSphere 6.7 is supported but not recommended as it reached end of general support in 2022. - - -* Configuration Requirements - A Resource Pool needs to be configured across the hosts, onto which the workload clusters will be provisioned. Every host in the Resource Pool will need access to shared storage, such as vSAN, to use high-availability control planes. Network Time Protocol (NTP) must be configured on each ESXi host. - - -* You need an active vCenter account with all the permissions listed below in the VMware Cloud Account Permissions section. - - -* Install a Private Cloud Gateway for VMware as described in the Creating a VMware Cloud Gateway section. Installing the Private Cloud Gateway automatically registers a cloud account for VMware in Palette. You can register additional VMware cloud accounts in Palette as described in the Creating a VMware Cloud account section. - -* Kubernetes version 1.19 minimum when installing Palette in a cluster using a Helm Chart. We recommend using managed Kubernetes, such as Amazon EKS and Azure EKS. - -* Subnet with egress access to the internet (direct or via proxy): - * For proxy: HTTP_PROXY, HTTPS_PROXY (both are required). - * Outgoing internet connection on port 443 to api.spectrocloud.com. - - -* The Private cloud gateway IP requirements are: - * One (1) node - one (1) IP or three (3) nodes - three (3) IPs. - * One (1) Kubernetes control-plane VIP. - * One (1) Kubernetes control-plane extra. - - -* Assign IPs for application workload services (e.g., Load Balancer services). - - -* A DNS to resolve public internet names (e.g., api.spectrocloud.com). - - -* Shared Storage between vSphere hosts. - - -* A cluster profile created in Palette for VMware. - - -* Zone Tagging: A dynamic storage allocation for persistent storage. - - -### Zone Tagging - - Zone tagging is required for dynamic storage allocation, across fault domains, when provisioning workloads that require persistent storage. This is required for the installation of the Palette platform itself and is also useful for Workloads deployed in the Tenant Clusters, if they have persistent storage needs. Use vSphere tags on data centers (kubernetes-region) and compute clusters (kubernetes-zone) to create distinct zones in your environment. - - As an example, assume your vCenter environment includes three compute clusters: *cluster-1*, *cluster-2*, and *cluster-3* as part of data center dc-1. You can tag them as follows: - -| **vSphere Object** | **Tag Category** | **Tag Value** | -| ------------------ | ---------------- | ------------- | -| dc-1 | k8s-region | region1 | -| cluster-1 | k8s-zone | az1 | -| cluster-2 | k8s-zone | az2 | -| cluster-3 | k8s-zone | az3 | - - -**Note**: The exact values for the kubernetes-region and kubernetes-zone tags can be different from the ones described in the example above, as long as these are unique. -
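If you manage vSphere from the command line, the region and zone tags in the example above can be created with a tool such as `govc` (part of govmomi). The following is a sketch only - it assumes `govc` is already configured with your vCenter credentials, and that the inventory paths match the example data center and cluster names.

```shell
# Create the tag categories once per vCenter.
govc tags.category.create k8s-region
govc tags.category.create k8s-zone

# Create the region and zone tags used in the example.
govc tags.create -c k8s-region region1
govc tags.create -c k8s-zone az1

# Attach the tags to the data center and a compute cluster.
govc tags.attach region1 /dc-1
govc tags.attach az1 /dc-1/host/cluster-1
```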
- -### Tag Requirements -Observe the following requirements when creating tags: -* A valid tag must consist of alphanumeric characters. -* The tag must start and end with an alphanumeric character. -* The regex used for validation is `(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?`. - -**Example Tags:** -* MyValue -* my_value -* 12345 - - - - - -## VMware Privileges - -The vSphere user account that is deploying Palette must have the following minimum vSphere privileges. The **Administrator** role provides super-user access to all vSphere objects. For users without the **Administrator** role, one or more custom roles can be created based on the tasks the user performs. -Permissions and privileges vary depending on the vSphere version you are using. - -Select the tab that corresponds with your vSphere version. -
- - - - - - -## Root-Level Role Privileges - -
- -The root-level role privileges are applied to root object and Datacenter objects only. - -|**vSphere Object**|**Privileges**| -|---------------|----------| -|**Cns**|Searchable| -|**Datastore**|Browse datastore -|**Host**|Configuration -|| Storage partition configuration -|**vSphere** **Tagging**|Create vSphere Tag| -||Edit vSphere Tag| -|**Network**|Assign network| -|**Sessions**|Validate session| -|**VM Storage Policies**|View VM storage policies| -|**Storage views**|View| - -
- -## Spectro Role Privileges - - - - - - -#### Cns Privileges - - Searchable - - - - - -#### Datastore Privileges - - Allocate Space - - Browse Datastore - - Low level file operations - - Remove file - - Update virtual machine files - - Update virtual machine metadata - - - - - - - #### Folder Privileges - - Create folder - - Delete folder - - Move folder - - Rename folder - - - - - - #### Host Privileges - - Local Operations - * Reconfigure virtual machine - - - - - -
- - - -If the network is a Distributed Port Group under a vSphere Distributed Switch (VDS), ReadOnly access to the VDS without “Propagate to children” needs to be provided. - - - -
- - #### Network Privileges - - - Assign Network - -
- - - - #### Resource Privileges - - - Apply recommendation - - Assign virtual machine to resource pool - - Migrate powered off virtual machine - - Migrate powered on virtual machine - - Query vMotion - - - - - - #### Sessions Privileges - - Validate session - - - - - - #### VM Storage Policies Privileges - - - View access for VM storage policies is required. Ensure the privilege `StorageProfile.View` is available. Refer to the [VM Storage Policies Privileges](https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-security/GUID-DECEAE60-58CB-4B30-8874-FA273573E6B5.html) resource to learn more. - - - - - - #### Storage Views Privileges - - View - - - - - - - #### Task Privileges - - - Create task - - Update task - - - - - - #### vApp Privileges - - - Import - - View OVF environment - - vApp application configuration - - vApp instance configuration - - - - - - #### vSphere Tagging - - - Create vSphere Tag - - Edit vSphere Tag - - - - - - - #### Virtual Machines Privileges - - -
- -| | | | -| ------------------------- | ------------------------------------------- | ------------------------------------- | -| **Change Configuration** | | | -| | Change Settings | Extend virtual disk | -| | Change Swapfile Placement | Modify device settings | -| | Configure host USB device | Query Fault Tolerance compatibility | -| | Configure raw device | Query unowned files | -| | Add existing disk | Reload from path | -| | Add new disk | Remove disk | -| | Add or remove device | Rename | -| | Change resource | Reset guest information | -| | Configure managedBy | Set annotation | -| | Display connection settings | Toggle fork parent | -| | Advanced configuration | Upgrade virtual machine compatibility | -| | Change CPU count | | -| **Guest operations** | | | -| | Guest operation alias modification | Guest operation alias query | -| | Guest operation modifications | Guest operation queries | -| | Guest operation program execution | | -| **Interaction** | | | -| | Power off | Power on | -| **Inventory** | | | -| | Create from existing | Move | -| | Create new | Remove | -| **Provisioning** | | | -| | Allow disk access | Customize guest | -| | Allow file access | Deploy template | -| | Allow read-only disk access | Mark as template | -| | Allow virtual machine download | Mark as virtual machine | -| | Allow virtual machine files upload | Modify customization specification | -| | Clone template | Promote disks | -| | Clone virtual machine | Read customization specifications | -| | Create template from virtual machine | | -| **Service Configuration** | | | -| | Allow notifications | Modify service configuration | -| | Allow polling of global event notifications | Query service configurations | -| | Manage service configurations | Read service configuration | -| **Snapshot Management** | | | -| | Create snapshot | Remove snapshot | -| | Rename snapshot | Revert to snapshot | -| **vSphere Replication** | | | -| | Configure replication | Monitor replication | -| | Monitor replication | | - - -
- - - - #### vSAN - - - Cluster - * ShallowRekey - - - -
- -
- - - - -## Root-Level Role Privileges - -
- -The root-level role privileges are applied to root object and Datacenter objects only. - -|**vSphere Object**|**Privileges**| -|---------------|----------| -|**Cns**|Searchable| -|**Datastore**|Browse datastore -|**Host**|Configuration -|| Storage partition configuration -|**vSphere** **Tagging**|Create vSphere Tag| -||Edit vSphere Tag| -|**Network**|Assign network| -|**Sessions**|Validate session| -|**Profile-driven storage**|Profile-driven storage view| -|**Storage views**|View| - -
- -## Spectro Role Privileges - - - - - - -#### Cns Privileges - - Searchable - - - - - -#### Datastore Privileges - - Allocate Space - - Browse Datastore - - Low level file operations - - Remove file - - Update virtual machine files - - Update virtual machine metadata - - - - - - - #### Folder Privileges - - Create folder - - Delete folder - - Move folder - - Rename folder - - - - - - #### Host Privileges - - Local Operations - * Reconfigure virtual machine - - - - - -
- - - - -If the network is a Distributed Port Group under a vSphere Distributed Switch (VDS), ReadOnly access to the VDS without “Propagate to children” needs to be provided. - - - - #### Network Privileges - - - Assign Network - -
- - - - #### Resource Privileges - - - Apply recommendation - - Assign virtual machine to resource pool - - Migrate powered off virtual machine - - Migrate powered on virtual machine - - Query vMotion - - - - - - #### Sessions Privileges - - Validate session - - - - - - #### Profile Driven Storage - - Profile-driven storage view - - - - - - #### Storage Views Privileges - - View - - - - - - - #### Task Privileges - - - Create task - - Update task - - - - - - #### vApp Privileges - - - Import - - View OVF environment - - vApp application configuration - - vApp instance configuration - - - - - - #### vSphere Tagging - - - Create vSphere Tag - - Edit vSphere Tag - - - - - - - #### Virtual Machines Privileges - - -
- -| | | | -| ------------------------- | ------------------------------------------- | ------------------------------------- | -| **Change Configuration** | | | -| | Change Settings | Extend virtual disk | -| | Change Swapfile Placement | Modify device settings | -| | Configure host USB device | Query Fault Tolerance compatibility | -| | Configure raw device | Query unowned files | -| | Add existing disk | Reload from path | -| | Add new disk | Remove disk | -| | Add or remove device | Rename | -| | Change resource | Reset guest information | -| | Configure managedBy | Set annotation | -| | Display connection settings | Toggle fork parent | -| | Advanced configuration | Upgrade virtual machine compatibility | -| | Change CPU count | | -| **Guest operations** | | | -| | Guest operation alias modification | Guest operation alias query | -| | Guest operation modifications | Guest operation queries | -| | Guest operation program execution | | -| **Interaction** | | | -| | Power off | Power on | -| **Inventory** | | | -| | Create from existing | Move | -| | Create new | Remove | -| **Provisioning** | | | -| | Allow disk access | Customize guest | -| | Allow file access | Deploy template | -| | Allow read-only disk access | Mark as template | -| | Allow virtual machine download | Mark as virtual machine | -| | Allow virtual machine files upload | Modify customization specification | -| | Clone template | Promote disks | -| | Clone virtual machine | Read customization specifications | -| | Create template from virtual machine | | -| **Service Configuration** | | | -| | Allow notifications | Modify service configuration | -| | Allow polling of global event notifications | Query service configurations | -| | Manage service configurations | Read service configuration | -| **Snapshot Management** | | | -| | Create snapshot | Remove snapshot | -| | Rename snapshot | Revert to snapshot | -| **vSphere Replication** | | | -| | Configure replication | Monitor replication | -| | Monitor replication | | - - -
- - - - #### vSAN - - - Cluster - * ShallowRekey - - - -
- - - -
- - - - -## Root-Level Role Privileges - -
- -The root-level role privileges are applied to root object and Datacenter objects only. - -|**vSphere Object**|**Privileges**| -|---------------|----------| -|**Cns**|Searchable| -|**Datastore**|Browse datastore -|**Host**|Configuration -|| Storage partition configuration -|**vSphere** **Tagging**|Create vSphere Tag| -||Edit vSphere Tag| -|**Network**|Assign network| -|**Sessions**|Validate session| -|**Profile-driven storage**|Profile-driven storage view| -|**Storage views**|View| - -
- -## Spectro Role Privileges - - - - - - -#### Cns Privileges - - Searchable - - - - - -#### Datastore Privileges - - Allocate Space - - Browse Datastore - - Low level file operations - - Remove file - - Update virtual machine files - - Update virtual machine metadata - - - - - - - #### Folder Privileges - - Create folder - - Delete folder - - Move folder - - Rename folder - - - - - - #### Host Privileges - - Local Operations - * Reconfigure virtual machine - - - - - -
- - - -If the network is a Distributed Port Group under a vSphere Distributed Switch (VDS), ReadOnly access to the VDS without “Propagate to children” needs to be provided. - - - - #### Network Privileges - - - Assign Network - -
- - - - #### Resource Privileges - - - Apply recommendation - - Assign virtual machine to resource pool - - Migrate powered off virtual machine - - Migrate powered on virtual machine - - Query vMotion - - - - - - #### Sessions Privileges - - Validate session - - - - - - #### Profile Driven Storage - - Profile-driven storage view - - - - - - #### Storage Views Privileges - - View - - - - - - - #### Task Privileges - - - Create task - - Update task - - - - - - #### vApp Privileges - - - Import - - View OVF environment - - vApp application configuration - - vApp instance configuration - - - - - - #### vSphere Tagging - - - Create vSphere Tag - - Edit vSphere Tag - - - - - - - #### Virtual Machines Privileges - - -
- -| | | | -| ------------------------- | ------------------------------------------- | ------------------------------------- | -| **Change Configuration** | | | -| | Change Settings | Extend virtual disk | -| | Change Swapfile Placement | Modify device settings | -| | Configure host USB device | Query Fault Tolerance compatibility | -| | Configure raw device | Query unowned files | -| | Add existing disk | Reload from path | -| | Add new disk | Remove disk | -| | Add or remove device | Rename | -| | Change resource | Reset guest information | -| | Configure managedBy | Set annotation | -| | Display connection settings | Toggle fork parent | -| | Advanced configuration | Upgrade virtual machine compatibility | -| | Change CPU count | | -| **Guest operations** | | | -| | Guest operation alias modification | Guest operation alias query | -| | Guest operation modifications | Guest operation queries | -| | Guest operation program execution | | -| **Interaction** | | | -| | Power off | Power on | -| **Inventory** | | | -| | Create from existing | Move | -| | Create new | Remove | -| **Provisioning** | | | -| | Allow disk access | Customize guest | -| | Allow file access | Deploy template | -| | Allow read-only disk access | Mark as template | -| | Allow virtual machine download | Mark as virtual machine | -| | Allow virtual machine files upload | Modify customization specification | -| | Clone template | Promote disks | -| | Clone virtual machine | Read customization specifications | -| | Create template from virtual machine | | -| **Service Configuration** | | | -| | Allow notifications | Modify service configuration | -| | Allow polling of global event notifications | Query service configurations | -| | Manage service configurations | Read service configuration | -| **Snapshot Management** | | | -| | Create snapshot | Remove snapshot | -| | Rename snapshot | Revert to snapshot | -| **vSphere Replication** | | | -| | Configure replication | Monitor replication | -| | Monitor replication | | - - -
- - - - #### vSAN - - - Cluster - * ShallowRekey - - - -
- - - - -
-
- - -
- - ---- - -## Network Requirements - -* Outgoing access from the platform VMs to the internet, either directly or via a proxy. - - -* An IP address (static or DHCP) for the quick start virtual machine (also used as the installer for the enterprise version). - - -* A block of five (5) IP addresses reserved for an enterprise cluster: one IP address for each of the three enterprise cluster VMs, one IP to be used as a VIP, and an additional IP reserved for rolling upgrades. - - -* Interconnectivity across all three (3) VMs on all ports. - - -* Connectivity from the virtual machines to vCenter. - - - -Ensure your data center CIDR IP address does not overlap with the Kubernetes PodCIDR range. During installation, you can change the Kubernetes PodCIDR range settings. - - - -## Proxy Requirements -* If a proxy is used for outgoing connections, it must support both HTTPS and HTTP traffic. All Palette components communicate over HTTPS by default. An HTTP proxy can be used when HTTP is the only supported protocol, such as connecting to a private image registry that only supports HTTP. - -* Connectivity to all [Proxy Whitelist](/clusters#proxywhitelist) domains must be allowed. - - -## Self-Hosted Configuration - -This section lists the resource requirements for self-hosted Palette at various capacity levels. The terms *small*, *medium*, and *large* describe the instance size of the worker pools that Palette is installed on. The following table lists the resource requirements for each size. - -
- - - -Do not exceed the recommended maximum number of deployed nodes and clusters in the environment. We have tested Palette's performance up to these recommended maximums, and exceeding them can negatively impact performance and result in instability. The active workload limit refers to the maximum number of active nodes and pods at any given time. - - -
- - - -| **Size** | **Nodes**| **CPU**| **Memory**| **Storage**| **MongoDB Storage Limit**| **MongoDB Memory Limit**| **MongoDB CPU Limit** |**Total Deployed Nodes**| **Deployed Clusters with 10 Nodes**| -|----------|----------|--------|-----------|------------|--------------------|-------------------|------------------|----------------------------|----------------------| -| Small | 3 | 8 | 16 GB | 60 GB | 20 GB | 4 GB | 2 | 1000 | 100 | -| Medium (Recommended) | 3 | 16 | 32 GB | 100 GB | 60 GB | 8 GB | 4 | 3000 | 300 | -| Large | 3 | 32 | 64 GB | 120 GB | 80 GB | 12 GB | 6 | 5000 | 500 | - - -#### Instance Sizing - -| **Configuration** | **Active Workload Limit** | -|---------------------|---------------------------------------------------| -| Small | Up to 1000 Nodes each with 30 Pods (30,000 Pods) | -| Medium (Recommended) | Up to 3000 Nodes each with 30 Pods (90,000 Pods)| -| Large | Up to 5000 Nodes each with 30 Pods (150,000 Pods) | - -
- - -## Best Practices - -The following steps are optional but recommended for production environments. - - -| | | -| ---------------------------- | ------------------------------------------------------------ | -| **DNS Mapping** | A DNS record is used to access the Palette Management Console. While the Virtual IP Address (VIP) configured on the platform can be used to access the platform, we recommend that you reserve a DNS record for this purpose and map it to the VIP after installation. | -| **SMTP Settings** | Configure the SMTP settings to enable the Palette platform to send out email notifications. Email notifications are sent to new users when they are initially onboarded onto the platform so they can activate their accounts and reset their passwords later. | -| **Trusted Certificate** | Configure your platform with trusted CA certificates. | -| **FTP Location for backups** | Configure an FTP location for platform backups and schedule daily backups. | diff --git a/content/docs/12-enterprise-version/01-deploying-the-platform-installer.md b/content/docs/12-enterprise-version/01-deploying-the-platform-installer.md deleted file mode 100644 index d0595f6b93..0000000000 --- a/content/docs/12-enterprise-version/01-deploying-the-platform-installer.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: "Install Using Quick-Start Mode" -metaTitle: "Install Using Quick-Start Mode" -metaDescription: "Learn how to install self-hosted Palette by deploying a single node instance." -icon: "" -hideToC: false -fullWidth: false ---- - -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# VMware Quick Start Installation - -The Palette On-Prem Quick Start Mode is a single-node installation of the Palette platform, used in PoC environments to quickly explore the capabilities of the Palette platform. We do not recommend it for production deployments because it does not provide high availability or scalability. -
- - - - -Starting with Palette 4.0.0, the Palette CLI, and the Helm Chart, are the only supported methods for installing Palette. The Palette OVA installation method is only available for versions 3.4 and earlier. Refer to the [Install Enterprise Cluster](/enterprise-version/deploying-an-enterprise-cluster), or the [Kubernetes Install Helm Chart](/enterprise-version#kubernetesinstallhelmchart) guides for additional guidance on how to install Palette. - - - -# Deploy Platform Installer - -1. Log in to the vSphere console and navigate to VMs and Templates. -2. Navigate to the Datacenter and folder you would like to use for the installation. -3. Right-click on the folder and invoke the VM creation wizard by selecting the option to Deploy OVF Template. -4. Complete all the steps of the OVF deployment wizard. Provide values for various fields as follows: - * URL: <Location of the platform installer> - * Virtual Machine Name: <vm name> - * Folder: <Select desired folder> - * Select the desired Datacenter, Storage, and Network for the platform installer VM as you proceed through the next steps. The Platform installer VM requires an outgoing internet connection. Select a network that provides this access directly, or via a proxy. - * Customize the template as follows: - * Name: <The name to identify the platform installer> - * SSH Public Keys: Create a new SSH key pair (or pick an existing one). Enter the public key in this field. The public key will be installed in the installer VM to provide SSH access, as the user `ubuntu`. This is useful for troubleshooting purposes. - * Monitoring Console Password: A monitoring console is deployed in the platform installer VM to provide detailed information about the installation progress as well as to provide access to various logs. This console can be accessed after the VM is powered on at https://<VM IP Address>:5080. The default monitoring console credentials are: - - * User Name: admin - * Password: admin - - Provide a different password for the monitoring console if desired. Leave the field blank to accept the default password. - * Pod CIDR: Optional - provide an IP range exclusive to pods. This range should be different to prevent an overlap with your network CIDR. (e.g: 192.168.0.0/16) - * Service cluster IP range: Optional - assign an IP range in the CIDR format exclusive to the service clusters. This range also must not overlap with either the pod CIDR range or your network CIDR. (e.g: 10.96.0.0/12) - * Static IP Address: <VM IP Address> Optional IP address (e.g: 192.168.10.15) to be specified only if static IP allocation is desired. DHCP is used by default. - * Static IP subnet prefix: <Network Prefix> Static IP subnet prefix (e.g: 18), required only for static IP allocation. - * Static IP gateway: <Gateway IP Address> (e.g: 192.168.0.1) required only for static IP allocation. - * Static IP DNS: <Name servers> Comma separated DNS addresses (e.g: 8.8.8.8, 192.168.0.8), required only for static IP allocation. - * HTTP Proxy: <endpoint for the http proxy server>, e.g: _http://USERNAME:PASSWORD@PROXYIP:PROXYPORT_. An optional setting, required only if a proxy is used for outbound connections. - * HTTPS Proxy: <endpoint for the https proxy server>, e.g: _http://USERNAME:PASSWORD@PROXYIP:PROXYPORT_. An optional setting, required only if a proxy is used for outbound connections. - * NO Proxy: <comma-separated list of vCenter server, local network CIDR, hostnames, domain names that should be excluded from proxying>, e.g: _vcenter.company.com_,10.10.0.0/16. 
- * Spectro Cloud Repository settings: The platform installer downloads various platform artifacts from a repository. Currently, this repository is hosted by Palette and the installer VM needs to have an outgoing internet connection to the repository. Upcoming releases will enable the option to privately host a dedicated repository to avoid having to connect outside. This option is currently unavailable. Leave all the fields under Palette Repository settings blank - * Finish the OVF deployment wizard and wait for the template to be created. This may take a few minutes as the template is initially downloaded. -5. Power on the VM. - -# Monitor Installation - -The platform installer contains a web application called the Supervisor, to provide detailed progress of the installation. After the VM is powered on, perform the following steps to ensure installation is completed successfully. - -1. Open the Supervisor application in a browser window by navigating to https://<VM IP Address>:5080. -2. Observe the installation status in the Status tab. The page auto-refreshes to provide updated installation progress. -3. Once the final installation step is complete, you will see URLs to navigate to the On-Prem System Console as well as the Management Console. - * On-Prem System Console: Initial login:admin/admin - * Management Console: Tenant credentials to be created and used [Configure System for First Time](./#configuresystemforfirsttime). -4. Navigate to the On-Prem System Console to perform the initial configuration. Additional administration tasks like SMTP setup, certificate management, etc. can also be performed from the On-Prem System Console. - - -Typically, the installation takes around 10 mins after powering on the virtual machine. If the installation fails or takes an unusually long time, please look for failure messages in the install status page, or access system logs from the "Logs" tab to get detailed information about the failure. - - -# Configure System for First Time - -The On-Prem System Console provides options for performing various administrative setup tasks. Most of these are optional and can be performed at any later time. To quickly start using the platform's functionality, all that is needed is to create the first tenant and activate it. - -1. Open the On-Prem System Console application in a browser window by navigating to https://<VM IP Address>/system. -2. Log in using username: 'admin' and password: 'admin'. -3. Reset the default password. -4. Choose "Quick Start" when prompted for a choice for the startup mode. -5. Navigate to the Tenant Management section and create your first tenant. -6. Copy the tenant activation link and invoke it in a browser window to activate the newly created tenant. -7. Enter the desired password and proceed and login as a tenant into the Management Console. - -Next, continue to perform various tasks as desired from the management console like creating gateways, cloud accounts, cluster profiles, and launching of clusters. 
diff --git a/content/docs/12-enterprise-version/02-deploying-an-enterprise-cluster.md b/content/docs/12-enterprise-version/02-deploying-an-enterprise-cluster.md deleted file mode 100644 index 5a841b9749..0000000000 --- a/content/docs/12-enterprise-version/02-deploying-an-enterprise-cluster.md +++ /dev/null @@ -1,398 +0,0 @@ ---- -title: "Install Enterprise Cluster" -metaTitle: "Install Enterprise Cluster" -metaDescription: "Learn how to install self-hosted Palette or convert a self-hosted single node cluster to a highly available three node cluster." -icon: "" -hideToC: false -fullWidth: false ---- - -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; -import Tabs from 'shared/components/ui/Tabs'; - -# Install Enterprise Cluster - -You have two options for installing Palette. You can use the Palette CLI to install a new self-hosted Palette instance or convert an existing single-node cluster (Quick-Start Mode) to a highly available three-node cluster. Select the tab below that corresponds to your installation type. - - -
- - - -Starting with Palette 4.0.0, the Palette CLI and the Helm Chart are the only supported methods for installing Palette. The Palette OVA installation method is only available for versions 3.4 and earlier. Refer to the CLI tab below or the [Kubernetes Install Helm Chart](/enterprise-version#kubernetesinstallhelmchart) guide for additional guidance on how to install Palette. - - - -
- - - - -You install Palette using the Palette Command Line Interface (CLI), which prompts you for the details needed to create a configuration file and then deploys a three-node enterprise cluster for high availability (HA). You can invoke the Palette CLI on any Linux x86-64 system with the Docker daemon installed and connectivity to the VMware vSphere environment where Palette will be deployed. - - -## Prerequisites - - -- An AMD64 Linux environment with connectivity to the VMware vSphere environment. - - - -- [Docker](https://docs.docker.com/engine/install/) or equivalent container runtime installed and available on the Linux host. - - - -- Palette CLI installed and available. Refer to the Palette CLI [Install](/palette-cli/install-palette-cli#downloadandsetup) page for guidance. - - - -- Review required VMware vSphere [permissions](/enterprise-version/on-prem-system-requirements#vmwareprivileges). - - - -- We recommend the following resources for Palette. Refer to the [Palette size guidelines](/enterprise-version/on-prem-system-requirements#self-hostedconfiguration) for additional sizing information. - - - 8 CPUs per VM. - - - 16 GB Memory per VM. - - - 100 GB Disk Space per VM. - - -- The following network ports must be accessible for Palette to operate successfully. - - - TCP/443: Inbound to and outbound from the Palette management cluster. - - - TCP/6443: Outbound traffic from the Palette management cluster to the deployed cluster's Kubernetes API server. - - -- Ensure you have an SSL certificate that matches the domain name you will assign to Palette. You will need this to enable HTTPS encryption for Palette. Reach out to your network administrator or security team to obtain the SSL certificate. You need the following files (see the sketch after this list for one way to prepare them): - - - x509 SSL certificate file in base64 format. - - - x509 SSL certificate key file in base64 format. - - - x509 SSL certificate authority file in base64 format. This file is optional. - - -- Zone tagging is required for dynamic storage allocation across fault domains when provisioning workloads that require persistent storage. Refer to [Zone Tagging](/enterprise-version/on-prem-system-requirements#zonetagging) for information. - - -- Assigned IP addresses for application workload services, such as Load Balancer services. - - -- Shared Storage between VMware vSphere hosts. -
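-As noted in the SSL certificate and network port prerequisites above, the following is a rough sketch of preparing the base64-encoded certificate files and spot-checking connectivity. The file names and hosts are placeholders, and the `base64 -w 0` flag is specific to GNU coreutils (on macOS, use `base64 -i <file>` instead).
-
-```shell
-# Base64-encode the certificate, key, and optional certificate authority files.
-base64 -w 0 tls.crt > tls.crt.b64
-base64 -w 0 tls.key > tls.key.b64
-base64 -w 0 ca.crt > ca.crt.b64
-
-# Spot-check outbound connectivity on port 443 (placeholder hosts).
-nc -zv vcenter.example.com 443
-nc -zv api.spectrocloud.com 443
-```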
- - - -Self-hosted Palette installations provide a system Private Cloud Gateway (PCG) out-of-the-box and typically do not require a separate, user-installed PCG. However, you can create additional PCGs as needed to support provisioning into remote data centers that do not have a direct incoming connection from the Palette console. To learn how to install a PCG on VMware, check out the [VMware](/clusters/data-center/vmware) guide. - - - -
- -## Deployment - - -The video below provides a demonstration of the installation wizard and the prompts you will encounter. Take a moment to watch the video before you begin the installation process. Make sure to use values that are appropriate for your environment. Use the **three-dots Menu** in the lower right corner of the video to expand the video to full screen and to change the playback speed. - -
- - `video: title: "palette-cli-install": /./palette-install.mp4` - -Use the following steps to install Palette. - - -
- -1. Open a terminal window and invoke the Palette CLI by using the `ec` command to install the enterprise cluster. The interactive CLI prompts you for configuration details and then initiates the installation. For more information about the `ec` subcommand, refer to [Palette Commands](/palette-cli/commands#ec). - -
- - ```bash - palette ec install - ``` - -2. At the **Enterprise Cluster Type** prompt, choose **Palette**. - - -3. Type `y` if you want to use Ubuntu Pro. Otherwise, type `n`. If you choose to use Ubuntu Pro, you will be prompted to enter your Ubuntu Pro token. - - -4. Provide the repository URL you received from our support team. - - -5. Enter the repository credentials. - - -6. Choose `VMware vSphere` as the cloud type. This is the default. - - -7. Type an enterprise cluster name. - - -8. When prompted, enter the information listed in each of the following tables. - -
- - #### Environment Configuration - - |**Parameter**| **Description**| - |:-------------|----------------| - |**HTTPS Proxy**|Leave this blank unless you are using an HTTPS Proxy. This setting will be propagated to all EC nodes and all of its target cluster nodes. Example: `https://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| - |**HTTP Proxy**|Leave this blank unless you are using an HTTP Proxy. This setting will be propagated to all EC nodes and all of its target cluster nodes. Example: `http://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| - |**No Proxy**|The default is blank. You can add a comma-separated list of local network CIDR addresses, hostnames, and domain names that should be excluded from being a proxy. This setting will be propagated to all the nodes to bypass the proxy server. Example if you have a self-hosted environment: `maas.company.com,10.10.0.0/16`.| - |**Proxy CA Certificate Filepath**|The default is blank. You can provide the filepath of a CA certificate on the installer host. If provided, this CA certificate will be copied to each host in the PCG cluster during deployment. The provided path will be used on the PCG cluster hosts. Example: `/usr/local/share/ca-certificates/ca.crt`.| - |**Pod CIDR**|Enter the CIDR pool IP that will be used to assign IP addresses to pods in the EC cluster. The pod IP addresses should be unique and not overlap with any machine IPs in the environment.| - |**Service IP Range**|Enter the IP address range that will be used to assign IP addresses to services in the EC cluster. The service IP addresses should be unique and not overlap with any machine IPs in the environment.| - -
- - -9. Select the OCI registry type and provide the configuration values. Review the following table for more information. - -
- - #### Pack & Image Registry Configuration - - | **Parameter** | **Description** | - |---------------------------|-----------------------------------------| - | **Registry Type** | Specify the type of registry. Allowed values are `OCI` or `OCI ECR`. | - | **Registry Name** | Enter the name of the registry. | - | **Registry Endpoint** | Enter the registry endpoint. | - | **Registry Base Path** | Enter the registry base path. | - |**Allow Insecure Connection** | Bypasses x509 verification. Type `Y` if using a vSphere instance with self-signed Transport Layer Security (TLS) certificates. Otherwise, type `n`.| - | **Registry Username** or **Registry Access Key** | Enter the registry username or the access key if using `OCI ECR`. | - | **Registry Password** or **Registry Secret Key** | Enter the registry password or the secret key if using `OCI ECR`. | - | **Registry Region** | Enter the registry region. This option is only available if you are using `OCI ECR`. | - | **ECR Registry Private** | Type `y` if the registry is private. Otherwise, type `n`. | - | **Use Public Registry for Images** | Type `y` to use a public registry for images. Type `n` to a different registry for images. If you are using another registry for images, you will be prompted to enter the registry URL, base path, username, and password. | - -
- -10. Next, specify the database storage size to allocate for Palette. The default is 20 GB. Refer to the [size guidelines](/vertex/install-palette-vertex#sizeguidelines) for additional information. - - - -11. The next set of prompts is for the VMware vSphere account information. Enter the information listed in the following table. - -
- - #### VMware vSphere Account Information - - |**Parameter** | **Description**| - |-----------------------------------------|----------------| - |**vSphere Endpoint** | VMware vSphere endpoint. Must be a fully qualified domain name (FQDN) or IP address without a scheme - that is, without an IP protocol, such as `https://`. Example: `vcenter.mycompany.com`.| - |**vSphere Username** | VMware vSphere account username.| - |**vSphere Password**| VMware vSphere account password.| - |**Allow Insecure Connection** | Bypasses x509 verification. Type `Y` if using a VMware vSphere instance with self-signed Transport Layer Security (TLS) certificates. Otherwise, type `n`.| - -
- - #### VMware vSphere Cluster Configuration - - This information determines where Palette will be deployed in your VMware vSphere environment. The Palette CLI will use the provided VMware credentials to retrieve information from your VMware vSphere environment and present options for you to select from. - -
- - |**Parameter** | **Description**| - |-----------------------------------------|----------------| - |**Datacenter**| The installer retrieves the Datacenter automatically. | - |**Folder** | Select the folder that contains the VM instance. | - | **Cluster** | Select the cluster where you want to deploy Palette. | - | **Network** | Select the network where you want to deploy Palette. | - | **Resource Pool** | Select the resource pool where you want to deploy Palette. | - | **Datastore** | Select the datastore where you want to deploy Palette. | - |**Fault Domains** | Configure one or more fault domains by selecting values for these properties: Cluster, Network (with network connectivity), Resource Pool, and Storage Type (Datastore or VM Storage Policy). Note that when configuring the Network, if you are using a distributed switch, choose the network that contains the switch. | - |**NTP Servers** | You can provide a list of Network Time Protocol (NTP) servers. | - |**SSH Public Keys** | Provide any public SSH keys to access your Palette VMs. This option opens up your system's default text editor. Vi is the default text editor for most Linux distributions. To review basic vi commands, check out the [vi Commands](https://www.cs.colostate.edu/helpdocs/vi.html) reference. | - - -12. Specify the IP pool configuration. The placement type can be Static or Dynamic Domain Name Server (DDNS). Choosing static placement creates an IP pool from which VMs are assigned IP addresses. Choosing DDNS assigns IP addresses using DNS. - -
- - #### Static Placement Configuration - | **Parameter** | **Description** | - |---------------------------|-----------------------------------------| - | **IP Start range** | Enter the first address in the EC IP pool range. | - | **IP End range** | Enter the last address in the EC IP pool range. | - | **Network Prefix** | Enter the network prefix for the IP pool range. Valid values are in [0, 32]. Example: `18`. | - | **Gateway IP Address** | Enter the IP address of the static IP gateway. | - | **Name servers** | Comma-separated list of DNS name server IP addresses. | - | **Name server search suffixes** | An optional comma-separated list of DNS search domains. | - - -
- - -13. The last set of prompts is for the vSphere machine configuration. Enter the information listed in the following table. - -
- - #### vSphere Machine Configuration - - |**Parameter** | **Description**| - |-----------------------------------------|----------------| - | **Number of CPUs** | The number of CPUs allocated to each VM node instance.| - | **Memory** | The amount of memory allocated to each VM node instance.| - | **Disk Size** | The size of the disk allocated to each VM node instance.| - - -
- - - The installation process stands up a [kind](https://kind.sigs.k8s.io/) cluster locally that will orchestrate the remainder of the installation. The installation takes some time. - -
- - Upon completion, the enterprise cluster configuration file named `ec.yaml` contains the information you provided, and its location is displayed in the terminal. Credentials and tokens are encrypted in the YAML file. - -
- - ```bash hideClipboard - ==== Enterprise Cluster config saved ==== - Location: :/home/spectro/.palette/ec/ec-20230706150945/ec.yaml - ``` - -
- - When the installation is complete, Enterprise Cluster Details that include a URL and default credentials are displayed in the terminal. You will use these to access the Palette system console. - -
- - ```bash hideClipboard - ==================================== - ==== Enterprise Cluster Details ==== - ==================================== - Console URL: https://10.10.189.100/system - Username: ********** - Password: ********** - ``` - - -14. Copy the URL to the browser to access the system console. You will be prompted to reset the password. - -
- - - - The first time you visit the Palette system console, a warning message about an untrusted SSL certificate may appear. This is expected, as you have not yet uploaded your SSL certificate to Palette. You can ignore this warning message and proceed. - - - -
- - ![Screenshot of the Palette system console showing Username and Password fields.](/palette_installation_install-on-vmware_palette-system-console.png) - -
- - -15. Log in to the system console using the credentials provided in the Enterprise Cluster Details output. After login, you will be prompted to create a new password. Enter a new password and save your changes. You will be redirected to the Palette system console. - - -16. After login, a Summary page is displayed. Palette is installed with a self-signed SSL certificate. To assign a different SSL certificate you must upload the SSL certificate, SSL certificate key, and SSL certificate authority files to Palette. You can upload the files using the Palette system console. Refer to the [Configure HTTPS Encryption](/vertex/system-management/ssl-certificate-management) page for instructions on how to upload the SSL certificate files to Palette. - - -17. The last step is to start setting up a tenant. To learn how to create a tenant, check out the [Tenant Management](/vertex/system-management/tenant-management) guide. - -
- - ![Screenshot of the Summary page showing where to click Go to Tenant Management button.](/palette_installation_install-on-vmware_goto-tenant-management.png) - - -# Validate - -You can verify the installation is successful if you can access the system console using the IP address provided in Enterprise Cluster Details and if the Summary page displays the **Go to Tenant Management** button. - -You can also validate that a three-node Kubernetes cluster is launched and Palette is deployed on it. - -
- -1. Log in to the vCenter Server by using vSphere Client. - - -2. Navigate to the Datacenter and locate your VM instance. - - -3. Select the VM to access its details page, and verify three nodes are listed. - - -4. Open a web browser session, and use the IP address provided in Enterprise Cluster Details at the completion of the installation to connect to the Palette system console. Copy the IP address to the address bar and append `/system`. - - -5. Log in using your credentials. - - -6. A **Summary** page will be displayed that contains a tile with a **Go to Tenant Management** button. After initial installation, the **Summary** page shows there are zero tenants. - - - -
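-In addition to the vSphere checks above, you can confirm the node count from the command line if you have the enterprise cluster's kubeconfig available; the file path below is a placeholder.
-
-```shell
-# Expect three nodes in the Ready state.
-kubectl get nodes --kubeconfig ~/ec-cluster-kubeconfig.yaml
-```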
- - - - -## Enterprise Mode - -The Palette Enterprise Mode is a multi-node, highly-available installation of the Palette platform suitable for production purposes. Installation involves instantiating the on-prem platform installer VM and invoking the "Enterprise Cluster Migration" wizard. Please follow [these](/enterprise-version/deploying-the-platform-installer/) steps to deploy the installer VM and observe the [monitoring console](/enterprise-version/deploying-the-platform-installer/#monitorinstallation) to ensure installation is successful. After a successful installation of the platform installer, proceed to enterprise cluster migration. - -
- - - -Deployment of an enterprise cluster is a migration process from the quick start mode. You may choose to deploy the enterprise cluster on day 1, right after instantiating the platform installer VM, or use the system in the quick start mode initially and invoke the enterprise cluster migration wizard at a later point to deploy the enterprise cluster. All the data from the quick start mode is migrated to the enterprise cluster as part of this migration process. - - - - - -1. Open the On-Prem system console from a browser window by navigating to https://<VM IP Address>/system and log in. - - -2. Navigate to the Enterprise Cluster Migration wizard from the menu on the left-hand side. - - -3. Enter the vCenter credentials to be used to launch the enterprise cluster. Provide the vCenter server, username, and password. Check the `Use self-signed certificates` box if applicable. Validate your credentials and click the `Next` button to proceed to IP Pool Configuration. - - -4. Enter the IPs to be used for the Enterprise Cluster VMs as a `Range` or a `Subnet`. At least five (5) IP addresses are required in the range for the installation and ongoing management. Provide the details of the `Gateway` and the `Nameserver addresses`. Any search suffixes being used can be entered in the `Nameserver search suffix` box. Click `Next` to proceed to Cloud Settings. - - -5. Select the datacenter and the folder to be used for the enterprise cluster VMs. Select the desired compute cluster, resource pools, datastore, and network. For high availability purposes, you may choose to distribute the three VMs across multiple compute clusters. If this is desired, invoke the "Add Domain" option to enter multiple sets of properties. - - -6. Add an SSH public key and, optionally, NTP servers, and click "Confirm". - - -7. The Enterprise cluster deployment will proceed through the following three steps: - * Deployment - A three-node Kubernetes cluster is launched and the Palette platform is deployed on it. This typically takes about 10 minutes. - * Data Migration - Data from the installer VM is migrated to the newly created enterprise cluster. - * Tenant Migration - If any tenants were created prior to the enterprise cluster migration, which would typically be the case if the system was used in the quick start mode initially, all those tenants, as well as the management of any such tenant clusters previously deployed, will be migrated to the enterprise cluster. - - -8. Once the enterprise cluster is fully deployed, access the On-Prem System Console and the Management Console on this new cluster. The platform installer VM can be safely powered off at this point. - -
- -
- - -
- -# Resources - -- [Palette CLI](/palette-cli/install-palette-cli#downloadandsetup) - - -- [Airgap Install Instructions](/enterprise-version/air-gap-repo) - -
\ No newline at end of file diff --git a/content/docs/12-enterprise-version/03-deploying-palette-with-helm.md b/content/docs/12-enterprise-version/03-deploying-palette-with-helm.md deleted file mode 100644 index 3c153a7a1f..0000000000 --- a/content/docs/12-enterprise-version/03-deploying-palette-with-helm.md +++ /dev/null @@ -1,682 +0,0 @@ ---- -title: "Install using Helm Chart" -metaTitle: "Install using Helm Chart" -metaDescription: "Learn how to deploy self-hosted Palette to a Kubernetes cluster using a Helm Chart." -icon: "" -hideToC: false -fullWidth: false ---- - -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; -import Tabs from 'shared/components/ui/Tabs'; - -# Helm Chart Mode - -You can use the Palette Helm Chart to install Palette in a multi-node Kubernetes cluster in your production environment. - -This installation method is common in secure environments with restricted network access that prohibits using Palette SaaS. Review our [architecture diagrams](/architecture/networking-ports) to ensure your Kubernetes cluster has the necessary network connectivity for Palette to operate successfully. - - -Depending on what version of Palette you are using, the available parameters will be different. Select the tab below that corresponds to the version of Palette you are using. - -
- - - - -## Prerequisites - -- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) is installed and available. - - -- [Helm](https://helm.sh/docs/intro/install/) is installed and available. - - -- Access to the target Kubernetes cluster's kubeconfig file. You must be able to interact with the cluster using `kubectl` commands and have sufficient permissions to install Palette. We recommend using a role with cluster-admin permissions to install Palette. - - -- The Kubernetes cluster must be set up on a supported version of Kubernetes, which includes versions v1.25 to v1.27. - - - -- Ensure the Kubernetes cluster does not have Cert Manager installed. Palette requires a unique Cert Manager configuration to be installed as part of the installation process. If Cert Manager is already installed, you must uninstall it before installing Palette. - - -- The Kubernetes cluster must have a Container Storage Interface (CSI) installed and configured. Palette requires a CSI to store persistent data. You may install any CSI that is compatible with your Kubernetes cluster. - - - -- We recommend the following resources for Palette. Refer to the [Palette size guidelines](/enterprise-version/on-prem-system-requirements#hardwarerequirements) for additional sizing information. - - - 8 CPUs per node. - - - 16 GB Memory per node. - - - 100 GB Disk Space per node. - - - A Container Storage Interface (CSI) for persistent data. - - - A minimum of three worker nodes or three untainted control plane nodes. - - -- The following network ports must be accessible for Palette to operate successfully. - - - TCP/443: Inbound and outbound to and from the Palette management cluster. - - - TCP/6443: Outbound traffic from the Palette management cluster to the deployed clusters' Kubernetes API server. - - -- Ensure you have an SSL certificate that matches the domain name you will assign to Palette. You will need this to enable HTTPS encryption for Palette. Reach out to your network administrator or security team to obtain the SSL certificate. You need the following files: - - - x509 SSL certificate file in base64 format. - - - x509 SSL certificate key file in base64 format. - - - x509 SSL certificate authority file in base64 format. - - -- Ensure the OS and Kubernetes cluster you are installing Palette onto are FIPS-compliant. Otherwise, Palette and its operations will not be FIPS-compliant. - - -- A custom domain and the ability to update Domain Name System (DNS) records. You will need this to enable HTTPS encryption for Palette. - - -- Access to the Palette Helm Charts. Refer to [Access Palette](/enterprise-version#downloadpaletteinstaller) for instructions on how to request access to the Helm Charts. - - -
- - - -Do not use a Palette-managed Kubernetes cluster when installing Palette. Palette-managed clusters contain the Palette agent and Palette-created Kubernetes resources that will interfere with the installation of Palette. - - - - -## Install Palette - -Use the following steps to install Palette on Kubernetes. - -
- - - -The following instructions are written agnostic to the Kubernetes distribution you are using. Depending on the underlying infrastructure provider and your Kubernetes distribution, you may need to modify the instructions to match your environment. Reach out to our support team if you need assistance. - - - - -1. Open a terminal session and navigate to the directory where you downloaded the Palette Helm Charts provided by our support. We recommend you place all the downloaded files into the same directory. You should have the following Helm Charts: - -
- - - Spectro Management Plane Helm Chart. - -
- - - Cert Manager Helm Chart. - - -2. Extract each Helm Chart into its directory. Use the commands below as a reference. Do this for all the provided Helm Charts. - -
- - ```shell - tar xzvf spectro-mgmt-plane-*.tgz - ``` - -
- - ```yaml - tar xzvf cert-manager-*.tgz - ``` - - -3. Install Cert Manager using the following command. Replace the actual file name of the Cert Manager Helm Chart with the one you downloaded, as the version number may be different. - -
- - ```shell - helm upgrade --values cert-manager/values.yaml cert-manager cert-manager-1.11.0.tgz --install - ``` - -
- - - - The Cert Manager Helm Chart provided by our support team is configured for Palette. Do not modify the **values.yaml** file unless instructed to do so by our support team. - - - - -4. Open the **values.yaml** in the **spectro-mgmt-plane** folder with a text editor of your choice. The **values.yaml** contains the default values for the Palette installation parameters, however, you must populate the following parameters before installing Palette. - -
- - | **Parameter** | **Description** | **Type** | - | --- | --- | --- | - | `env.rootDomain` | The URL name or IP address you will use for the Palette installation. | string | - | `ociPackRegistry` or `ociPackEcrRegistry` | The OCI registry credentials for Palette FIPS packs.| object | - | `scar` | The Spectro Cloud Artifact Repository (SCAR) credentials for Palette FIPS images. These credentials are provided by our support team. | object | - - - Save the **values.yaml** file after you have populated the required parameters mentioned in the table. - -
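-If you prefer to script the edit, a minimal sketch using the Go-based `yq` (v4) is shown below; the domain is a placeholder, and the registry and SCAR blocks should be filled in using the structure already present in the **values.yaml** file provided by our support team.
-
-```shell
-# Set the root domain (placeholder value), then confirm the change.
-yq -i '.env.rootDomain = "palette.example.com"' spectro-mgmt-plane/values.yaml
-grep "rootDomain" spectro-mgmt-plane/values.yaml
-```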
- - - - You can learn more about the parameters in the **values.yaml** file in the [Helm Configuration Reference](/enterprise-version/deploying-palette-with-helm) page. - - - - - -5. Install the Palette Helm Chart using the following command. - -
- - ```shell - helm upgrade --values spectro-mgmt-plane/values.yaml hubble spectro-mgmt-plane-0.0.0.tgz --install - ``` - - -6. Track the installation process using the command below. Palette is ready when the deployments in the namespaces `cp-system`, `hubble-system`, `ingress-nginx`, `jet-system` , and `ui-system` reach the *Ready* state. The installation takes between two to three minutes to complete. - -
- - ```shell - kubectl get pods --all-namespaces --watch - ``` - - -7. Create a DNS CNAME record that is mapped to the Palette `ingress-nginx-controller` load balancer. You can use the following command to retrieve the load balancer IP address. You may require the assistance of your network administrator to create the DNS record. - -
- - ```shell - kubectl get service ingress-nginx-controller --namespace ingress-nginx --output jsonpath='{.status.loadBalancer.ingress[0].hostname}' - ``` - -
- - - - As you create tenants in Palette, the tenant name is prefixed to the domain name you assigned to Palette. For example, if you create a tenant named `tenant1` and the domain name you assigned to Palette is `palette.example.com`, the tenant URL will be `tenant1.palette.example.com`. You can create an additional wildcard DNS record to map all tenant URLs to the Palette load balancer. - - - - -8. Use the custom domain name or the IP address of the load balancer to visit the Palette system console. To access the system console, open a web browser and paste the custom domain URL in the address bar and append the value `/system`. Replace the domain name in the URL with your custom domain name or the IP address of the load balancer. Alternatively, you can use the load balancer IP address with the appended value `/system` to access the system console. - -
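-Once the CNAME record (and optional wildcard record) is in place, you can confirm that it resolves to the ingress load balancer before proceeding; the domain names below are placeholders.
-
-```shell
-# Both names should resolve to the ingress-nginx load balancer hostname or IP.
-dig +short palette.example.com
-dig +short tenant1.palette.example.com
-```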
- - - - The first time you visit the Palette system console, a warning message about an untrusted SSL certificate may appear. This is expected, as you have not yet uploaded your SSL certificate to Palette. You can ignore this warning message and proceed. - - - -
- - ![Screenshot of the Palette system console showing Username and Password fields.](/palette_installation_install-on-vmware_palette-system-console.png) - - -9. Log in to the system console using the following default credentials. - -
- - | **Parameter** | **Value** | - | --- | --- | - | Username | `admin` | - | Password | `admin` | - -
- - After login, you will be prompted to create a new password. Enter a new password and save your changes. You will be redirected to the Palette system console. - -
- -10. After login, a summary page is displayed. Palette is installed with a self-signed SSL certificate. To assign a different SSL certificate you must upload the SSL certificate, SSL certificate key, and SSL certificate authority files to Palette. You can upload the files using the Palette system console. Refer to the [Configure HTTPS Encryption](/vertex/system-management/ssl-certificate-management) page for instructions on how to upload the SSL certificate files to Palette. - - -
- - - -If you plan to deploy host clusters into different networks, you may require a reverse proxy. Check out the [Configure Reverse Proxy](/enterprise-version/reverse-proxy) guide for instructions on how to configure a reverse proxy for Palette. - - - -You now have a self-hosted instance of Palette installed in a Kubernetes cluster. Make sure you retain the **values.yaml** file as you may need it for future upgrades. - - -## Validate - -Use the following steps to validate the Palette installation. - -
- - -1. Open up a web browser and navigate to the Palette system console. To access the system console, open a web browser and paste the following URL in the address bar and append the value `/system`. Replace the domain name in the URL with your custom domain name or the IP address of the load balancer. - - - -2. Log in using the credentials you received from our support team. After login, you will be prompted to create a new password. Enter a new password and save your changes. You will be redirected to the Palette system console. - - -3. Open a terminal session and issue the following command to verify the Palette installation. The command should return a list of deployments in the `cp-system`, `hubble-system`, `ingress-nginx`, `jet-system` , and `ui-system` namespaces. - -
- - ```shell - kubectl get pods --all-namespaces --output custom-columns="NAMESPACE:metadata.namespace,NAME:metadata.name,STATUS:status.phase" \ - | grep -E '^(cp-system|hubble-system|ingress-nginx|jet-system|ui-system)\s' - ``` - - Your output should look similar to the following. - - ```shell hideClipboard - cp-system spectro-cp-ui-689984f88d-54wsw Running - hubble-system auth-85b748cbf4-6drkn Running - hubble-system auth-85b748cbf4-dwhw2 Running - hubble-system cloud-fb74b8558-lqjq5 Running - hubble-system cloud-fb74b8558-zkfp5 Running - hubble-system configserver-685fcc5b6d-t8f8h Running - hubble-system event-68568f54c7-jzx5t Running - hubble-system event-68568f54c7-w9rnh Running - hubble-system foreq-6b689f54fb-vxjts Running - hubble-system hashboard-897bc9884-pxpvn Running - hubble-system hashboard-897bc9884-rmn69 Running - hubble-system hutil-6d7c478c96-td8q4 Running - hubble-system hutil-6d7c478c96-zjhk4 Running - hubble-system mgmt-85dbf6bf9c-jbggc Running - hubble-system mongo-0 Running - hubble-system mongo-1 Running - hubble-system mongo-2 Running - hubble-system msgbroker-6c9b9fbf8b-mcsn5 Running - hubble-system oci-proxy-7789cf9bd8-qcjkl Running - hubble-system packsync-28205220-bmzcg Succeeded - hubble-system spectrocluster-6c57f5775d-dcm2q Running - hubble-system spectrocluster-6c57f5775d-gmdt2 Running - hubble-system spectrocluster-6c57f5775d-sxks5 Running - hubble-system system-686d77b947-8949z Running - hubble-system system-686d77b947-cgzx6 Running - hubble-system timeseries-7865bc9c56-5q87l Running - hubble-system timeseries-7865bc9c56-scncb Running - hubble-system timeseries-7865bc9c56-sxmgb Running - hubble-system user-5c9f6c6f4b-9dgqz Running - hubble-system user-5c9f6c6f4b-hxkj6 Running - ingress-nginx ingress-nginx-controller-2txsv Running - ingress-nginx ingress-nginx-controller-55pk2 Running - ingress-nginx ingress-nginx-controller-gmps9 Running - jet-system jet-6599b9856d-t9mr4 Running - ui-system spectro-ui-76ffdf67fb-rkgx8 Running - ``` - - -## Next Steps - -You have successfully installed Palette in a Kubernetes cluster. Your next steps are to configure Palette for your organization. Start by creating the first tenant to host your users. Use the [Create a Tenant](/vertex/system-management/tenant-management#createatenant) page for instructions on how to create a tenant. - - - - -
- 
- 
-## Prerequisites
-
-- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) is installed.
-
-
-- Configure a Container Storage Interface (CSI) for persistent data.
-
-
-- Have at least three worker nodes or three untainted control plane nodes.
-
-
-- [Cert Manager](https://cert-manager.io/docs) v1.11.0 or greater installed in the Kubernetes cluster. Use the official Cert Manager [installation guide](https://cert-manager.io/docs/installation/) for additional guidance, or use the example command after this list.
-
-
-
-- Allocate a minimum of 4 CPUs and 12 GB of Memory per node.
-
-
-- A custom domain and the ability to update Domain Name System (DNS) records.
-
-
-
-- Access to the Palette Helm Chart. Contact support@spectrocloud.com to gain access to the Helm Chart.
-
-
-- For AWS EKS, ensure you have the [AWS CLI](https://aws.amazon.com/cli/) and the [eksctl CLI](https://github.com/weaveworks/eksctl#installation) installed.
- 
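- 
-If Cert Manager is not already present in the cluster, one minimal way to install it is to apply the official static manifest, as sketched below. The version shown, `v1.11.0`, is only an example that satisfies the minimum version requirement; installing Cert Manager through its Helm chart works equally well.
- 
-```shell
-kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml
-```
- 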
- - - -Palette cannot manage the cluster that it is installed onto due to component conflicts. Consider using a managed Kubernetes service to minimize management overhead. The Palette Helm Chart is not tied to any particular managed Kubernetes service. - - - - - -## Install Palette - -Choose the installation steps for your target environment. The steps in the generic tab apply to all Kubernetes clusters. Steps in other tabs have instructions explicitly tailored to the target environment. - -
- - - - - -1. Download the kubeconfig file for the Kubernetes cluster where you will deploy Palette. Ensure you can interact with the target cluster. You can validate by issuing a `kubectl` command. - -
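- 
-   For example, you can point `kubectl` at the downloaded file by setting the `KUBECONFIG` environment variable. The file path below is a placeholder; adjust it to wherever you saved the kubeconfig.
- 
-   ```shell
-   export KUBECONFIG=/path/to/palette-cluster.kubeconfig
-   ```
- 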
- - ```shell - kubectl get pods -A - ``` - - -2. Extract the **values.yaml** from the Helm Chart with the following command: - -
- 
-   ```shell
-   tar xzvf /path/to/chart.tgz spectro-mgmt-plane/values.yaml
-   ```
- 
- 
-3. Review the **values.yaml** file. You must populate the `env.rootDomain` parameter with the domain you will use for the installation, as shown in the sketch below. All other parameter values are optional, and you can reset or change them with a Helm upgrade operation.
- 
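- 
-   A minimal sketch of the relevant section of **values.yaml**, assuming the hypothetical domain `palette.example.com`:
- 
-   ```yaml
-   config:
-     env:
-       rootDomain: "palette.example.com"
-   ```
- 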
- - - - Do not use a wildcard in the root domain value for the `env.rootDomain` parameter. Use a complete domain name when assigning a root domain name value. - - - - -4. Install the Helm Chart using the following command. Replace the path in the command to match your local path of the Palette Helm Chart. - -
- - ```shell - helm install palette /path/to/chart.tgz -f /path/to/values.yaml - ``` - - -5. Monitor the deployment using the command below. Palette is ready when the deployments in namespaces `cp-system`, `hubble-system`, `jet-system` , and `ui-system` reach the *Ready* state. - -
- 
-   ```shell
-   kubectl get pods --all-namespaces --watch
-   ```
- 
-6. Create a DNS record that is mapped to the Palette `ingress-nginx-controller` load balancer. You can use the following command to retrieve the load balancer address. Depending on your environment, the address is exposed under the `hostname` or `ip` field of the load balancer status.
- 
- 
-   ```shell
-   kubectl get service ingress-nginx-controller --namespace ingress-nginx --output jsonpath='{.status.loadBalancer.ingress[0].hostname}'
-   ```
- 
-You now have a self-hosted instance of Palette installed in a Kubernetes cluster. Make sure you retain the **values.yaml** file as you will need it for future upgrades.
- 
- -
- - - -1. Ensure the AWS CLI is configured with your credentials. You can use the following command to configure your credentials. Refer to the [Configuring the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) guide for additional help. - -
- - ```shell - aws configure - ``` - -2. Next, create an EKS cluster. - -
- 
-   ```shell
-   eksctl create cluster \
-    --name palette-selfhost \
-    --node-type m5.xlarge \
-    --nodes 3 \
-    --nodes-min 3 \
-    --nodes-max 4 \
-    --region eu-west-2 \
-    --kubeconfig ~/Downloads/palette-selfhost.kubeconfig
-   ```
- 
-   Change `--region` and `--nodes` as required. You can also change the instance size.
- 
-   Note that the [minimum instance requirement](https://aws.amazon.com/ec2/instance-types/) is three nodes with at least 4 CPUs and 12 GB of Memory per node.
- 
- 
-3. When the cluster is available, configure OpenID Connect (OIDC) for the cluster to use Palette as the Identity Provider (IDP).
- 
- 
-   ```shell
-   eksctl utils associate-iam-oidc-provider --cluster=palette-selfhost --approve
-   ```
- 
-4. Next, add the EBS Container Storage Interface (CSI) driver IAM role. Insert your AWS account ID into the `--service-account-role-arn` value in the following command.
- 
- - ```shell - eksctl create addon --name aws-ebs-csi-driver \ - --cluster palette-selfhost \ - --service-account-role-arn arn:aws:iam:::role/AmazonEKS_EBS_CSI_DriverRole \ - --force - ``` - -5. Log in to the [AWS console](https://console.aws.amazon.com) and navigate to the EKS Dashboard. - - - -6. Select the **palette-selfhost** cluster to access its details page. - - - -7. From the cluster details page, click on **Compute** > **Node Group**. Next, click on **Node IAM role ARN link**. - - ![A view of the cluster details page with the Node IAM role ARN highlighted](/enterprise-version_deploying-palette-with-helm_aws-iam-role.png) - - -8. From the **Permissions** tab, click on the **Add Permissions** button, and select **Attach Policies**. - - -9. Search for the **AmazonEBSCSIDriverPolicy** policy and add it to the role. - -
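- 
-   If you prefer the AWS CLI over the console for the previous two steps, you can attach the same managed policy directly to the node IAM role. The following is a sketch only; `<NODE_ROLE_NAME>` is a placeholder for the Node IAM role name you identified in step **7**.
- 
-   ```shell
-   aws iam attach-role-policy \
-     --role-name <NODE_ROLE_NAME> \
-     --policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy
-   ```
- 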
- - - - You can find additional guidance about Amazon EBS CSI drivers and requirements by reviewing the [EBS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html) and the [Manage EBS with EKS](https://github.com/awsdocs/amazon-eks-user-guide/blob/master/doc_source/managing-ebs-csi.md) guide. - - - -10. Extract the Helm Chart files from the compressed asset we provided to you. Replace the file path and version placeholder as needed. - -
- - ```shell - tar xzvf path/to-file/spectro-mgmt-helm-charts-X.X.tar.gz - ``` - -11. Navigate to the **spectro-mgmt-helm-charts-X.X** folder. - -
- 
-   ```shell
-   cd spectro-mgmt-helm-charts-X.X
-   ```
- 
-12. Review the **values.yaml** file. You must populate the `env.rootDomain` parameter with the domain you will use for the installation. In addition, set the `natsUrl` parameter in the `nats` section of the YAML to the same root domain with port `4222` appended, for example, `my-domain.com:4222`. All other parameter values are optional, and you can reset or change them with the Palette API. A minimal sketch follows this step.
- 
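- 
-    A minimal sketch of the two related settings, assuming the hypothetical domain `my-domain.com`:
- 
-    ```yaml
-    config:
-      env:
-        rootDomain: "my-domain.com"
-    nats:
-      natsUrl: "my-domain.com:4222"
-    ```
- 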
- - - - Do not use a wildcard in the root domain value for the `env.rootDomain` parameter. Use a complete domain name when assigning a root domain name value. - - - - 13. If you wish to use [AWS ACM for SSL Certs](https://docs.aws.amazon.com/acm/latest/userguide/acm-overview.html), instead of the default self-signed certificate that the Nginx *ingress controller* generates, you can add it to the `annotations` under `ingress`. - -
- - ```yaml - ingress: - ingress: - # Whether to front NGINX Ingress Controller with a cloud - # load balancer (internal == false) or use host network - internal: false - - # Default SSL certificate and key for NGINX Ingress Controller (Optional) - # A wildcard cert for config.env.rootDomain, e.g., *.myfirstpalette.spectrocloud.com - # If left blank, the NGINX ingress controller will generate a self-signed cert (when terminating TLS upstream of ingress-nginx-controller) - # certificate: "" - # key: "" - - annotations: - # AWS example - service.beta.kubernetes.io/aws-load-balancer-internal: "true" - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp - service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "" - service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" - ingressStaticIP: "" - # Used to terminate HTTPS traffic at the load balancer versus passing through the load balancer. This parameter is available in Palette 3.3 or greater. - terminateHTTPSAtLoadBalancer: true - ``` - - 14. Download the kubeconfig file for the EKS cluster. Ensure you can interact with the target cluster. You can validate by issuing a `kubectl` command. For additional guidance, refer to the [kubeconfig file for an Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html) guide. - - - -15. Install the Helm Chart using the following command. Replace the path in the command to match your local path of the Palette Helm Chart. - -
- - ```shell - helm install palette /path/to/chart.tgz -f /path/to/values.yaml - ``` - -16. Monitor the deployment using the command below. Palette is ready when the deployments in namespaces `cp-system`, `hubble-system`, `jet-system` , and `ui-system` reach the *Ready* state. - -
- 
-   ```shell
-   kubectl get pods --all-namespaces --watch
-   ```
- 
-17. Create a DNS record mapped to the load balancer created by the Palette service `ingress-nginx-controller`. You can use the following command to retrieve the load balancer hostname.
- 
- 
-   ```shell
-   kubectl get service ingress-nginx-controller --namespace ingress-nginx --output jsonpath='{.status.loadBalancer.ingress[0].hostname}'
-   ```
- 
-You now have a self-hosted instance of Palette installed in a Kubernetes cluster. Make sure you retain the **values.yaml** file as you will need it for future upgrades.
- 
- -
- - -
- 
-## Validate
- 
-You can validate that the installation of Palette is successful by visiting the custom domain you assigned to the `env.rootDomain` parameter in the **values.yaml**.
- 
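- 
-As a quick command-line check, you can also confirm that the endpoint responds. The domain below is a placeholder, and the `--insecure` flag is only needed while you are still using the default self-signed certificate. A `200` or a redirect status code indicates the console is reachable.
- 
-```shell
-curl --insecure --silent --output /dev/null --write-out "%{http_code}\n" https://palette.example.com
-```
- 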
- - - - -If you notice that the pods in the `hubble-system` namespace are not initializing as expected, it might be due to a delay in adding the DNS records for the rootDomain. The workaround is to terminate all pods except the pods related to `mongo-db` in the `hubble-system` namespace to trigger a redeployment of the pods. - -
- - ```shell - kubectl delete pods --namespace hubble-system --selector=role!=mongo - ``` - -
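- 
-   Before deleting pods, you may also want to confirm that the DNS record for the root domain has propagated. A quick check, assuming the hypothetical domain `palette.example.com`:
- 
-   ```shell
-   dig +short palette.example.com
-   ```
- 
-   If the command returns no address, wait for the DNS record to propagate before triggering the redeployment.
- 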
- - -## Upgrade Palette - - - -To upgrade Palette with a new Helm release, use the following steps.

- -1. Download the new version of the Helm Chart. - - - -2. Extract the new **values.yaml** file from the Helm Chart with the following command: - -
- - ```shell - tar xzvf /path/to/chart.tgz spectro-mgmt-plane/values.yaml - ``` - - -3. Compare the new **values.yaml** against the original **values.yaml** you used for the initial Palette installation. Address any new parameters added to the values file. - - - - -4. Issue the following command to upgrade Palette. Use the same **values.yaml** file you used for the Palette installation. - -
- 
-   ```shell
-   helm upgrade palette /path/to/chart.tgz -f /path/to/original_values.yaml
-   ```
- 
- 
-### Post-Install Configuration Values
- 
-The values you specified in the **values.yaml** file all fall under the parameter section `values.config` and are stored in the `configserver-cm` ConfigMap.
-
-After the installation, if you need to change any configuration values under `values.config` in the **values.yaml** file, you must use the Palette API.
-When you use the `helm upgrade` command, internal system configurations stored in the Kubernetes ConfigMap `configserver-cm` will display as updated, but Palette will not apply the new values. Palette only accepts changes to these configuration values if they are submitted via API.
-
-If you find yourself in this scenario, contact our support team by emailing us at support@spectrocloud.com for additional guidance.
- 
- 
-
-## Next Steps
-
-Start exploring the Palette system dashboard so that you become familiar with the available actions you can take as an administrator. Check out the [System Console Dashboard](/enterprise-version/system-console-dashboard) resource to learn more.
-
- 
- - -
-
- -
\ No newline at end of file diff --git a/content/docs/12-enterprise-version/03.5-helm-chart-install-reference.md b/content/docs/12-enterprise-version/03.5-helm-chart-install-reference.md deleted file mode 100644 index 7127599aca..0000000000 --- a/content/docs/12-enterprise-version/03.5-helm-chart-install-reference.md +++ /dev/null @@ -1,717 +0,0 @@ ---- -title: "Helm Chart Install Reference" -metaTitle: "Helm Chart Install References" -metaDescription: "Reference for Palette Helm Chart installation parameters." -icon: "" -hideToC: False -fullWidth: False ---- - - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; - -# Helm Chart Install Reference - -You can use the Palette Helm Chart to install Palette in a multi-node Kubernetes cluster in your production environment. The Helm chart allows you to customize values in the **values.yaml** file. This reference lists and describes parameters available in the **values.yaml** file from the Helm Chart for your installation. To learn how to install Palette using the Helm Chart, refer to [Helm Chart Mode](/enterprise-version/deploying-palette-with-helm). - - -Depending on what version of Palette you are using, the available parameters will be different. Select the tab below that corresponds to the version of Palette you are using. - -
- 
- 
- 
- 
-
-## Required Parameters
-
-The following parameters are required for a successful installation of Palette.
-
-
-| **Parameters** | **Description** | **Type** |
-| --- | --- | --- |
-| `config.env.rootDomain` | Used to configure the domain for the Palette installation. We recommend you create a CNAME DNS record that supports multiple subdomains. You can achieve this using a wildcard prefix, `*.palette.abc.com`. Review the [Environment parameters](#environment) to learn more. | String |
-| `config.env.ociRegistry` or `config.env.ociEcrRegistry`| Specifies the FIPS image registry for Palette. You can use a self-hosted OCI registry or a public OCI registry we maintain and support. For more information, refer to the [Registry](#registries) section. | Object |
-| `scar`| The Spectro Cloud Artifact Repository (SCAR) credentials for Palette FIPS images. Our support team provides these credentials. For more information, refer to the [Registry](#registries) section. | Object |
-
-
-
-
-If you are installing an air-gapped version of Palette, you must provide the image swap configuration. For more information, refer to the [Image Swap Configuration](#imageswapconfiguration) section.
-
-
-
-
-
-## MongoDB
-
-Palette uses MongoDB Enterprise as its internal database and supports two modes of deployment:

- -- MongoDB Enterprise deployed and active inside the cluster. - - -- MongoDB Enterprise is hosted on a software-as-a-service (SaaS) platform, such as MongoDB Atlas. - -The table below lists the parameters used to configure a MongoDB deployment. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `internal` | Specifies the MongoDB deployment either in-cluster or using Mongo Atlas. | Boolean | `true` | -| `databaseUrl`| The URL for MongoDB Enterprise. If using a remote MongoDB Enterprise instance, provide the remote URL. This parameter must be updated if `mongo.internal` is set to `false`. | String | `mongo-0.mongo,mongo-1.mongo,mongo-2.mongo` | -| `databasePassword`| The base64-encoded MongoDB Enterprise password. If you don't provide a value, a random password will be auto-generated. | String | `""` | -| `replicas`| The number of MongoDB replicas to start. | Integer | `3` | -| `memoryLimit`| Specifies the memory limit for each MongoDB Enterprise replica.| String | `4Gi` | -| `cpuLimit` | Specifies the CPU limit for each MongoDB Enterprise member.| String | `2000m` | -| `pvcSize`| The storage settings for the MongoDB Enterprise database. Use increments of `5Gi` when specifying the storage size. The storage size applies to each replica instance. The total storage size for the cluster is `replicas` * `pvcSize`. | string | `20Gi`| -| `storageClass`| The storage class for the MongoDB Enterprise database. | String | `""` | - - -```yaml -mongo: - internal: true - databaseUrl: "mongo-0.mongo,mongo-1.mongo,mongo-2.mongo" - databasePassword: "" - replicas: 3 - cpuLimit: "2000m" - memoryLimit: "4Gi" - pvcSize: "20Gi" - storageClass: "" -``` - -## Config - -Review the following parameters to configure Palette for your environment. The `config` section contains the following subsections: - -### SSO - -You can configure Palette to use Single Sign-On (SSO) for user authentication. Configure the SSO parameters to enable SSO for Palette. You can also configure different SSO providers for each tenant post-install, check out the [SAML & SSO Setup](/user-management/saml-sso) documentation for additional guidance. - -To configure SSO, you must provide the following parameters. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | --- | -| `saml.enabled` | Specifies whether to enable SSO SAML configuration by setting it to true. | Boolean | `false` | -| `saml.acsUrlRoot` | The root URL of the Assertion Consumer Service (ACS).| String | `myfirstpalette.spectrocloud.com`| -| `saml.acsUrlScheme` | The URL scheme of the ACS: `http` or `https`. | String | `https` | -| `saml.audienceUrl` | The URL of the intended audience for the SAML response.| String| `https://www.spectrocloud.com` | -| `saml.entityID` | The Entity ID of the Service Provider.| String | `https://www.spectrocloud.com`| -| `saml.apiVersion` | Specify the SSO SAML API version to use.| String | `v1` | - -```yaml -config: - sso: - saml: - enabled: false - acsUrlRoot: "myfirstpalette.spectrocloud.com" - acsUrlScheme: "https" - audienceUrl: "https://www.spectrocloud.com" - entityId: "https://www.spectrocloud.com" - apiVersion: "v1" -``` - -### Email - -Palette uses email to send notifications to users. The email notification is used when inviting new users to the platform, password resets, and when [webhook alerts](/clusters/cluster-management/health-alerts#overview) are triggered. Use the following parameters to configure email settings for Palette. 
- -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `enabled` | Specifies whether to enable email configuration. | Boolean| `false`| -| `emailID ` | The email address for sending mail.| String| `noreply@spectrocloud.com` | -| `smtpServer` | Simple Mail Transfer Protocol (SMTP) server used for sending mail. | String | `smtp.gmail.com` | -| `smtpPort` | SMTP port used for sending mail.| Integer | `587` | -| `insecureSkipVerifyTLS` | Specifies whether to skip Transport Layer Security (TLS) verification for the SMTP connection.| Boolean | `true` | -| `fromEmailID` | Email address of the ***From*** address.| String | `noreply@spectrocloud.com` | -| `password` | The base64-encoded SMTP password when sending emails.| String | `""` | - -```yaml -config: - email: - enabled: false - emailId: "noreply@spectrocloud.com" - smtpServer: "smtp.gmail.com" - smtpPort: 587 - insecureSkipVerifyTls: true - fromEmailId: "noreply@spectrocloud.com" - password: "" -``` - -### Environment - -The following parameters are used to configure the environment. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `env.rootDomain` | Specifies the URL name assigned to Palette Vertex. The value assigned should have a Domain Name System (DNS) CNAME record mapped to exposed IP address or the load balancer URL of the service *ingress-nginx-controller*. Optionally, if `ingress.ingressStaticIP` is provided with a value you can use same assigned static IP address as the value to this parameter.| String| `""` | -| `env.installerMode` | Specifies the installer mode. Do not modify the value.| String| `self-hosted` | -| `env.installerCloud` | Specifies the cloud provider. Leave this parameter empty if you are installing a self-hosted Palette. | String | `""` | - -```yaml -config: - env: - rootDomain: "" -``` -
- - - -As you create tenants in Palette, the tenant name is prefixed to the domain name you assigned to Palette. For example, if you create a tenant named tenant1 and the domain name you assigned to Palette is `palette.example.com`, the tenant URL will be `tenant1.palette.example.com`. We recommend you create an additional wildcard DNS record to map all tenant URLs to the Palette load balancer. For example, `*.palette.example.com`. - - - -### Cluster - -Use the following parameters to configure the Kubernetes cluster. - - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `stableEndpointAccess` | Set to `true` if the Kubernetes cluster is deployed in a public endpoint. If the cluster is deployed in a private network through a stable private endpoint, set to `false`. | Boolean | `false` | - -```yaml -config: - cluster: - stableEndpointAccess: false -``` - -## Registries - -Palette requires credentials to access the required Palette images. You can configure different types of registries for Palette to download the required images. You must configure at least one Open Container Initiative (OCI) registry for Palette. You must also provide the credentials for the Spectro Cloud Artifact Repository (SCAR) to download the required FIPS images. - -
- -### OCI Registry - - -Palette requires access to an OCI registry that contains all the required FIPS packs. You can host your own OCI registry and configure Palette to reference the registry. Alternatively, you can use the public OCI registry that we provide. Refer to the [`ociPackEcrRegistry`](#ociecrregistry) section to learn more about the publicly available OCI registry. - - - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `ociPackRegistry.endpoint` | The endpoint URL for the registry. | String| `""` | -| `ociPackRegistry.name` | The name of the registry. | String| `""` | -| `ociPackRegistry.password` | The base64-encoded password for the registry. | String| `""` | -| `ociPackRegistry.username` | The username for the registry. | String| `""` | -| `ociPackRegistry.baseContentPath`| The base path for the registry. | String | `""` | -| `ociPackRegistry.insecureSkipVerify` | Specifies whether to skip Transport Layer Security (TLS) verification for the registry connection. | Boolean | `false` | -| `ociPackRegistry.caCert` | The registry's base64-encoded certificate authority (CA) certificate. | String | `""` | - - -```yaml -config: - ociPackRegistry: - endpoint: "" - name: "" - password: "" - username: "" - baseContentPath: "" - insecureSkipVerify: false - caCert: "" -``` - -### OCI ECR Registry - -We expose a public OCI ECR registry that you can configure Palette to reference. If you want to host your own OCI registry, refer to the [OCI Registry](#oci-registry) section. -The OCI Elastic Container Registry (ECR) is hosted in an AWS ECR registry. Our support team provides the credentials for the OCI ECR registry. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `ociPackEcrRegistry.endpoint` | The endpoint URL for the registry. | String| `""` | -| `ociPackEcrRegistry.name` | The name of the registry. | String| `""` | -| `ociPackEcrRegistry.accessKey` | The base64-encoded access key for the registry. | String| `""` | -| `ociPackEcrRegistry.secretKey` | The base64-encoded secret key for the registry. | String| `""` | -| `ociPackEcrRegistry.baseContentPath`| The base path for the registry. | String | `""` | -| `ociPackEcrRegistry.isPrivate` | Specifies whether the registry is private. | Boolean | `true` | -| `ociPackEcrRegistry.insecureSkipVerify` | Specifies whether to skip Transport Layer Security (TLS) verification for the registry connection. | Boolean | `false` | -| `ociPackEcrRegistry.caCert` | The registry's base64-encoded certificate authority (CA) certificate. | String | `""` | - -```yaml -config: - ociPackEcrRegistry: - endpoint: "" - name: "" - accessKey: "" - secretKey: "" - baseContentPath: "" - isPrivate: true - insecureSkipVerify: false - caCert: "" -``` - -### Spectro Cloud Artifact Repository (SCAR) - -SCAR credentials are required to download the necessary FIPS manifests. Our support team provides the SCAR credentials. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `scar.endpoint` | The endpoint URL of SCAR. | String| `""` | -| `scar.username` |The username for SCAR. | String| `""` | -| `scar.password` | The base64-encoded password for the SCAR. | String| `""` | -| `scar.insecureSkipVerify` | Specifies whether to skip Transport Layer Security (TLS) verification for the SCAR connection. | Boolean | `false` | -| `scar.caCert` | The base64-encoded certificate authority (CA) certificate for SCAR. | String | `""` | - -
- - ```yaml - config: - scar: - endpoint: "" - username: "" - password: "" - insecureSkipVerify: false - caCert: "" - ``` - -### Image Swap Configuration - -You can configure Palette to use image swap to download the required images. This is an advanced configuration option, and it is only required for air-gapped deployments. You must also install the Palette Image Swap Helm chart to use this option, otherwise, Palette will ignore the configuration. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `imageSwapInitImage` | The image swap init image. | String | `gcr.io/spectro-images-public/thewebroot/imageswap-init:v1.5.2` | -| `imageSwapImage` | The image swap image. | String | `gcr.io/spectro-images-public/thewebroot/imageswap:v1.5.2` | -| `imageSwapConfig`| The image swap configuration for specific environments. | String | `""` | -| `imageSwapConfig.isEKSCluster` | Specifies whether the cluster is an Amazon EKS cluster. Set to `false` if the Kubernetes cluster is not an EKS cluster. | Boolean | `true` | - -
- - ```yaml - config: - imageSwapImages: - imageSwapInitImage: "gcr.io/spectro-images-public/thewebroot/imageswap-init:v1.5.2" - imageSwapImage: "gcr.io/spectro-images-public/thewebroot/imageswap:v1.5.2" - - imageSwapConfig: - isEKSCluster: true - ``` - -### NATS - -Palette uses [NATS](https://nats.io) and gRPC for communication between Palette components. Dual support for NATS and gRPC is available. You can enable the deployment of an additional load balancer for NATS. Host clusters deployed by Palette use the load balancer to communicate with the Palette control plane. This is an advanced configuration option and is not required for most deployments. Speak with your support representative before enabling this option. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `nats.enabled`| Specifies whether to enable the deployment of a NATS load balancer. | Boolean | `true` | -| `nats.internal`| Specifies whether to deploy a load balancer or use the host network. If this value is set to `true`, then the remaining NATS parameters are ignored. | Boolean | `true` | -| `nats.natsUrl`| The NATS URL. This can be a comma separated list of mappings for the NATS load balancer service. For example, "message1.dev.spectrocloud.com:4222,message2.dev.spectrocloud.com:4222". This parameter is mandatory if `nats.internal` is set to `false`. If `nats.internal` is set to `true`, you can leave this parameter empty. | String | `""` | -| `nats.annotations`| A map of key-value pairs that specifies load balancer annotations for NATS. You can use annotations to change the behavior of the load balancer and the Nginx configuration. This is an advanced setting. We recommend you consult with your assigned support team representative prior to modification. | Object | `{}` | -| `nats.natsStaticIP`| Specify a static IP address for the NATS load balancer service. If empty, a dynamic IP address will be assigned to the load balancer. | String | `""` | - - -
- - ```yaml - nats: - enabled: true - internal: true - natsUrl: "" - annotations: {} - natsStaticIP: -``` - - - - -## gRPC - -gRPC is used for communication between Palette components. You can enable the deployment of an additional load balancer for gRPC. Host clusters deployed by Palette use the load balancer to communicate with the Palette control plane. This is an advanced configuration option, and it is not required for most deployments. Speak with your support representative before enabling this option. Dual support for NATS and gRPC is available. - -If you want to use an external gRPC endpoint, you must provide a domain name for the gRPC endpoint and a valid x509 certificate. Additionally, you must provide a custom domain name for the endpoint. A CNAME DNS record must point to the IP address of the gRPC load balancer. For example, if your Palette domain name is `palette.example.com`, you could create a CNAME DNS record for `grpc.palette.example.com` that points to the IP address of the load balancer dedicated to gRPC. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `external`| Specifies whether to use an external gRPC endpoint. | Boolean | `false` | -| `endpoint`| The gRPC endpoint. | String | `""` | -| `caCertificateBase64`| The base64-encoded certificate authority (CA) certificate for the gRPC endpoint. | String | `""` | -| `serverCrtBase64`| The base64-encoded server certificate for the gRPC endpoint. | String | `""` | -| `serverKeyBase64`| The base64-encoded server key for the gRPC endpoint. | String | `""` | -| `insecureSkipVerify`| Specifies whether to skip Transport Layer Security (TLS) verification for the gRPC endpoint. | Boolean | `false` | - - - - -```yaml -grpc: - external: false - endpoint: "" - caCertificateBase64: "" - serverCrtBase64: "" - serverKeyBase64: "" - insecureSkipVerify: false -``` - -## Ingress - -Palette deploys an Nginx Ingress Controller. This controller is used to route traffic to the Palette control plane. You can change the default behavior and omit the deployment of an Nginx Ingress Controller. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `enabled`| Specifies whether to deploy an Nginx controller. Set to `false` if you do not want an Nginx controller deployed. | Boolean | `true` | -| `ingress.internal`| Specifies whether to deploy a load balancer or use the host network. | Boolean | `false` | -| `ingress.certificate`| Specify the base64-encoded x509 SSL certificate for the Nginx Ingress Controller. If left blank, the Nginx Ingress Controller will generate a self-signed certificate. | String | `""` | -| `ingress.key`| Specify the base64-encoded x509 SSL certificate key for the Nginx Ingress Controller. | String | `""` | -| `ingress.annotations`| A map of key-value pairs that specifies load balancer annotations for ingress. You can use annotations to change the behavior of the load balancer and the Nginx configuration. This is an advanced setting. We recommend you consult with your assigned support team representative prior to modification. | Object | `{}` | -| `ingress.ingressStaticIP`| Specify a static IP address for the ingress load balancer service. If empty, a dynamic IP address will be assigned to the load balancer. | String | `""` | -| `ingress.terminateHTTPSAtLoadBalancer`| Specifies whether to terminate HTTPS at the load balancer. 
| Boolean | `false` | - - -```yaml -ingress: - enabled: true - ingress: - internal: false - certificate: "" - key: "" - annotations: {} - ingressStaticIP: "" - terminateHTTPSAtLoadBalancer: false -``` - -## Spectro Proxy - -You can specify a reverse proxy server that clusters deployed through Palette can use to facilitate network connectivity to the cluster's Kubernetes API server. Host clusters deployed in private networks can use the [Spectro Proxy pack](/integrations/frp) to expose the cluster's Kubernetes API to downstream clients that are not in the same network. Check out the [Reverse Proxy](/enterprise-version/reverse-proxy) documentation to learn more about setting up a reverse proxy server for Palette. - - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `frps.enabled`| Specifies whether to enable the Spectro server-side proxy. | Boolean | `false` | -| `frps.frpHostURL`| The Spectro server-side proxy URL. | String | `""` | -| `frps.server.crt`| The base64-encoded server certificate for the Spectro server-side proxy. | String | `""` | -| `frps.server.key`| The base64-encoded server key for the Spectro server-side proxy. | String | `""` | -| `frps.ca.crt`| The base64-encoded certificate authority (CA) certificate for the Spectro server-side proxy. | String | `""` | - -```yaml -frps: - frps: - enabled: false - frpHostURL: "" - server: - crt: "" - key: "" - ca: - crt : "" -``` - -## UI System - -The table lists parameters to configure the Palette User Interface (UI) behavior. You can disable the UI or the Network Operations Center (NOC) UI. You can also specify the MapBox access token and style layer ID for the NOC UI. MapBox is a third-party service that provides mapping and location services. To learn more about MapBox and how to obtain an access token, refer to the [MapBox Access tokens](https://docs.mapbox.com/help/getting-started/access-tokens) guide. - - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `enabled`| Specifies whether to enable the Palette UI. | Boolean | `true` | -| `ui.nocUI.enable`| Specifies whether to enable the Palette Network Operations Center (NOC) UI. Enabling this parameter requires the `ui.nocUI.mapBoxAccessToken`. Once enabled, all cluster locations will be reported to MapBox. This feature is not FIPS compliant. | Boolean | `false` | -| `ui.nocUI.mapBoxAccessToken`| The MapBox access token for the Palette NOC UI. | String | `""` | -| `ui.nocUI.mapBoxStyledLayerID`| The MapBox style layer ID for the Palette NOC UI. | String | `""` | - - - -```yaml -ui-system: - enabled: true - ui: - nocUI: - enable: false - mapBoxAccessToken: "" - mapBoxStyledLayerID: "" -``` - - - - -## Reach System - -You can configure Palette to use a proxy server to access the internet. Set the parameter `reach-system.reachSystem.enabled` to `true` to enable the proxy server. Proxy settings are configured in the `reach-system.reachSystem.proxySettings` section. - - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `reachSystem.enabled`| Specifies whether to enable the usage of a proxy server for Palette. | Boolean | `false` | -| `reachSystem.proxySettings.http_proxy`| The HTTP proxy server URL. | String | `""` | -| `reachSystem.proxySettings.https_proxy`| The HTTPS proxy server URL. | String | `""` | -| `reachSystem.proxySettings.no_proxy`| A list of hostnames or IP addresses that should not be proxied. 
| String | `""` | - - - ```yaml - reach-system: - reachSystem: - enabled: false - proxySettings: - http_proxy: "" - https_proxy: "" - no_proxy: - ``` - - - -
- - -## Required Parameters - -The following parameters in the **values.yaml** file are required:

- -- **env.rootDomain** - Used to configure the domain for the Palette installation. You should create a CNAME DNS record separately, and it should be a wildcard to account for Organization prefixes. Review the [Environment parameters](/enterprise-version/helm-chart-install-reference#environment) to learn more.

- -- **natsUrl** - The URL format specifies how to configure NATS servers to the IP address and port. Review the [Network Address Translation (NATS) parameters](/enterprise-version/helm-chart-install-reference#networkaddresstranslation(nats)) to learn more.

- - - -- **Registry and Palette Artifact Repository** - Specifies the Docker registry where chart images are stored and the Palette Artifact Repository (PAR). Refer to the [Registry and Palette Artifact Repository parameters](/enterprise-version/helm-chart-install-reference#registryandpaletteartifactrepository(par)). - -## MongoDB - -Palette uses MongoDB as its database and supports two modes of deployment:

- -- MongoDB deployed and active inside the cluster. - - -- MongoDB hosted on a software as a service (SaaS) platform, such as Atlas. - -The table lists the parameters used to configure a MongoDB deployment. - -| **Parameters** | **Default value** | **Type** | **Description** | **Required/Optional** | -| --- | --- | --- | --- | --- | -| `internal` | `n/a` | Boolean | Specifies the MongoDB deployment either in-cluster or using Mongo Atlas. | Required | -| `databaseUrl` | `mongo-0.mongo,mongo-1.mongo,mongo-2.mongo` | String | URL for MongoDB. Change the URL if you are using Mongo Atlas.| Required| -| `databasePassword` | `""` | String | The base64 encoded MongoDB password. | Optional | -| `replicas` | `3` | Integer | Specifies the number of MongoDB replicas to start.| Required | -| `cpuLimit` | `2000m` | String | Specifies the CPU limit for each MongoDB replica.| Optional | -| `memorylimit` | `4Gi` | String |Specifies the memory limit for each MongoDB replica.| Optional | -| `pvcSize` | `20Gi` | String | Specifies the Persistent Volume Claim (PVC) size for each MongoDB replica.|Optional | -| `storageClass` | `""` | String | Storage class for the PVC. Leave this empty to use the default storage class. |Optional | - - -```yaml -mongo: - databaseUrl: "mongo-0.mongo,mongo-1.mongo,mongo-2.mongo" - replicas: 3 - cpuLimit: "2000m" - memoryLimit: "4Gi" - pvcSize: "20Gi" - storageClass: "" -``` - -## Config - -The configuration file contains the following sections. - -### SSO - -The table lists parameters to configure SSO SAML authentication in Palette. - -| **Parameters** | **Default value** | **Type** | **Description** | **Required/Optional** | -| --- | --- | --- | --- | --- | -| `saml.enabled` | `false` | Boolean | Specifies whether to enable SSO SAML configuration by setting it to true. | Optional| -| `saml.acsUrlRoot` | `myfirstpalette.spectrocloud.com` | String | Root URL of the Assertion Consumer Service (ACS).| Optional| -| `saml.acsUrlScheme` | `https` | String | URL scheme of the ACS either http or https. | Optional | -| `saml.audienceUrl` | `https://www.spectrocloud.com` | String | URL of the intended audience for the SAML response.| Optional| -| `saml.entityID` | `https://www.spectrocloud.com` | String | Entity ID of the Service Provider.| Optional | -| `saml.apiVersion` | `v1` | String |SSO SAML API version to use.| Optional | - -```yaml -config: - sso: - saml: - enabled: false - acsUrlRoot: "myfirstpalette.spectrocloud.com" - acsUrlScheme: "https" - audienceUrl: "https://www.spectrocloud.com" - entityId: "https://www.spectrocloud.com" - apiVersion: "v1" -``` - -### Email - -The table lists the parameters to configure email settings in Palette's self-hosted mode. - -| **Parameters** | **Default value** | **Type** | **Description** | **Required/Optional** | -| --- | --- | --- | --- | --- | -| `enabled` | `false` | Boolean | Specifies whether to enable email configuration. | Optional| -| `emailID ` | `""` | String | Email address for sending mail.| Optional| -| `smtpServer` | `smtp.gmail.com` | String | Simple Mail Transfer Protocol (SMTP) server used for sending mail. 
| Optional | -| `smtpPort` | `587` | Integer | SMTP port used for sending mail.| Optional| -| `insecureSkipVerifyTIs` | `true` | Boolean | Specifies whether to skip Transport Layer Security (TLS) verification for the SMTP connection.| Optional | -| `fromEmailID` | `noreply@spectrocloud.com` | String |Email address of the ***From*** address.| Optional | -| `password` | `""` | String |The base64-encoded SMTP password when sending emails.| Optional | - -```yaml -config: - email: - enabled: false - emailId: "@spectrocloud.com" - smtpServer: "smtp.gmail.com" - smtpPort: 587 - insecureSkipVerifyTls: true - fromEmailId: "noreply@spectrocloud.com" - password: "" -``` - -### Environment - -The table lists environment variables required to deploy Palette. - -| **Parameters** | **Default value** | **Type** | **Description** | **Required/Optional** | -| --- | --- | --- | --- | --- | -| `env.rootDomain` | `""` | String | Specifies the default Domain Name System (DNS) record mapped to the *ingress-nginx-controller* load balancer. It is required if false. Otherwise, leave it empty. | Required| -| `env.installerMode` | `self-hosted` | String | Specifies the installer mode. Do not modify the value.| Required| -| `env.installerCloud` | `""` | String | Specifies the cloud provider. It should be left empty. | Optional | - -```yaml -config: - env: - rootDomain: "" - installerMode: "self-hosted" - installerCloud: "" -``` - -### Cluster - -The cluster parameter specifies how the Kubernetes cluster is deployed. - - -| **Parameters** | **Default value** | **Type** | **Description** | **Required/Optional** | -| --- | --- | --- | --- | --- | -| `stableEndpointAccess` | `false` | Boolean | False indicates the Kubernetes cluster is deployed in a private network through a stable private endpoint. True indicates the cluster is deployed through a public endpoint. | Optional| - -```yaml -config: - cluster: - stableEndpointAccess: false -``` - -### Registry and Palette Artifact Repository (PAR) - -The table lists Registry and Palette Artifact Repository (PAR) parameters to install Palette using Helm Chart. - -| **Parameters** | **Default value** | **Type** | **Description** | **Required/Optional** | -| --- | --- | --- | --- | --- | -| `registry.endpoint` | `""` | String | The endpoint URL for the registry. | Required| -| `registry.name` | `""` | String | The name of the registry. | Required| -| `registry.password` | `""` | String | The password for the registry. | Required| -| `registry.username` | `""` | String | The username for the registry. | Required| -| `scar.endpoint` | `""` | String | The endpoint URL of the PAR. | Required| -| `scar.username` | `""` | String | The username for the PAR. | Required| -| `scar.password` | `""` | String | The password for the PAR. | Required| - -```yaml -config: - registry: - endpoint: "" - name: "" - password: "" - username: "" - - scar: - endpoint: "" - username: "" - password: "" -``` - -Contact support@spectrocloud.com to gain access to the Helm Chart. - -# Network Address Translation (NATS) - -The table lists Network Address Translation (NATS) parameters that Palette uses for communication between the tenant and management clusters. The internal flag determines whether NATS uses a new load balancer or the existing ingress service. 
To learn about NATS cluster configuration map properties, refer to [NATS clustering configuration.](https://docs.nats.io/running-a-nats-service/configuration/clustering/cluster_config) - -| **Parameters ** | **Default Value** | **Type ** | **Description** | **Required/Optional** | -| ------------ | ------------- | ------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------ | -| `internal` | `true` | Boolean | `true` means NATS shares the ingress load balancer or uses hostNetwork. `false` means a cloud load balancer is used. | Optional | -| `natsUrl` | `""` | String | Comma-separated list of mappings for NATS load balancer service. Required if `nats.internal` is false. | Required | -| `annotations`| `{}` | Map | A map of key-value pairs that specify the load balancer annotations for NATS. These annotations vary depending on the cloud provider. | Optional | -| `routes` | `[]` | List | List of server URLs for clustering (excluding self-routes) that can include authentication via token or username/password in the URL. | Optional | -| `natsStaticIP`| `""` | String | Static IP for the NATS load balancer service. If empty, a dynamic IP address will be generated. | Optional | - -```yaml -nats: - internal: true - natsUrl: "" - annotations: {} - routes: [] - natsStaticIP: "" -``` - -## Ingress - -The table lists parameters used to configure the NGINX Ingress Controller, which provides an external HTTP load balancer for Kubernetes services. Refer to [Set Up Ingress](/clusters/cluster-groups/ingress-cluster-group) for more guidance. - -| **Parameters** | **Default Value** | **Type** | **Description** | **Required/Optional** | -|--------------------------------|---------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------|--------------------| -| `Internal` | `false` | Boolean |Specify whether to use a cloud load balancer or host network. | Required | -| `certificate` | `""` | String | Default SSL certificate for NGINX Ingress Controller. If left blank, the NGINX Ingress Controller will generate a self-signed certificate. | Optional | -| `key` | `""` | String | Default SSL key for the NGINX Ingress Controller. | Optional | -| `annotations` | `{}` | Map | A map of key-value pairs that specifies load balancer annotations for ingress. | Optional | -| `ingressStaticIP` | `""` | String | Static IP for the ingress load balancer service. If empty, a dynamic IP address will be generated. | Optional | -| `terminateHTTPSAtLoadBalancer` | `false` | Boolean | Specify whether to terminate HTTPS at the load balancer. | Optional | - -```yaml -ingress: - ingress: - internal: false - certificate: "" - key: "" - annotations: {} - ingressStaticIP: "" - terminateHTTPSAtLoadBalancer: false -``` - -## Spectro Proxy - -The table lists parameters to configure the Spectro server-side proxy. - -| **Parameters** | **Default Value** | **Type** | **Description** | **Required/Optional** | -|---------------------|------------------------------|---------|---------------------------------------------------------------|--------------------| -| `enabled` | `false` | Boolean | Specifies whether Spectro Proxy is enabled or not. | Optional | -| `frpHostURL` | `proxy.sample.spectrocloud.com` | String | The URL of the Spectro proxy host. 
| Optional | -| `server.crt` | `"LS0..."` | String | Specifies the certificate file for the Spectro Proxy server. | Optional | -| `server.key` | `"LS0..."` | String | Specifies the private key file for the Spectro Proxy server. | Optional | -| `ca` | `"LS0..."` | String | Specifies the Certificate Authority (CA) for the Spectro Proxy server. | Optional | -| `ca.crt` | `"LS0..."` | String | Specifies the CA certificate file for the Spectro Proxy server. | Optional | - -```yaml -frps: - frps: - enabled: false - frpHostURL: proxy.sample.spectrocloud.com - server: - crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURqekNDQW5lZ0F3SUJBZ0lVZTVMdXBBZGljd0Z1SFJpWWMyWEgzNTFEUzJJd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0tERW1NQ1FHQTFVRUF3d2RjSEp2ZUhrdWMyRnRjR3hsTG5Od1pXTjBjbTlqYkc5MVpDNWpiMjB3SGhjTgpNakl4TURFME1UTXlOREV5V2hjTk1qY3hNREV6TVRNeU5ERXlXakI3TVFzd0NRWURWUVFHRXdKVlV6RUxNQWtHCkExVUVDQk1DUTBFeEV6QVJCZ05WQkFjVENsTmhiblJoUTJ4aGNtRXhGVEFUQmdOVkJBb1RERk53WldOMGNtOUQKYkc5MVpERUxNQWtHQTFVRUN4TUNTVlF4SmpBa0JnTlZCQU1USFhCeWIzaDVMbk5oYlhCc1pTNXpjR1ZqZEhKdgpZMnh2ZFdRdVkyOXRNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXd5bEt3MmlxClBXM2JrQU0wV3RhaEFLbEppcWFHd05LUDVRRTZ6ZW5NM2FURko3TjIwN0dWcUNGYzJHTDNodmNhTDFranZjeEkKK2lybHpkbm9hcVhUSmV3ZkJiTGs2SGVhZmdXUVp3NHNNeE5QRUVYYlNXYm54Mm03Y2FlbVJiUWZSQWhPWXRvWgpIWG1IMzQ1Q25mNjF0RnhMeEEzb0JRNm1yb0JMVXNOOUh2WWFzeGE5QUFmZUNNZm5sYWVBWE9CVmROalJTN1VzCkN5NmlSRXpEWFgvem1nOG5WWFUwemlrcXdoS3pqSlBJd2FQa2ViaXVSdUJYdEZ0VlQwQmFzS3VqbURzd0lsRFQKVmR4SHRRQUVyUmM4Q2Nhb20yUkpZbTd1aHNEYlo2WVFzS3JiMmhIbU5rNENVWUd5eUJPZnBwbzR2bFd1S2FEcgpsVFNYUXlPN0M0ejM1d0lEQVFBQm8xNHdYREJhQmdOVkhSRUVVekJSZ2dsc2IyTmhiR2h2YzNTSEJIOEFBQUdDCkhYQnliM2g1TG5OaGJYQnNaUzV6Y0dWamRISnZZMnh2ZFdRdVkyOXRnaDhxTG5CeWIzaDVMbk5oYlhCc1pTNXoKY0dWamRISnZZMnh2ZFdRdVkyOXRNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUEvRFJFVm54SWJRdi9uMDEvSQpJd1d0ekhKNGNHOUp6UlB6dmszNUcvRGJOVzZYZ0M3djBoWlFIVHg5bzMrckxoSUFiWTNmbjc1VEtlN3hMRWpiCkI3M3pGWURJSStkYzM5NkQzZU51M2NxRGIvY01kYmlFalhod2ttZk9NRm9qMnpOdHJIdzFsSjA0QlNFMWw1YWgKMDk0Vy9aaEQ2YTVLU3B0cDh1YUpKVmNrejRYMEdRWjVPYjZadGdxZVVxNytqWVZOZ0tLQzJCMW1SNjMyMDNsZwozVFZmZEkrdmI3b292dVdOOFRBVG9qdXNuS25WMmRMeTFBOWViWXYwMEM3WWZ6Q0NhODgrN2dzTGhJaUJjRHBPClJkWjU3QStKanJmSU5IYy9vNm5YWFhDZ2h2YkFwUVk1QnFnMWIzYUpUZERNWThUY0hoQVVaQzB5eU04bXcwMnQKWHRRQwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBd3lsS3cyaXFQVzNia0FNMFd0YWhBS2xKaXFhR3dOS1A1UUU2emVuTTNhVEZKN04yCjA3R1ZxQ0ZjMkdMM2h2Y2FMMWtqdmN4SStpcmx6ZG5vYXFYVEpld2ZCYkxrNkhlYWZnV1FadzRzTXhOUEVFWGIKU1dibngybTdjYWVtUmJRZlJBaE9ZdG9aSFhtSDM0NUNuZjYxdEZ4THhBM29CUTZtcm9CTFVzTjlIdllhc3hhOQpBQWZlQ01mbmxhZUFYT0JWZE5qUlM3VXNDeTZpUkV6RFhYL3ptZzhuVlhVMHppa3F3aEt6akpQSXdhUGtlYml1ClJ1Qlh0RnRWVDBCYXNLdWptRHN3SWxEVFZkeEh0UUFFclJjOENjYW9tMlJKWW03dWhzRGJaNllRc0tyYjJoSG0KTms0Q1VZR3l5Qk9mcHBvNHZsV3VLYURybFRTWFF5TzdDNHozNXdJREFRQUJBb0lCQUFPVVZFeTFOTG9mczdFMgpmZFZVcm10R3I1U2RiVWRJRlYrTDREbzZtWWxQSmxhT0VoWGI0ZlROZDloNEtEWVBmaWwwSnhXcUU0U1RHTmZuCnNUMlRnUVhuQ01LZi8xYk1Lc2M0N3VjVStYYU9XaHJnVFI5UmhkckFjN0duODRLL3hQc0ljL2VZTEhHLzh1QUUKeWUvLzVmRkM2QmpXY0hUM1NkTlZnd3duamJudG5XTXIzTFJBVnJBamZBckxveWUwS0F2YytYdXJLTEVCcmMyVQpjaHlDbitZemJKN0VlSG44UXdQNGdBNXVSK0NCMFJPeFErYXIzS3M5YUhkZTQ1OEVNNEtLMnpUOXA4RWZRc1lFCkFtNUpxWjliR0JEVHV1dEkyNm9GK0pLQ1IzZzhXNERRcHVYRUZoVjlya0pMSm13RDhQb0JaclF6UzZvdmJhdkkKRk42QVM4RUNnWUVBOEcxQzFxZVh4dTQ4aEYxak5MTCswRmxkeWdFem9SMmFoRGJCai8weUZkQVVjU2pYTzk0NAozN1dORTBUUG10WG1Vc3NZTlBTR21XaWI2OUhicEFoMTY3SWVwNE9LaVlZdkozYm1oUC9WNzFvK3M0SWJlSHh1CkVJbWVVckFOZWRoQURVQnZ4c1lXRWxlVlVJSFFRcjY1VHM2ZjIrWkpTKzg4TU05bUorL3BmcmNDZ1lFQXo4MXgKR3JiSE5oak56RjhZMjhiK0hMNW5rdDR0SUdkU3hnbW9PMFFJeGkrQVNZTzB0WW42VFk0ZHI5ZXErMzE3b21ZawpMbDNtNENORDhudG1vYzRvWnM4SUpDQ0IrZjNqcTY4OHdoQU9vVHZ4dDhjZVJqOFRhRHl1SHZwS043OVNsVVd2CjBJd2ZRNDNIemd3SWJiSWhjcTRJVGswanI0VHdWbThia283VElGRUNnWUJoNnUzVXhHN0JHeGZVaE1BNW4waSsKREJkeGhPbkZEV3gzdW1FOHhrN1dxV2NaNnhzMWk3eTRCNVhNS2pNdkNUeURyYWxQTCtOOXFTZ1BjK216TmFybwo4aU1mOENmRStMeE5vMVFoQ0p6Vm5YaDUzVnhZeHJ5QXlidU1TNTFCYVh3MHFYQ2NrT0krV0NNOHBaSHZEUVFsCmYydUZ3SlZMY3NTZDBHbjNpL01ab3dLQmdBY1BzUjg2Uk15MnpROTd6OGx3R3FSNVorV2F2U2ZUdXdGVnhLeTIKNUNGdjdja1J1NnRMbEFEY3FtK1dRWTRvTm5KUFREMXpIV3hTWm5XdjhjM2Z4b212MFZRQThzbSs4ZVNjb05EcgpZTVBqMkpQcEpVTTMwMzRBU2Q1dG5PWUdEMVZaTjk4N1U3aWs4Ynd6dG5tYnl2MHRvc1NlWkc4TGNtdE5mVDllCnNSZnhBb0dCQUpTV1lDellyTlRMNnRUSnh5M2FqWm5jZkxrMEV0eWNCd05FRXZHVzVSVE9LOUFYTE96RzN0eHUKajZqWlRpaUFRU09aaVd0clJHU0U0bEkyQ1MvcjNjd3VuSGlnZlovd1dKZldkZ0JpRnZqOTVFbUVQWUZaRDRobQpkT3l5UHhRRXFTRmprQ21BS2plOFBpTDdpU01GbGhBZTZQWFljQlExdCtzd01UeXBnY3RrCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== - ca: - crt : 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURNVENDQWhtZ0F3SUJBZ0lVSHhWK0ljVGZHUElzdW8yY3dqQ0Q0Z2RSTFFRd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0tERW1NQ1FHQTFVRUF3d2RjSEp2ZUhrdWMyRnRjR3hsTG5Od1pXTjBjbTlqYkc5MVpDNWpiMjB3SGhjTgpNakl4TURFME1UTXlOREV5V2hjTk16WXdOakl5TVRNeU5ERXlXakFvTVNZd0pBWURWUVFEREIxd2NtOTRlUzV6CllXMXdiR1V1YzNCbFkzUnliMk5zYjNWa0xtTnZiVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0MKQVFvQ2dnRUJBSy90WXBHVi9HRURUWnZzL25QQ2lOK0U3K1dOQ21GeU1NQjdkazVOT3JzQWZIaVVvZ1JRVUo0WQptSjhwVmYrSzhTRFBsdGNYcW40WVVTbmxiUERsVlBkWU5zOTEwT3RaS1EwNW96aUtGV2pNbS85NHlLSjVyVzNsCndDNEN0ayttUm9Ib0ZQQS81dmFVbVZHdlVadjlGY0JuL0pKN2F4WnRIQk1PRiticXQ0Zmd0ci9YMWdOeWhPVzUKZTVScGpESkozRjJTVnc5NUpBQSt4a3V3UitFSmVseEtnQVpxdDc0ejB4U2ROODZ0QzNtK0wxRGs2WVVlQWEzZApvM3Rsa3ZkeDV6dUJvSmI2QmpZWEV4UE1PbThRcHFNVWRLK3lDZUdrem9XQStDOUtFdGtVaERCWktENStNWXRZCktVMUh1RXJCbmw2Z3BuWTRlbzJjVTRxdkNwZzZ4S3NDQXdFQUFhTlRNRkV3SFFZRFZSME9CQllFRklKMkRkTjgKc2ZtVjRCT1ZFL0FjZ0VEejArNmlNQjhHQTFVZEl3UVlNQmFBRklKMkRkTjhzZm1WNEJPVkUvQWNnRUR6MCs2aQpNQThHQTFVZEV3RUIvd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQWhQVi9RMVl1YWVTOTZVCmhjVGQ4RWdJaHhpbHFiTWlTQm5WaVdrdlJzWk94UUIwNTFScWtwT3g0UTRsckdaOGVJWWc3T0trTTdzejhuTVQKL2pxS21sZDY0MzJCcURCMlNkNVp5ZFdReHAwU1laRTlnVWszYk9KRGtZVXQ4b1cvZDBWeG9uU05LQVN3QmZKaApWV1VZUUlpNm55K0ZZZmtuRFNvRnFlY2Z3SDBQQVUraXpnMkI3KzFkbko5YisyQ21IOUVCallOZ2hoNlFzVlFQCkh2SkdQQURtandPNkJOam5HK0Z3K0Z6cmFXUTNCTjAwb08zUjF6UmgxZERmTTQzR3oxRmZGRW5GSXI5aGFuUnQKWHJFZm8vZWU5bjBLWUFESEJnV1g4dlhuNHZrRmdWRjgwYW9MUUJSQTBxWXErcW1pVlp6YnREeE9ldFEyRWFyTQpyNmVWL0lZPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== -``` - -## UI System - -The table lists parameters for the Network Operations Center User Interface (NOC UI). Palette's NOC UI enables easy location monitoring of multi-location clusters through an intuitive UI. - -| **Parameters ** | **Default Value** | **Type** | **Description** | **Required/Optional** | -|---------------------|---------------|---------|------------------------------------------------------|--------------------| -| `enabled` | `false` | Boolean | Specifies whether to enable the Palette Network Operations Center (NOC) UI. Enabling this parameter requires the `ui.nocUI.mapBoxAccessToken`. Once enabled, all cluster locations will be reported to MapBox. | Optional | -| `mapBoxAccessToken` | `""` | String | Access token for the MapBox API. | Optional | -| `mapBoxStyledLayerID`| `""` | String | ID for the MapBox style layer. | Optional | - -```yaml -ui-system: - ui: - nocUI: - enable: false - mapBoxAccessToken: "" - mapBoxStyledLayerID: "" -``` - - - - - - - - - -
-
- - - diff --git a/content/docs/12-enterprise-version/04-system-console-dashboard.md b/content/docs/12-enterprise-version/04-system-console-dashboard.md deleted file mode 100644 index ec35ad4619..0000000000 --- a/content/docs/12-enterprise-version/04-system-console-dashboard.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "System Console Dashboard" -metaTitle: "System Console Dashboard" -metaDescription: "Understanding the super-admin settings in Palette's Enterprise (on-premise) variant." -icon: "" -hideToC: false -fullWidth: false ---- - -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -The self-hosted system console enables an initial setup and onboarding, administration, as well as upgrade management of the Palette Platform. The on-prem system console is available in a "quick start" mode and an "enterprise" mode. - -Platform administrators can use this console to perform the following operations: - -| Setting | Function | -| --- | --- | -| Tenant Management | Create and activate tenants | -| Update Management | Upgrade Spectro Cloud platform to newer versions | -| Administration | Configure platform settings like SMTP, Certificates, etc. | -| Migrate quick start mode cluster to enterprise | Available in quick start mode to install an enterprise cluster | - -# Tenant Management - -Create new tenants and their initial tenant admin accounts. Optionally, activate new tenants to enable tenant administrators to log in and access the tenant management console. - -# Update Management - -Apply Palette platform upgrades. Upgrades to the Palette platform are published to the Palette repository and a notification is displayed on the console when new versions are available. Platform administrators can apply platform upgrades directly from the on-prem system console. - -# Administration - -## SMTP - -Configure SMTP settings to enable the Palette platform to send out email notifications. Email Notifications are sent out to new users when they are onboarded to the platform to activate their accounts. - -## Certificates - -Provide the desired SSL/TLS server certificates to support external access to valid HTTPs. - -# Cluster Management - -Enterprise clusters are created and deployed from this section. The layers and/or pack integrations constituting a cluster can also be configured and updated. diff --git a/content/docs/12-enterprise-version/05-enterprise-cluster-management.md b/content/docs/12-enterprise-version/05-enterprise-cluster-management.md deleted file mode 100644 index e1ba7393cb..0000000000 --- a/content/docs/12-enterprise-version/05-enterprise-cluster-management.md +++ /dev/null @@ -1,242 +0,0 @@ ---- -title: "Enterprise Management" -metaTitle: "On-Premises Enterprise Management" -metaDescription: " Features to enhance the enterprise clusters" -icon: "" -hideToC: false -fullWidth: false ---- - -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; -import Tabs from 'shared/components/ui/Tabs'; - -# Overview - -Palette supports several Day-2 operations to manage the end-to-end lifecycle of the Kubernetes clusters launched through Palette On-Premises Enterprise Mode. 
It provides several capabilities across clusters to keep your clusters secure, compliant, and up-to-date, and to perform ongoing management operations such as backup and restore and cluster migration across Private Cloud Gateways (PCGs).
- 
-
- 
- 
- 
- 
- 
-# Palette PCG Migration
-
-Palette enables PCG migration to route traffic between PCGs and ensure uninterrupted PCG service availability. If a PCG becomes unhealthy, you can delete it after migrating the clusters launched through that PCG to another healthy PCG. This ensures that cluster operations such as deletion are carried out without interruption.
-
-## When Will You Migrate
-
-The possible conditions of PCG migration are:
-
-* Unhealthy PCG to healthy PCG
-
-
-* Healthy PCG to healthy PCG
-
-
-## How to Migrate PCG Traffic
-
-To migrate the traffic from a PCG:
-
1. Log in to the Palette console as a **Tenant Admin**.


2. From **Tenant Settings**, go to the **Private Cloud Gateways** tab to list all PCGs.


3. Click the **three-dot Menu** next to the PCG you want to migrate to display the drop-down option **Migrate**.


4. Click the **Migrate** option to open the wizard and select your destination PCG.


5. The wizard displays a drop-down list of all healthy PCGs to which traffic can be migrated. Select the PCG of your choice.


6. Confirm the migration operation. The UI confirms when the migration completes successfully.


7. Once the migration is complete, you can delete the unhealthy source PCG. Clear any residual resources manually to complete the deletion process.


8. The **Audit Logs** record the migration.
# Backup and Restore for Enterprise Clusters

Palette provides convenient backup options to back up the Enterprise Kubernetes cluster state to object storage. The backup can be restored later to the same cluster or to a different one if required. Besides backing up Kubernetes native objects such as Pods, DaemonSets, and Services, a snapshot of the persistent volume is taken and maintained as part of the backup. The two backup options are:

* FTP


* S3

FTP mode sends the backup data of your enterprise cluster to a dedicated FTP server using the File Transfer Protocol (FTP).

S3 mode stores the backup in an Amazon S3 bucket, making it easy to use Amazon's infrastructure for remote backups and to keep cluster objects secure online. In addition, this option provides scheduling, strong encryption, compression, and easy access to your backup files.

## Instructions

1. Log in to enterprise mode as an administrator:

   * https://system_IP/system
   * Username: admin
   * Password: custom password


2. Select **Administration** from the left panel.


3. On the **Administration** page, select **Backup/Restore** from the top ribbon.


4. Complete the backup configuration wizard to set up the mode of backup creation.


5. Select the mode of backup from the two available options:
   * FTP
   * S3


### FTP

Provide the following information to create a backup location in FTP mode:

1. The ftp:// server details.


2. The directory name for the backup storage.


3. The username and password used to log in to the server.


4. Scheduling details of the backup.
   * **Interval** specifies the number of days between two consecutive backups.
   * **Retention period** for the backup in days.
   * **Hours of the day** (UTC 0 to 23 hours) specifies the time of the specified day to take the backup.


5. Save the configuration. It is used to create an FTP backup by clicking the **+Create FTP backup** button in the top-right corner of the page.


6. The configuration can be edited as required.


7. Delete or restore a specific backup from the actions panel.


The saved configuration details can be used to create multiple backup locations.
Any changes can be made to the existing configuration and saved for reuse.


### S3 Backup Location

An existing AWS S3 bucket is a prerequisite.

The following permissions need to be enabled.

#### Permission Sets
Ensure the IAM user or the root user role has the following two IAM policies attached:

**EC2-Policy**

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "ec2:DescribeVolumes",
        "ec2:DescribeSnapshots",
        "ec2:CreateTags",
        "ec2:CreateVolume",
        "ec2:CreateSnapshot",
        "ec2:DeleteSnapshot"
      ],
      "Resource": ""
    }
  ]
}
```


**S3-Policy**

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetObject",
        "s3:DeleteObject",
        "s3:PutObject",
        "s3:AbortMultipartUpload",
        "s3:ListMultipartUploadParts"
      ],
      "Resource": [""]
    }
  ]
}
```

The following information is needed:


* AWS Account Access Key


* AWS Account Secret Key


* AWS Region


* AWS Bucket name


* Folder name in which the backup is stored in the S3 bucket


* Scheduling details of the backup.
   * **Interval** specifies the number of days between two consecutive backups.
   * **Retention period** of the backup in days.
- * **Hours of the day** (UTC 0 to 23 hours) specifies the time of the specified day to take the backup. - - -* Validate the information and save the configurations. - - -* The saved configuration is used for creating an S3 backup by clicking the **+Create S3 backup** button on the top-right corner of the page. - - -* Once the backup is created, the details such as Backup uid, Mode, Status, Finish Time, and Actions is viewed from the console for the individual backup. - - -* Delete/Restore a specific backup from the actions panel. - - - -The saved configuration details can be used to create multiple backup locations. Any changes can be made to the existing configuration and saved for reuse. - - - - - -
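Before saving the S3 configuration, it can help to confirm that the bucket exists and that the credentials you plan to use can reach it. The following is a minimal sketch using the AWS CLI; the bucket name, folder, and profile are placeholders for your own values.

```shell
# Confirm the bucket exists and is reachable with the credentials you plan to use.
aws s3api head-bucket --bucket example-palette-backups --profile palette-backup

# Confirm read access to the folder that will hold the backups.
aws s3 ls s3://example-palette-backups/enterprise-backups/ --profile palette-backup
```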
- -
-
diff --git a/content/docs/12-enterprise-version/06-air-gap-repo.md b/content/docs/12-enterprise-version/06-air-gap-repo.md deleted file mode 100644 index 1832351844..0000000000 --- a/content/docs/12-enterprise-version/06-air-gap-repo.md +++ /dev/null @@ -1,731 +0,0 @@ ---- -title: "Install in an Air Gap Environment" -metaTitle: "Install in an Air Gap Environment" -metaDescription: "Learn how to install Palette into an air gap environment." -icon: "" -hideToC: false -fullWidth: false ---- - -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import Tabs from 'shared/components/ui/Tabs'; - -# Overview - - -You can install a self-hosted version of Palette into a VMware environment without direct internet access. This type of installation is referred to as an *air gap* installation. - -In a standard Palette installation, the following artifacts are downloaded by default from the public Palette repository. - -* Palette platform manifests and required platform packages. - - -* Container images for core platform components and 3rd party dependencies. - - -* Palette Packs. - - -The installation process changes a bit in an air gap environment due to the lack of internet access. Before the primary Palette installation step, you must download the three required Palette artifacts mentioned above. The other significant change is that Palette's default public repository is not used. Instead, a private repository supports all Palette operations pertaining to storing images and packages. - -The following diagram is a high-level overview of the order of operations required to deploy a self-hosted instance of Palette in an airgap environment. - - -![An architecture diagram outlining the five different install phases](/enterprise-version_air-gap-repo_overview-order-diagram.png) - - -The airgap installation can be simplified into five major phases. - -
- -1. Download the Open Virtual Appliance (OVA) image and deploy the instance hosting the private repository that supports the airgap environment. - - -2. The private Spectro Cloud repository is initialized, and all the Palette-required artifacts are downloaded and available. - - -3. The Palette Install OVA is deployed, configured, and initialized. - - -4. The scale-up process to a highly available three-node installation begins. - - -5. Palette is ready for usage. - - -This guide focuses on the first two installation phases, as the remaining ones are covered in the [Migrate Cluster to Enterprise](/enterprise-version/deploying-an-enterprise-cluster) guide and the [Install Using Quick-Start Mode](/enterprise-version/deploying-the-platform-installer) guide. - - -# Prerequisites - -* The following minimum resources are required to deploy Palette. - * 2 vCPU - * 4 GB of Memory - * 100 GB of Storage. Storage sizing depends on your intended update frequency and data retention model.

- -* Ensure the following ports allow inbound network traffic. - * 80 - * 443 - * 5000 - * 8000 - - -* Request the Palette self-hosted installer image and the Palette air gap installer image. To request the installer images, please contact our support team by sending an email to support@spectrocloud.com. Kindly provide the following information in your email: - - - Your full name - - Organization name (if applicable) - - Email address - - Phone number (optional) - - A brief description of your intended use for the Palette Self-host installer image. - -Our dedicated support team will promptly get in touch with you to provide the necessary assistance and share the installer image. - -If you have any questions or concerns, please feel free to contact support@spectrocloud.com. - -
- - -# Deploy Air Gapped Appliance - - -1. Log in to vCenter Server by using the vSphere Client. - - -2. Navigate to the Datacenter and select the cluster you want to use for the installation. Right-click on the cluster and select **Deploy OVF Template**. - - -3. Select the airgap OVA installer image you downloaded after receiving guidance from our support team. - - -4. Select the folder where you want to install the Virtual Machine (VM) and assign a name to the VM. - - -5. Next, select the compute resource. - - -6. Review the details page. You may get a warning message stating the certificate is not trusted. You can ignore the message and click **Next**. - - -7. Select your storage device and storage policy. Click on **Next** to proceed. - - -8. Choose a network for your appliance and select **Next**. - - -9. Fill out the remaining template customization options. You can modify the following input fields.

- - | Parameter | Description | Default Value | - | --- | --- | -- | - | **Encoded user-data** | In order to fit into an XML attribute, this value is base64 encoded. This value will be decoded, and then processed normally as user-data. | - | - | **ssh public keys** | This field is optional but indicates that the instance should populate the default user's `authorized_keys` with the provided public key. | -| - | **Default User's password** | Setting this value allows password-based login. The password will be good for only a single login. If set to the string `RANDOM` then a random password will be generated, and written to the console. | - | - | **A Unique Instance ID for this instance** | Specifies the instance id. This is required and used to determine if the machine should take "first boot" actions| `id-ovf`| - | **Hostname** | Specifies the hostname for the appliance. | `ubuntuguest` | - | **URL to seed instance data from** | This field is optional but indicates that the instance should 'seed' user-data and meta-data from the given URL.| -| - -10. Click on **Next** to complete the deployment wizard. Upon completion, the cloning process will begin. The cloning process takes a few minutes to complete. - - -11. Power on the VM and click on the **Launch Web Console** button to access the instance's terminal. - - -12. Configure a static IP address on the node by editing **/etc/netplan/50-cloud-init.yaml**. - -
- - ```shell - sudo vi /etc/netplan/50-cloud-init.yaml - ``` - - Use the following sample configuration as a starting point but feel free to change the configuration file as required for your environment. To learn more about Netplan, check out the [Netplan configuration examples](https://netplan.io/examples) from Canonical. - -
- - ```yaml - network: - version: 2 - renderer: networkd - ethernets: - ens192: - dhcp4: false - addresses: - - 10.10.244.9/18 # your static IP and subnet mask - gateway4: 10.10.192.1 # your gateway IP - nameservers: - addresses: [10.10.128.8] # your DNS nameserver IP address. - ``` - - To exit Vi, press the **ESC** key and type `:wq` followed by the **Enter** key.
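    Optionally, you can test the configuration before committing to it and confirm the settings after the `netplan apply` step that follows. This is a minimal sketch that assumes the `ens192` interface name used in the sample above.

    ```shell
    # Optional: test the configuration first. netplan try rolls the change back
    # automatically if you do not confirm it within the timeout.
    sudo netplan try

    # After the configuration is applied, confirm the address, default route, and DNS settings.
    ip -4 addr show ens192
    ip route show default
    resolvectl status ens192
    ```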

- -13. Issue the `netplan` command to update the network settings. - -
- - ```shell - sudo netplan apply - ``` - -14. Give the instance one to two minutes before issuing the following command. The next step is to start the airgap setup script that stands up the Spectro Repository. Issue the command below and replace `X.X.X.X` with the static IP you provided to the Netplan configuration file. - -
- - ```shell - sudo /opt/spectro/airgap-setup.sh X.X.X.X - ``` - - Record the output of the setup command as you will use it when deploying the Quick Start appliance later on in the installation process. - - Example Output: - ```shell - Setting up Manifests - Setting up Manifests - Setting up SSL Certs - Setup Completed - - Details: - ------- - Spectro Cloud Repository - UserName: XXXXXXXXX - Password: XXXXXXXXXX - Location: https://10.10.249.12 - Artifact Repo Certificate: - LS0tLS1CRUdJ............. - - Pack Registry - URL: https://10.10.249.12:5000 - Username: XXXXXXXXX - Password: XXXXXXXXX - ``` - -15. If you need to configure the instance with proxy settings, go ahead and do so now. You can configure proxy settings by using environment variables. Replace the values with your environment's respective values. - -
- - ```shell - export http_proxy=http://10.1.1.1:8888 - export https_proxy=https://10.1.1.1:8888 - export no_proxy=.example.dev,10.0.0.0/8 - ``` - -16. The next set of steps will download the required binaries to support a Palette installation, such as the Palette Installer, required Kubernetes packages, and kubeadm packages. You can download these artifacts from the instance, or externally and transfer them to the instance. Click on each tab for further guidance. - -
- - - - You must download the following three resources. Our support team will provide you with the credentials and download URL. - Click on each tab to learn more about each resource and steps for downloading. - - - -
- - - - - - Download the binary by using the URL provided by the Palette support team. Change the version number as needed. - -
- - ```shell - curl --user XXXX:YYYYY https:///airgap/packs/airgap-v3.3.15.bin \ - --output airgap-k8s-v3.3.15.bin - ``` - - - - If you receive a certificate error, use the `-k` or `--insecure` flag. - - - - Assign the proper permissions and start the download script. - -
- - ```shell - sudo chmod 755 ./airgap-k8s-v3.3.15.bin && sudo ./airgap-k8s-v3.3.15.bin - ``` - - Example Output: - ```shell - sudo ./airgap-k8s-v3.3.15.bin - Verifying archive integrity... 100% MD5 checksums are OK. All good. - Uncompressing Airgap K8S Images Setup - Version 3.3.15 100% - Setting up Packs - Setting up Images - - Pushing image k8s.gcr.io/kube-controller-manager:v1.22.10 - - Pushing image k8s.gcr.io/kube-proxy:v1.22.10 - - Pushing image k8s.gcr.io/kube-apiserver:v1.22.10 - - Pushing image k8s.gcr.io/kube-scheduler:v1.22.10 - … - Setup Completed - ``` - - - - -
- - - - Download the binary by using the URL provided by the Palette support team. Change the version number as needed. - -
- - ```shell - curl --user XXXX:YYYYY https:///airgap/packs/airgap-k8s-v3.3.15.bin \ - --output airgap-k8s-v3.3.15.bin - ``` - - - - If you receive a certificate error, use the `-k` or `--insecure` flag. - - - - Assign the proper permissions and start the download script. - -
- - ```shell - sudo chmod 755 ./airgap-k8s-v3.3.15.bin && sudo ./airgap-k8s-v3.3.15.bin - ``` - - Example Output: - ```shell - sudo ./airgap-k8s-v3.3.15.bin - Verifying archive integrity... 100% MD5 checksums are OK. All good. - Uncompressing Airgap K8S Images Setup - Version 3.3.15 100% - Setting up Packs - Setting up Images - - Pushing image k8s.gcr.io/kube-controller-manager:v1.22.10 - - Pushing image k8s.gcr.io/kube-proxy:v1.22.10 - - Pushing image k8s.gcr.io/kube-apiserver:v1.22.10 - - Pushing image k8s.gcr.io/kube-scheduler:v1.22.10 - … - Setup Completed - ``` - - -
- - - - Download the binary by using the URL provided by the Palette support team. Change the version number as needed. - -
- - ```shell - curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-kubeadm.bin \ - --output airgap-edge-kubeadm.bin - ``` - - - - If you receive a certificate error, use the `-k` or `--insecure` flag. - - - - Assign the proper permissions and start the download script. - -
- - ```shell - sudo chmod 755 ./airgap-edge-kubeadm.bin && sudo ./airgap-edge-kubeadm.bin - ``` - - Example Output: - ```shell - sudo ./airgap-edge-kubeadm.bin - Verifying archive integrity... 100% MD5 checksums are OK. All good. - Uncompressing Airgap Edge Packs - Kubeadm Images 100% - Setting up Images - - Skipping image k8s.gcr.io/coredns/coredns:v1.8.6 - - Pushing image k8s.gcr.io/etcd:3.5.1-0 - - Pushing image k8s.gcr.io/kube-apiserver:v1.23.12 - - Pushing image k8s.gcr.io/kube-controller-manager:v1.23.12 - - Pushing image k8s.gcr.io/kube-proxy:v1.23.12 - … - Setup Completed - ``` - -
- - -
- -
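After each download script reports `Setup Completed`, you can spot-check that the artifacts were pushed to the private registry before moving on. A minimal check, using the registry URL and credentials printed earlier by `airgap-setup.sh` (placeholder values shown):

```shell
# List the repositories in the private pack registry. Replace the IP and
# credentials with the values reported by airgap-setup.sh.
curl --insecure --user XXXXXXXXX:XXXXXXXXX https://10.10.249.12:5000/v1/_catalog
```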
17. If you will be using Edge deployments, download the packages your Edge deployments will need. If you do not plan to use Edge, skip to the end. You can return to this step later and add the packages if needed. Click on the `...` tab for additional options.
- - - - - - Download the binary by using the URL provided by the Palette support team. Change the version number as needed. - -
- - ```shell - curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-ubuntu22-k3s.bin \ - --output airgap-edge-ubuntu22-k3s.bin - ``` - - - - If you receive a certificate error, use the `-k` or `--insecure` flag. - - - - Assign the proper permissions and start the download script. - -
- - ```shell - sudo chmod 755 ./airgap-edge-ubuntu22-k3s.bin && sudo ./airgap-edge-ubuntu22-k3s.bin - ``` - - -
- - - - - Download the binary by using the URL provided by the Palette support team. Change the version number as needed. - -
- - ```shell - curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-ubuntu22-rke.bin \ - --output airgap-edge-ubuntu22-rke.bin - ``` - - - - If you receive a certificate error, use the `-k` or `--insecure` flag. - - - - Assign the proper permissions and start the download script. - -
- - ```shell - sudo chmod 755 ./airgap-edge-ubuntu22-rke.bin && sudo ./airgap-edge-ubuntu22-rke.bin - ``` - -
- - - - Download the binary by using the URL provided by the Palette support team. Change the version number as needed. - -
- - ```shell - curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-ubuntu22-kubeadm.bin \ - --output airgap-edge-ubuntu22-kubeadm.bin - ``` - - - - If you receive a certificate error, use the `-k` or `--insecure` flag. - - - - Assign the proper permissions and start the download script. - -
- - ```shell - sudo chmod 755 ./airgap-edge-ubuntu22-kubeadm.bin && sudo ./airgap-edge-ubuntu22-kubeadm.bin - ``` - -
- - - - Download the binary by using the URL provided by the Palette support team. Change the version number as needed. - -
- - ```shell - curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-ubuntu20-k3s.bin \ - --output airgap-edge-ubuntu20-k3s.bin - ``` - - - - If you receive a certificate error, use the `-k` or `--insecure` flag. - - - - Assign the proper permissions and start the download script. - -
- - ```shell - sudo chmod 755 ./airgap-edge-ubuntu20-k3s.bin && sudo ./airgap-edge-ubuntu20-k3s.bin - ``` - -
- - - - Download the binary by using the URL provided by the Palette support team. Change the version number as needed. - -
- - ```shell - curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-ubuntu20-rke.bin \ - --output airgap-edge-ubuntu20-rke.bin - ``` - - - - If you receive a certificate error, use the `-k` or `--insecure` flag. - - - - Assign the proper permissions and start the download script. - -
- - ```shell - sudo chmod 755 ./airgap-edge-ubuntu20-rke.bin && sudo ./airgap-edge-ubuntu20-rke.bin - ``` - -
- - - - - Download the binary by using the URL provided by the Palette support team. Change the version number as needed. - -
- - ```shell - curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-ubuntu20-kubeadm.bin \ - --output airgap-edge-ubuntu20-kubeadm.bin - ``` - - - - If you receive a certificate error, use the `-k` or `--insecure` flag. - - - - Assign the proper permissions and start the download script. - -
- - ```shell - sudo chmod 755 ./airgap-edge-ubuntu20-kubeadm.bin && sudo ./airgap-edge-ubuntu20-kubeadm.bin - ``` - -
- - - - Download the binary by using the URL provided by the Palette support team. Change the version number as needed. - -
- - ```shell - curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-opensuse-k3s.bin \ - --output airgap-edge-opensuse-k3s.bin - ``` - - - - If you receive a certificate error, use the `-k` or `--insecure` flag. - - - - Assign the proper permissions and start the download script. - -
- - ```shell - sudo chmod 755 ./airgap-edge-opensuse-k3s.bin && sudo ./airgap-edge-opensuse-k3s.bin - ``` - -
- - - - Download the binary by using the URL provided by the Palette support team. Change the version number as needed. - -
- - ```shell - curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-opensuse-rke.bin \ - --output airgap-edge-opensuse-rke.bin - ``` - - - - If you receive a certificate error, use the `-k` or `--insecure` flag. - - - - Assign the proper permissions and start the download script. - -
- - ```shell - sudo chmod 755 ./airgap-edge-opensuse-rke.bin && sudo ./airgap-edge-opensuse-rke.bin - ``` - -
- - - - Download the binary by using the URL provided by the Palette support team. Change the version number as needed. - -
- - ```shell - curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-opensuse-kubeadm.bin \ - --output airgap-edge-opensuse-kubeadm.bin - ``` - - - - If you receive a certificate error, use the `-k` or `--insecure` flag. - - - - Assign the proper permissions and start the download script. - -
- - ```shell - sudo chmod 755 ./airgap-edge-opensuse-kubeadm.bin && sudo ./airgap-edge-opensuse-kubeadm.bin - ``` - -
- - -
- -
- ----- - -
- - -The next step of the installation process is to begin the deployment of an appliance using the instructions in the [Migrate Cluster to Enterprise Mode](/enterprise-version/deploying-an-enterprise-cluster). If you need to review the Spectro Cloud Repository details, issue the following command for detailed output. - -
- -```shell -sudo /bin/airgap-setup.sh -``` - -
- - - -You can review all the logs related to the setup of the private Spectro repository in **/tmp/airgap-setup.log**. - - - - -# Validate - -You can validate that the Spectro Repository you deployed is available and ready for the next steps of the installation process. If you provided the appliance with an SSH key then you can skip to step five. - -
-1. Log in to vCenter Server by using the vSphere Client. - - -2. Navigate to your Datacenter and locate your VM. Click on the VM to access its details page. - - -3. Power on the VM. - - -4. Click on **Launch Web Console** to access the terminal. - - -5. Log in with the user `ubuntu` and the user password you specified during the installation. If you are using SSH, use the following command, and ensure you specify the path to your SSH private key and replace the IP address with your appliance's static IP. - -
    ```shell
    ssh -i ~/path/to/your/file ubuntu@10.1.1.1
    ```


6. Verify the registry server is up and available. Replace the `10.1.1.1` value with your appliance's IP address.
- - ```shell - curl --insecure https://10.1.1.1:5000/health - ``` - - Example Output: - ```shell - {"status":"UP"} - ``` - -7. Ensure you can log into your registry server. Use the credentials provided to you by the `airgap-setup.sh` script. Replace the `10.1.1.1` value with your appliance's IP address. - -
- - ```shell - curl --insecure --user admin:admin@airgap https://10.1.1.1:5000/v1/_catalog - ``` - - Example Output: - ``` - {"metadata":{"lastUpdatedTime":"2023-04-11T21:12:09.647295105Z"},"repositories":[{"name":"amazon-linux-eks","tags":[]},{"name":"aws-efs","tags":[]},{"name":"centos-aws","tags":[]},{"name":"centos-azure","tags":[]},{"name":"centos-gcp","tags":[]},{"name":"centos-libvirt","tags":[]},{"name":"centos-vsphere","tags":[]},{"name":"cni-aws-vpc-eks","tags":[]},{"name":"cni-aws-vpc-eks-helm","tags":[]},{"name":"cni-azure","tags":[]},{"name":"cni-calico","tags":[]},{"name":"cni-calico-azure","tags":[]},{"name":"cni-cilium-oss","tags":[]},{"name":"cni-custom","tags":[]},{"name":"cni-kubenet","tags":[]},{"name":"cni-tke-global-router","tags":[]},{"name":"csi-aws","tags":[]},{"name":"csi-aws-ebs","tags":[]},{"name":"csi-aws-efs","tags":[]},{"name":"csi-azure","tags":[]},{"name":"csi-gcp","tags":[]},{"name":"csi-gcp-driver","tags":[]},{"name":"csi-longhorn","tags":[]},{"name":"csi-longhorn-addon","tags":[]},{"name":"csi-maas-volume","tags":[]},{"name":"csi-nfs-subdir-external","tags":[]},{"name":"csi-openstack-cinder","tags":[]},{"name":"csi-portworx-aws","tags":[]},{"name":"csi-portworx-gcp","tags":[]},{"name":"csi-portworx-generic","tags":[]},{"name":"csi-portworx-vsphere","tags":[]},{"name":"csi-rook-ceph","tags":[]},{"name":"csi-rook-ceph-addon","tags":[]},{"name":"csi-tke","tags":[]},{"name":"csi-topolvm-addon","tags":[]},{"name":"csi-vsphere-csi","tags":[]},{"name":"csi-vsphere-volume","tags":[]},{"name":"edge-k3s","tags":[]},{"name":"edge-k8s","tags":[]},{"name":"edge-microk8s","tags":[]},{"name":"edge-native-byoi","tags":[]},{"name":"edge-native-opensuse","tags":[]},{"name":"edge-native-ubuntu","tags":[]},{"name":"edge-rke2","tags":[]},{"name":"external-snapshotter","tags":[]},{"name":"generic-byoi","tags":[]},{"name":"kubernetes","tags":[]},{"name":"kubernetes-aks","tags":[]},{"name":"kubernetes-coxedge","tags":[]},{"name":"kubernetes-eks","tags":[]},{"name":"kubernetes-eksd","tags":[]},{"name":"kubernetes-konvoy","tags":[]},{"name":"kubernetes-microk8s","tags":[]},{"name":"kubernetes-rke2","tags":[]},{"name":"kubernetes-tke","tags":[]},{"name":"portworx-add-on","tags":[]},{"name":"spectro-mgmt","tags":[]},{"name":"tke-managed-os","tags":[]},{"name":"ubuntu-aks","tags":[]},{"name":"ubuntu-aws","tags":[]},{"name":"ubuntu-azure","tags":[]},{"name":"ubuntu-coxedge","tags":[]},{"name":"ubuntu-edge","tags":[]},{"name":"ubuntu-gcp","tags":[]},{"name":"ubuntu-libvirt","tags":[]},{"name":"ubuntu-maas","tags":[]},{"name":"ubuntu-openstack","tags":[]},{"name":"ubuntu-vsphere","tags":[]},{"name":"volume-snapshot-controller","tags":[]}],"listMeta":{"continue":""}} - ``` - - -8. Next, validate the Spectro repository is available. Replace the IP with your appliance's IP address. - -
    ```shell
    curl --insecure --user spectro:admin@airgap https://10.1.1.1
    ```

    Output:
    ```html
    <!DOCTYPE html>
    <html>
    <head>
    <title>Welcome to nginx!</title>
    </head>
    <body>
    <h1>Welcome to nginx!</h1>
    <p>If you see this page, the nginx web server is successfully installed and
    working. Further configuration is required.</p>

    <p>For online documentation and support please refer to
    <a href="http://nginx.org/">nginx.org</a>.<br/>
    Commercial support is available at
    <a href="http://nginx.com/">nginx.com</a>.</p>

    <p><em>Thank you for using nginx.</em></p>
    </body>
    </html>
    ```
diff --git a/content/docs/12-enterprise-version/07-monitoring.md b/content/docs/12-enterprise-version/07-monitoring.md deleted file mode 100644 index 5c192c5936..0000000000 --- a/content/docs/12-enterprise-version/07-monitoring.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: "Cluster Monitoring Metrics" -metaTitle: "Enterprise Cluster Monitoring Metrics" -metaDescription: "Enterprise Cluster Monitoring Metrics for Palette's Enterprise (on-premises) variant." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Enterprise Cluster Monitoring Metrics -## Pods Monitoring Metrics -### Namespaces to Monitor Pods - -|**Namespaces** |**Interpretation**| -|-----------|--------------| -|**ui-system** |Palette Management UI| -|**cp-system** |System Management UI| -|**nats-system**| Message System| -|**ingress-nginx**| Ingress services| -|**hubble-system**|Core backend services| -|**jet-system**|Pivot Tenant Clusters| - -### Exceptions - -The below pods are dynamically created from jobs and can be excluded from monitoring. - -* ingress-nginx-admission-patch-* [ ns: ingress-nginx ] -* ingress-nginx-admission-create-* [ ns: ingress-nginx ] -* packsync-* [ ns: hubble-system ] -* cleanup-* [ ns: hubble-system ] - - -## CPU and Memory Monitoring Metrics - -### Default Specifications -* CPU: 4 vCPU -* RAM: 8 GB RAM -* CP Nodes: 3 - -### Thresholds -* CPU warn [per node ] > 70% -* CPU alert [per node] > 80% -* Memory Warn [per node] > 80% -* Memory Alert [per node] > 90% - -### Node Monitoring Metrics - #### Number of Nodes: 3 - #### Node Alerts -* Node up -* Node down -* Node unreachable - -
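For a quick health check of the pods in the namespaces listed above, you can loop over them with kubectl. This is a minimal sketch and assumes your kubeconfig points at the enterprise cluster.

```shell
# Check pod status in each Palette system namespace listed in the monitoring table.
for ns in ui-system cp-system nats-system ingress-nginx hubble-system jet-system; do
  echo "--- ${ns} ---"
  kubectl get pods --namespace "${ns}"
done
```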
-
- diff --git a/content/docs/12-enterprise-version/07.1-reverse-proxy.md b/content/docs/12-enterprise-version/07.1-reverse-proxy.md deleted file mode 100644 index 9b2c4eaa28..0000000000 --- a/content/docs/12-enterprise-version/07.1-reverse-proxy.md +++ /dev/null @@ -1,260 +0,0 @@ ---- -title: "Configure Reverse Proxy" -metaTitle: "Configure Reverse Proxy" -metaDescription: "Learn how to configure a reverse proxy for Palette." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -You can configure a reverse proxy for Palette. The reverse proxy can be used by host clusters deployed in a private network. Host clusters deployed in a private network are not accessible from the public internet or by users in different networks. You can use a reverse proxy to access the cluster's Kubernetes API server from a different network. - -When you configure reverse proxy server for Palette, clusters that use the [Spectro Proxy pack](/integrations/frp) will use the reverse proxy server address in the kubeconfig file. Clusters not using the Spectro Proxy pack will use the default cluster address in the kubeconfig file. - - -Use the following steps to configure a reverse proxy server for Palette. - -# Prerequisites - - -- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) is installed and available. - - -- [Helm](https://helm.sh/docs/intro/install/) is installed and available. - - -- Access to the kubeconfig file of the Palette Kubernetes cluster. You can download the kubeconfig file from the Palette system console. Navigate to **Enterprise System Migration**, select the Palette cluster, and click the **Download Kubeconfig** button for the cluster. - - -- A domain name that you can use for the reverse proxy server. You will also need access to the DNS records for the domain so that you can create a CNAME DNS record for the reverse proxy server load balancer. - - -- Ensure you have an SSL certificate that matches the domain name you will assign to Spectro Proxy. You will need this to enable HTTPS encryption for the Spectro Proxy. Contact your network administrator or security team to obtain the SSL certificate. You need the following files: - - x509 SSL certificate file in base64 format - - - x509 SSL certificate key file in base64 format - - - x509 SSL certificate authority file in base64 format - - -- The Spectro Proxy server must have internet access and network connectivity to the private network where the Kubernetes clusters are deployed. - - -# Enablement - -1. Open a terminal session and navigate to the directory where you stored the **values.yaml** for the Palette installation. - - -2. Use a text editor and open the **values.yaml** file. Locate the `frps` section and update the following values in the **values.yaml** file. Refer to the [Spectro Proxy Helm Configuration](/enterprise-version/helm-chart-install-reference/#spectroproxy) to learn more about the configuration options. - -
    | **Parameter** | **Description** | **Type** |
    | --- | --- | --- |
    | `enabled`| Set to `true` to enable the Spectro Proxy server. | boolean |
    | `frps.frpHostURL`| The domain name you will use for the Spectro Proxy server. For example, `frps.example.com`. | string |
    | `server.crt`| The x509 SSL certificate file in base64 format. | string |
    | `server.key`| The x509 SSL certificate key file in base64 format. | string |
    | `ca.crt`| The x509 SSL certificate authority file in base64 format. | string |
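    The certificate values must be supplied as base64-encoded strings. One way to produce them, assuming PEM files named **server.crt**, **server.key**, and **ca.crt** and GNU coreutils (on macOS, use `base64 -i <file>` instead):

    ```shell
    # Base64-encode the PEM files into single-line strings for the values.yaml file.
    base64 -w 0 server.crt
    base64 -w 0 server.key
    base64 -w 0 ca.crt
    ```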
- - The following is an example of the `frps` section in the **values.yaml** file. The SSL certificate files are truncated for brevity. - -
- - ```yaml - frps: - frps: - enabled: true - frpHostURL: "frps.palette.example.com" - server: - crt: "LS0tLS1CRU...........tCg==" - key: "LS0tLS1CRU...........tCg==" - ca: - crt : "LS0tLS1CRU...........tCg==" - ``` - - -3. Issue the `helm upgrade` command to update the Palette Kubernetes configuration. The command below assumes you are in the folder that contains the **values.yaml** file and the Palette Helm chart. Change the directory path if needed. - -
- - ```bash - helm upgrade --values values.yaml hubble spectro-mgmt-plane-0.0.0.tgz --install - ``` - - -4. After the new configurations are accepted, use the following command to get the IP address of the Spectro Proxy server's load balancer. - -
- - ```bash - kubectl get svc --namespace proxy-system spectro-proxy-svc - ``` -5. Update the DNS records for the domain name you used for the Spectro Proxy server. Create a CNAME record that points to the IP address of the Spectro Proxy server's load balancer. - - -6. Log in to the Palette System API by using the `/v1/auth/syslogin` endpoint. Use the `curl` command below and replace the URL with the custom domain URL you assigned to Palette, or use the IP address. Ensure you replace the credentials below with your system console credentials. - -
- - ```bash - curl --insecure --location 'https://palette.example.com/v1/auth/syslogin' \ - --header 'Content-Type: application/json' \ - --data '{ - "password": "**********", - "username": "**********" - }' - ``` - Output - ```json hideClipboard - { - "Authorization": "**********.", - "IsPasswordReset": true - } - ``` - -7. Using the output you received, copy the authorization value to your clipboard and assign it to a shell variable. Replace the authorization value below with the value from the output. - -
- - ```shell hideClipboard - TOKEN=********** - ``` - -8. Next, prepare a payload for the`/v1/system/config/` endpoint. This endpoint is used to configure Palette to use a reverse proxy. The payload requires the following parameters: - -
- - | **Parameter** | **Description** | **Type** | - | --- | --- | --- | - | `caCert`| The x509 SSL certificate authority file in base64 format. | string | - | `clientCert`| The x509 SSL certificate file in base64 format. | string | - | `clientKey`| The x509 SSL certificate key file in base64 format. | string | - | `port` | The port number for the reverse proxy server. We recommend using port `443`. | integer | - | `protocol` | The protocol to use for the reverse proxy server. We recommend using `https`. | string | - | `server`| The domain name you will use for the Spectro Proxy server. For example, `frps.example.com`. Do not include the HTTP schema in the value. | string | - - The following is an example payload. The SSL certificate files are truncated for brevity. - -
- - ```json hideClipboard - { - "caCert": "-----BEGIN CERTIFICATE-----\n.............\n-----END CERTIFICATE-----", - "clientCert": "-----BEGIN CERTIFICATE-----\n..........\n-----END CERTIFICATE-----", - "clientKey": "-----BEGIN RSA PRIVATE KEY-----\n........\n-----END RSA PRIVATE KEY-----", - "port": 443, - "protocol": "https", - "server": "frps.palette.example.com.com" - } - ``` - -
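    If you prefer not to hand-edit the JSON, you can assemble the payload from the PEM files directly. A sketch, assuming `jq` 1.6 or later is available and the certificate files are named **server.crt**, **server.key**, and **ca.crt**:

    ```shell
    # Build the reverse proxy payload from PEM files. jq escapes the newlines as \n.
    jq --null-input \
      --rawfile ca ca.crt \
      --rawfile cert server.crt \
      --rawfile key server.key \
      '{caCert: $ca, clientCert: $cert, clientKey: $key, port: 443, protocol: "https", server: "frps.palette.example.com"}' \
      > payload.json
    ```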
- - - - You can save the payload to a file and use the `cat` command to read the file contents into the `curl` command. For example, if you save the payload to a file named `payload.json`, you can use the following command to read the file contents into the `curl` command. You can also save the payload as a shell variable and use the variable in the `curl` command. - - - - -
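    For example, a sketch of the file-based approach described above, assuming the payload was saved as **payload.json** and reusing the `TOKEN` variable from the earlier step:

    ```shell
    # Send the payload from a file instead of pasting the JSON inline.
    curl --insecure --location --request PUT 'https://palette.example.com/v1/system/config/reverseproxy' \
      --header "Authorization: $TOKEN" \
      --header 'Content-Type: application/json' \
      --data "$(cat payload.json)"
    ```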
- -9. Issue a PUT request using the following `curl` command. Replace the URL with the custom domain URL you assigned to Palette or use the IP address. You can use the `TOKEN` variable you created earlier for the authorization header. Ensure you replace the payload below with the payload you created in the previous step. - -
- - ```bash - curl --insecure --silent --include --output /dev/null -w "%{http_code}" --location --request PUT 'https://palette.example.com/v1/system/config/reverseproxy' \ - --header "Authorization: $TOKEN" \ - --header 'Content-Type: application/json' \ - --data ' { - "caCert": "-----BEGIN CERTIFICATE-----\n................\n-----END CERTIFICATE-----\n", - "clientCert": "-----BEGIN CERTIFICATE-----\n.............\n-----END CERTIFICATE-----", - "clientKey": "-----BEGIN RSA PRIVATE KEY-----\n............\n-----END RSA PRIVATE KEY-----\n", - "port": 443, - "protocol": "https", - "server": "frps.palette.example.com.com" - }' - ``` - - A successful response returns a `204` status code. - - Output - ```shell hideClipboard - 204 - ``` - -You now have a Spectro Proxy server that you can use to access Palette clusters deployed in a different network. Make sure you add the [Spectro Proxy pack](/integrations/frp) to the clusters you want to access using the Spectro Proxy server. - - -# Validate - -Use the following command to validate that the Spectro Proxy server is active. - -
- - - -1. Open a terminal session. - - -2. Log in to the Palette System API by using the `/v1/auth/syslogin` endpoint. Use the `curl` command below and replace the URL with the custom domain URL you assigned to Palette or use the IP address. Ensure you replace the credentials below with your system console credentials. - -
- - ```bash - curl --insecure --location 'https://palette.example.com/v1/auth/syslogin' \ - --header 'Content-Type: application/json' \ - --data '{ - "password": "**********", - "username": "**********" - }' - ``` - Output - ```json hideClipboard - { - "Authorization": "**********.", - "IsPasswordReset": true - } - ``` - -3. Using the output you received, copy the authorization value to your clipboard and assign it to a shell variable. Replace the authorization value below with the value from the output. - -
- - ```shell hideClipboard - TOKEN=********** - ``` - -4. Query the system API endpoint `/v1/system/config/reverseproxy` to verify the current reverse proxy settings applied to Palette. Use the `curl` command below and replace the URL with the custom domain URL you assigned to Palette, or use the IP address. You can use the `TOKEN` variable you created earlier for the authorization header. - -
- - ```bash - curl --location --request GET 'https://palette.example.com/v1/system/config/reverseproxy' \ - --header "Authorization: $TOKEN" - ``` - - If the proxy server is configured correctly, you will receive an output similar to the following that contains your settings. The SSL certificate outputs are truncated for brevity. - -
- - ```json hideClipboard - { - "caCert": "-----BEGIN CERTIFICATE-----\n...............\n-----END CERTIFICATE-----\n", - "clientCert": "-----BEGIN CERTIFICATE-----\n...........\n-----END CERTIFICATE-----", - "clientKey": "-----BEGIN RSA PRIVATE KEY-----\n........\n-----END RSA PRIVATE KEY-----\n", - "port": 443, - "protocol": "https", - "server": "frps.palette.example.com" - } - ``` \ No newline at end of file diff --git a/content/docs/12-enterprise-version/08-ssl-certificate-management.md b/content/docs/12-enterprise-version/08-ssl-certificate-management.md deleted file mode 100644 index d211246078..0000000000 --- a/content/docs/12-enterprise-version/08-ssl-certificate-management.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: "SSL Certificate Management" -metaTitle: "SSL Certificate Management" -metaDescription: "Upload and manage SSL certificates in Palette." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -When you install Palette, a self-signed certificate is generated and used by default. You can upload your own SSL certificate to replace the default certificate. - -Palette uses SSL certificates to secure external communication. Palette's internal communication is default secured by default and uses HTTPS. External communication with Palette, such as the system console, gRPC endpoint, and API endpoint, requires you to upload an SSL certificate to enable HTTPS. - -
- - - -Enabling HTTPS is a non-disruptive operation. You can enable HTTPS at any time without affecting the system's functionality. - - - - -# Upload an SSL Certificate - -You can upload an SSL certificate in Palette by using the following steps. - - -## Prerequisites - -- Access to the Palette system console. - - -- You need to have an x509 certificate and a key file in PEM format. The certificate file must contain the full certificate chain. Reach out to your network administrator or security team if you do not have these files. - - -- Ensure the certificate is created for the custom domain name you specified for your Palette installation. If you did not specify a custom domain name, the certificate must be created for the Palette system console's IP address. You can also specify a load balancer's IP address if you are using a load balancer to access Palette. - - -## Enablement - -1. Log in to the Palette system console. - - -2. Navigate to the left **Main Menu** and select **Administration**. - - -3. Select the tab titled **Certificates**. - - -4. Copy and paste the certificate into the **Certificate** field. - - -5. Copy and paste the certificate key into the **Key** field. - - -6. Copy and paste the certificate authority into the **Certificate authority** field. - - -
- - ![A view of the certificate upload screen](/enterprise-version_ssl-certificate-upload.png) - -
- -7. Save your changes. - -If the certificate is invalid, you will receive an error message. Once the certificate is uploaded successfully, Palette will refresh its listening ports and start using the new certificate. - - -## Validate - -You can validate that your certificate is uploaded correctly by using the following steps. - -
- - -1. Log out of the Palette system console. If you are already logged in, log out and close your browser session. Browsers cache connections and may not use the newly enabled HTTPS connection. Closing your existing browser session avoids issues related to your browser caching an HTTP connection. - - -2. Log back into the Palette system console. Ensure the connection is secure by checking the URL. The URL should start with `https://`. - - -Palette is now using your uploaded certificate to create a secure HTTPS connection with external clients. Users can now securely access the system console, gRPC endpoint, and API endpoint. - -
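Beyond the browser check, you can also inspect the certificate that the endpoint is now serving. A minimal sketch, assuming `palette.example.com` is the domain you assigned to your Palette instance; substitute your own domain or IP address.

```shell
# Print the issuer, subject, and validity dates of the certificate served by Palette.
echo | openssl s_client -connect palette.example.com:443 -servername palette.example.com 2>/dev/null \
  | openssl x509 -noout -issuer -subject -dates
```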
- -
\ No newline at end of file diff --git a/content/docs/12-enterprise-version/10-upgrade.md b/content/docs/12-enterprise-version/10-upgrade.md deleted file mode 100644 index 40438ac948..0000000000 --- a/content/docs/12-enterprise-version/10-upgrade.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: "Upgrade Notes" -metaTitle: "Upgrade Notes" -metaDescription: "Spectro Cloud upgrade notes for specific Palette versions." -icon: "" -hideToC: false -fullWidth: false ---- - - -# Overview - - -This page is a reference resource to help you better prepare for a Palette upgrade. Review each version's upgrade notes for more information about required actions and other important messages to be aware of. If you have questions or concerns, reach out to our support team by opening up a ticket through our [support page](http://support.spectrocloud.io/). - -# Palette 4.0 - -Palette 4.0 includes the following major enhancements that require user intervention to facilitate the upgrade process. - -
- -- **Enhanced security for Palette microservices** - To enhance security, all microservices within Palette now use `insecure-skip-tls-verify` set to `false`. When upgrading to Palette 4.0, you must provide a valid SSL certificate in the system console. - -
- - If you already have an SSL certificate, key, and Certificate Authority (CA) certificate, you can use them when upgrading to Palette 4.0.0. To learn how to upload SSL certificates to Palette, refer to [SSL Certificate Management](/enterprise-version/ssl-certificate-management). - - -- **Self-hosted Palette Kubernetes Upgrade** - If you installed Palette using the Helm Chart method, the Kubernetes version used for Palette is upgraded from version 1.24 to 1.25. You will need to copy the new Kubernetes YAML to the Kubernetes layer in the Enterprise cluster profile. If you have customized your Kubernetes configuration, you will need to manually adjust custom values and include any additional configuration in the upgraded YAML that we provide. Refer to [Upgrade Kubernetes](/enterprise-version/upgrade#upgradekubernetes). - -## Upgrade from Palette 3.x to 4.0 - -From the Palette system console, click the **Update version** button. Palette will be temporarily unavailable while system services update. - -![Screenshot of the "Update version" button in the system consoles.](/enterprise-version_sys-console-update-palette-version.png) - -
- - - -## Upgrade Kubernetes - -Follow the steps below to upgrade Kubernetes. - -
- -1. To obtain the upgraded Kubernetes YAML file for Palette 4.0, contact our support team by sending an email to support@spectrocloud.com. - - -2. In the system console, click on **Enterprise Cluster Migration**. - - -3. Click on the **Profiles** tab, and select the Kubernetes layer. The Kubernetes YAML is displayed in the editor at right. - - -4. If the existing Kubernetes YAML has been customized or includes additional configuration, we suggest you create a backup of it by copying it to another location. - - -5. Copy the Kubernetes YAML you received from our support team and paste it into the editor. - -
- - ![Screenshot of the Kubernetes YAML editor.](/enterprise-version_upgrade_ec-cluster-profile.png) - - -6. If you have made any additional configuration changes or additions, add your customizations to the new YAML. - - -7. Save your changes. - -The Enterprise cluster initiates the Kubernetes upgrade process and leads to the reconciliation of all three nodes. - - -# Palette 3.4 - -Prior versions of Palette installed internal Palette components' ingress resources in the default namespace. The new version of the Helm Chart ensures all Palette required ingress resources are installed in the correct namespace. Self-hosted Palette instances deployed to Kubernetes and upgrading from Palette versions 3.3.X or older must complete the following action. - -
- -1. Connect to the cluster using the cluster's kubeconfig file. - - - -2. Identify all Ingress resources that belong to *Hubble* - an internal Palette component. - -
- - ```shell - kubectl get ingress --namespace default - ``` - -3. Remove each Ingress resource listed in the output that starts with the name Hubble. Use the following command to delete an Ingress resource. Replace `REPLACE_ME` with the name of the Ingress resource you are removing. - -
    ```shell
    kubectl delete ingress REPLACE_ME --namespace default
    ```
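    If several Hubble Ingress resources are listed, you can remove them in one pass instead of deleting them individually. A sketch, assuming the resource names all start with `hubble`:

    ```shell
    # Delete every Ingress in the default namespace whose name starts with "hubble".
    kubectl get ingress --namespace default --output name \
      | grep '^ingress.networking.k8s.io/hubble' \
      | xargs --no-run-if-empty kubectl delete --namespace default
    ```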
\ No newline at end of file diff --git a/content/docs/12.1-tenant-settings.md b/content/docs/12.1-tenant-settings.md deleted file mode 100644 index 169a2d6597..0000000000 --- a/content/docs/12.1-tenant-settings.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "Tenant Administration" -metaTitle: "Tenant Administration" -metaDescription: "Familiarize yourself with the available Tenant settings and how you can control the behavior of your tenant." -icon: "gears" -hideToC: false -fullWidth: false ---- - -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; - -# Overview - -In Palette, the *tenant admin* role can access tenant settings. This role is the equivalent of a system administrator. As a tenant admin, you can configure Palette and its features to behave in a manner that best fits your organization and its users. - -
- - - -To learn more about the permissions and privileges available to the tenant role, refer to the [Tenant Scope Roles and Permissions](/user-management/palette-rbac/tenant-scope-roles-permissions) reference page. - - - - -Use the following resources to become familiar with the available tenant settings and how to change the tenant settings. - - -# Resources - -- [Login Banner](/tenant-settings/login-banner) - - -
\ No newline at end of file diff --git a/content/docs/12.1-tenant-settings/30-login-banner.md b/content/docs/12.1-tenant-settings/30-login-banner.md deleted file mode 100644 index 02d2896455..0000000000 --- a/content/docs/12.1-tenant-settings/30-login-banner.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: "Login Banner" -metaTitle: "Login Banner" -metaDescription: "Learn how to set a login banner for your Palette tenant." -icon: "" -hideToC: false -fullWidth: false ---- - -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; - -# Overview - -You can set up a login banner message that all users must acknowledge and accept before they log in to Palette. The message is limited to 1300 characters, and only plain text is supported. - -
- - - - -The login banner message is only accessible when users attempt to log in to Palette through the tenant URL. Using the default Palette SaaS login URL of `https://console.spectrocloud.com` will not display the login banner message. Users of self-hosted Palette use the tenant URL defined during the Palette installation. - - - - - -# Prerequisite - -* Tenant admin access. - - -# Set Up Login Banner - -1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. - - -2. Navigate to the left **Main Menu** and select **Tenant Settings**. - - -3. Next, click on **Platform Settings** from the **Tenant Settings Menu**. - - -4. Toggle the **Display Login Banner** button. - - -5. Fill out the text box with the message you want all Palette users in your tenant to acknowledge before a login. - - -
- - ![A view of the tenant settings platform page with an example login banner message.](/tenant-settings_login-banner_settings-page-view.png) - - -
- -6. Select **Save Message** to save your changes. - -
- - - - -# Validate - -You can validate the banner message is set up correctly by using the following steps. - - -1. Log out of [Palette](https://console.spectrocloud.com). - - -2. From your web browser, navigate to the Palette tenant URL for your organization. - -
- - - - For Palette SaaS, the tenant URL is prefixed with your tenant name. For example, the tenant `spectrodocs` has the URL `spectrodocs-spectrocloud.console.spectrocloud.com`. Users of self-hosted instances of Palette should use the tenant URL defined during the Palette installation. - - - - -3. Acknowledge the login banner message. - -
- - ![A view of a tenant login banner message](/tenant-settings_login-banner_tenant-banner-view.png) - diff --git a/content/docs/12.5-palette-cli.md b/content/docs/12.5-palette-cli.md deleted file mode 100644 index 7a73c6e39c..0000000000 --- a/content/docs/12.5-palette-cli.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "Palette CLI" -metaTitle: "Palette CLI" -metaDescription: "Learn how to use the Palette CLI." -icon: "terminal" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Overview - -The Palette CLI contains various functionalities that you can use to interact with Palette and manage resources. The Palette CLI is well suited for Continuous Delivery/Continuous Deployment (CI/CD) pipelines and recommended for automation tasks, where Terraform or direct API queries are not ideal. - -To get started with the Palette CLI, check out the [Install](/palette-cli/install-palette-cli) guide. - - - -# Resources - -- [Install](/palette-cli/install-palette-cli) - - -- [Commands](/palette-cli/commands) - -
\ No newline at end of file diff --git a/content/docs/12.5-palette-cli/10-install-palette-cli.md b/content/docs/12.5-palette-cli/10-install-palette-cli.md deleted file mode 100644 index 0aa23cce3e..0000000000 --- a/content/docs/12.5-palette-cli/10-install-palette-cli.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: "Install" -metaTitle: "Install" -metaDescription: "Learn how to install the Palette CLI and how you can use the CLI with Palette Dev Engine." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; - - -# Installation - -Use the following steps to install and set up the Palette CLI. - - -## Prerequisites - -- A Palette account. Click [here](https://console.spectrocloud.com/) to create a Palette account. - - -- A Palette API key. Refer to the [API Key](/user-management/user-authentication/#apikey) reference page to learn how to create an API key. - - - -## Download and Setup - -1. Visit the [Downloads](/spectro-downloads#palettecli) page and download the Palette CLI by using the URL provided. - - -2. Open up a terminal session on your local system. - - -3. Navigate to your default download folder. For Mac and Linux environments, the default location is **~/Downloads**. - - -4. Move the binary to a folder that is part of your system's `PATH` environment variable. Use the following command to move the binary to the **/usr/local/bin** folder. - -
- - ```shell - sudo mv ~/Downloads/palette /usr/local/bin/palette && \ - chmod +x /usr/local/bin/palette - ``` - -
- - -## Validate - -Verify the Palette CLI is part of your system path by issuing the Palette CLI `version` command. - -
- - ```shell - palette version - ``` - - - ```shell hideClipboard - Palette CLI version: 4.0.0 - ``` - - - # Next Steps - -Start exploring the Palette CLI by using the `--help` command with the various commands. The Palette CLI will continue to receive more functionality, so you will want to keep it updated by downloading the newest version and replacing the current binary. - -
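For example, a quick way to discover what is available, assuming the binary is installed as shown above:

```shell
# Show the top-level commands and global flags.
palette --help

# Show the options for a specific subcommand, such as project management.
palette project --help
```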
\ No newline at end of file diff --git a/content/docs/12.5-palette-cli/20-commands.md b/content/docs/12.5-palette-cli/20-commands.md deleted file mode 100644 index e78a31effc..0000000000 --- a/content/docs/12.5-palette-cli/20-commands.md +++ /dev/null @@ -1,379 +0,0 @@ ---- -title: "Commands" -metaTitle: "Commands" -metaDescription: "Reference resource for all the supported Palette CLI commands." -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; - - -# Overview - -You start the Palette CLI with a single command, `palette`. The CLI accepts various subcommands such as `pde`, `help`, and more. The Palette CLI will return a non-zero exit status during error scenarios. You can use the CLI flags `-h` and `--help` to learn more about each subcommand. - -The complete list of subcommands is: - -
- - * `completion` - Generate the autocompletion script for the specified shell. - - - * `help` - Help with any command. - - - * [`ec`](#ec) - Palette Enterprise Cluster installation & pre-validation. - - - * [`login`](#login) - Login to Palette. - - - * [`pcg`](#pcg) - Private Cloud Gateway installation & pre-validation. - - - * [`pde`](#pde) - Palette Developer Experience. - - - * [`project`](#project) - Manage Palette Projects. - - - - -## Global Flags - -Palette CLI supports the following global flags. - -| Short Flag | Long Flag | Description | Type | -|------------|------------------------|--------------------------------------------------------------------------|---------| -| `-c` | `--config` | Config file location. | string | -| `-h` | `--help` | Help with any command. | N/A | -| `-l` | `--log-level` | Log level. Allowed values: `panic` `fatal` `error` `warn` `info` `debug` `trace` (default `info`) | string | -| `-w` | `--workspace` | Workspace location for staging runtime configurations and logs (default `$HOME/.palette`) | string | - - -## EC - -The `ec` subcommand installs a self-hosted Palette Enterprise Cluster (EC) in your target environment. The installation is conducted through an interactive wizard that guides you through the various install configurations available. A local kind cluster is created to facilitate creating the Enterprise cluster in the target environment. You do not need to install kind or any other dependencies. The CLI includes all the required dependencies to stand up the kind cluster. You can use the `ec` command to install a [self-hosted Palette](/enterprise-version/deploying-an-enterprise-cluster) instance or a self-hosted [VerteX](/vertex/install-palette-vertex) instance. - -The `ec` subcommand exposes the following subcommand. - -
- - * `install` - Install a Palette Enterprise Cluster through an interactive wizard. A container runtime is required to install an EC cluster. - -### Install - -The `install` subcommand installs a Palette Enterprise Cluster in your target environment. You can install Palette or Palette VerteX using the `install` subcommand. The `install` subcommand can be used in interactive mode, which prompts you for required values. Alternatively, you can use flags to generate a configuration file. - -
- - | Short Flag | Long Flag | Description | Type | - |------------|------------------------|--------------------------------------------------------------------------|---------| - | `-f` | `--config-file` | Install using a configuration file (optional). Use `--config-only` to generate a configuration file. | string | - | `-o` | `--config-only` | Generate configuration file only. This command will not proceed with installation. | boolean | - | `-v` | `--custom-values-file` | Enterprise Cluster custom values.yaml configuration file (optional). Use this to customize the cluster profile of the Enterprise Cluster. Refer to the [custom value file](#customvaluefile) section for more information. | string | - | `-p` | `--update-passwords` | Update passwords only. Do not proceed with installation. The `--config-file` flag must also be provided. | string | - - - #### Examples - - Install an Enterprise Cluster in interactive mode. - -
- - ```shell - palette ec install - ``` -
- - Create a configuration file for the Enterprise Cluster installation. - -
- - ```shell - palette ec install --config-only - ``` - -
- - Install an Enterprise Cluster using a configuration file. The configuration file is generated using the `--config-only` flag. - -
- - ```shell hideClipboard - palette ec install --config-file ~/.palette/ec/ec-20230807143205/ec.yaml - ``` - -
- - Update the passwords of an Enterprise Cluster using a configuration file. The configuration file is generated using the `--config-only` flag. - -
- - ```shell hideClipboard - palette ec install --config-file ~/.palette/ec/ec-20230807143205/ec.yaml --update-passwords - ``` - - - - -### Custom Value File - -You can customize the [Cluster Profile](/glossary-all#clusterprofile) that makes up the Enterprise Cluster by providing a custom **values.yaml** file that contains values for the various Cluster Profile layers. Use this file to tailor the Enterprise Cluster to your specific needs. This is an advanced feature and should only be used by advanced users or when explicitly instructed by our support team. - - -The **values.yaml** file is made up of the following components: -
- -- `os` The operating system layer of the Enterprise Cluster. This layer contains the values for the operating system that will be used to install the Enterprise Cluster. - - -- `k8s` The Kubernetes layer of the Enterprise Cluster. This layer contains the configuration values for the Kubernetes cluster that is created as part of the Enterprise Cluster installation. - - -- `csi` The Container Storage Interface (CSI) layer of the Enterprise Cluster. This layer contains the configuration values for the CSI driver that is used to provide persistent storage to the Enterprise Cluster. - - -- `cni` The Container Network Interface (CNI) layer of the Enterprise Cluster. This layer contains the configuration values for the CNI driver that is used to provide networking to the Enterprise Cluster. - - -- `mgmt` The management layer of the Enterprise Cluster. This layer contains the configuration values for the internal management components of the Enterprise Cluster. - - - - You can provide one or more layers in the **values.yaml** file. When you provide a layer configuration, the new configuration will be used instead of the default configuration. For example, if you provide a custom **values.yaml** file that contains the `os` layer, it will replace the default operating system configuration in the Enterprise Cluster profile. The **values.yaml** file must use the following format: -
- - ```yaml hideClipboard - os: |- - # ... values.yaml for OS layer go here. - k8s: |- - # ... values.yaml for K8s layer go here. - csi: |- - # ... values.yaml for CSI layer go here. - cni: |- - # ... values.yaml for CNI layer go here. - mgmt: |- - # ... values.yaml for spectro-mgmt layer go here. - ``` - -The following example shows a custom **values.yaml** file that contains the `os` layer. The `os` layer contains the configuration for the operating system that will be used to install the Enterprise Cluster. - -
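Once the custom **values.yaml** file is ready, pass it to the installer with the `--custom-values-file` flag described in the table above. The file path below is illustrative.

```shell
palette ec install --custom-values-file ~/ec-custom-values.yaml
```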
- - ```yaml hideClipboard - os: |- - kubeadmconfig: - preKubeadmCommands: - - echo "Executing pre kube admin config commands" - - update-ca-certificates - - 'systemctl restart containerd; sleep 3' - - 'while [ ! -S /var/run/containerd/containerd.sock ]; do echo "Waiting for containerd..."; sleep 1; done' - postKubeadmCommands: - - echo "Executing post kube admin config commands" - files: - - targetPath: /usr/local/share/ca-certificates/mycom.crt - targetOwner: "root:root" - targetPermissions: "0644" - content: | - -----BEGIN CERTIFICATE----- - MIICyzCCAbOgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl - cm5ldGVzMB4XDTIwMDkyMjIzNDMyM1oXDTMwMDkyMDIzNDgyM1owFTETMBEGA1UE - AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMdA - nZYs1el/6f9PgV/aO9mzy7MvqaZoFnqO7Qi4LZfYzixLYmMUzi+h8/RLPFIoYLiz - qiDn+P8c9I1uxB6UqGrBt7dkXfjrUZPs0JXEOX9U/6GFXL5C+n3AUlAxNCS5jobN - fbLt7DH3WoT6tLcQefTta2K+9S7zJKcIgLmBlPNDijwcQsbenSwDSlSLkGz8v6N2 - 7SEYNCV542lbYwn42kbcEq2pzzAaCqa5uEPsR9y+uzUiJpv5tDHUdjbFT8tme3vL - 9EdCPODkqtMJtCvz0hqd5SxkfeC2L+ypaiHIxbwbWe7GtliROvz9bClIeGY7gFBK - jZqpLdbBVjo0NZBTJFUCAwEAAaMmMCQwDgYDVR0PAQH/BAQDAgKkMBIGA1UdEwEB - /wQIMAYBAf8CAQAwDQYJKoZIhvcNAQELBQADggEBADIKoE0P+aVJGV9LWGLiOhki - HFv/vPPAQ2MPk02rLjWzCaNrXD7aPPgT/1uDMYMHD36u8rYyf4qPtB8S5REWBM/Y - g8uhnpa/tGsaqO8LOFj6zsInKrsXSbE6YMY6+A8qvv5lPWpJfrcCVEo2zOj7WGoJ - ixi4B3fFNI+wih8/+p4xW+n3fvgqVYHJ3zo8aRLXbXwztp00lXurXUyR8EZxyR+6 - b+IDLmHPEGsY9KOZ9VLLPcPhx5FR9njFyXvDKmjUMJJgUpRkmsuU1mCFC+OHhj56 - IkLaSJf6z/p2a3YjTxvHNCqFMLbJ2FvJwYCRzsoT2wm2oulnUAMWPI10vdVM+Nc= - -----END CERTIFICATE----- - ``` - -## Login - -The `login` subcommand authenticates the Palette CLI with Palette. The `login` subcommand can be used in interactive mode, which prompts you for required values. Or, you can use flags to provide the subcommand with all the required values such as the API key, the organization ID, and the Palette URL. - -
- -| Flag | Description | Type | -|-----------------------|--------------------------------------------------------------------------------------|---------| -| `--api-key` | Palette API key (omit for interactive login). | string | -| `--cluster-group-name`| Palette Cluster Group name (optional). Specifies the active Cluster Group. | string | -| `--cluster-group-scope`| Palette Cluster Group scope. Required with `--cluster-group-name`. Allowed values are: `project`, `tenant`, and `system`. | string | -| `--console-url` | Palette URL (omit for interactive login). | string | -| `--help` | Help for the `login` subcommand. | - | -| `--insecure` | Skip Transport Layer Security (TLS) (bypass x509 verification). | - | -| `--org` | Palette Organization name (omit for interactive login). | string | -| `--project` | Palette Project name (optional). Specifies the active Project. | string | - - -#### Examples - -
- -```shell hideClipboard -palette login --api-key 123456789 --org demo-org --console-url https://console.spectrocloud.com -``` - -If you want to target a specific project when using the `login` command, use the `--project` flag. - -
- -```shell hideClipboard -palette login \ - --api-key 123456789 \ - --org demo-org \ - --console-url https://console.spectrocloud.com \ - --project dev-team -``` - - -Upon successful login, a local configuration file named **palette.yaml** is created. This file contains the metadata for CLI operations and is created in your $HOME directory under the folder name **.palette**. The following output is an example of a **palette.yaml** configuration file. Sensitive values, such as passwords, tokens, and API keys are encrypted at rest. - -
- -```yaml hideClipboard -paletteConfig: - organization: demo-org - scope: tenant - projectName: dev-team - projectUid: 6342eab2faa0813ead9082e0 - clusterGroupName: beehive - clusterGroupUid: 635669ba4583891d109fe6c0 - tenantUid: 40b8a9a7f724831be814e5734ea744ed - ubuntuConfig: - enablefips: false - token: "" - scarConfig: - scarLoc: "" - scarUsername: "" - scarPassword: "" - mgmt: - apikey: 2abVsxDfFcJpYZ08+6dNWhkk - endpoint: https://console.spectrocloud.com - insecure: false - pairingcode: "" -runLoc: /Users/demo/.palette/ -workspaceLoc: /Users/demo/.palette -``` - -## PCG - -The `pcg` subcommand supports Private Cloud Gateway (PCG) operations, such as installing a PCG cluster and validating its installation. A local [kind](https://kind.sigs.k8s.io/) cluster is created to facilitate creating the PCG cluster in the target environment. You do not need to install kind or any other dependencies, the CLI includes all the required dependencies to stand up the kind cluster. - - -The `pcg` command exposes the following subcommand. - -
- - * `install` - Install a PCG through an interactive wizard. A container runtime is required to install a PCG cluster. - - -
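As a quick usage sketch, the two most common invocations are shown below; the flags are described in the Install section that follows.

```shell
# Install a PCG through the interactive wizard
palette pcg install

# Validate environment prerequisites without installing
palette pcg install --inspect-only
```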
- -### Install - -Use the `install` subcommand to install a PCG cluster in your target environment. The following flags are supported by the `install` subcommand. - -
- - | Short Flag | Long Flag | Description | Type | - |------------|------------------------|--------------------------------------------------------------------------|---------| - | `-f` | `--config-file` | Install using a configuration file (optional). Use `--config-only` to generate a configuration file. | string | - | `-o` | `--config-only` | Generate configuration file only. This command will not proceed with installation. | boolean | - | `-i` | `--inspect-only` | Validate prerequisites for the environment. Do not proceed with installation. | boolean | - - -To learn more about installing a PCG cluster, refer to each platform's respective PCG install guide. - -| Platform | Install Guide | -|---|---| -| MAAS | [Link](/clusters/data-center/maas/install-manage-maas-pcg#installpcg) | -| OpenStack | [Link](/clusters/data-center/openstack#installingprivatecloudgateway-openstack) | -| VMware | [Link](/clusters/data-center/vmware/#createvmwareprivatecloudgateway(pcg)) | - - -## PDE - -The `pde` subcommand interacts with the Palette Dev Engine (PDE) platform and its resources. You can use the `pde` command to log in to Palette, manage virtual clusters, and switch the project scope. - -The `pde` command exposes the following subcommands. - -
- - * `cluster-group` - Manage Palette Cluster Groups. - - - * `project` - Manage Palette Projects. - - - * `virtual-cluster` - Manage Palette Virtual Clusters. - - -### Cluster Group - -Use the `cluster-group` command to change the cluster group that commands will target. You can also list all available cluster groups. The `cluster-group` command supports the following subcommands. -
- - * `list` - List Palette Cluster Groups. - - - - * `switch` - Switch your active Palette Cluster Group. - - - -### Virtual Cluster - -You can use the `virtual-cluster` subcommand to manage Palette Virtual Clusters. Below is a list of the supported subcommands you can use. Use the `--help` flag to learn more about each subcommand. - -
- -- `create` - Create a Palette Virtual Cluster. - - -- `delete` - Delete a Palette Virtual Cluster. - - -- `download-kubeconfig` - Download the kubeconfig for a Palette Virtual Cluster. - - -- `events` - View events for a Palette Virtual Cluster. - - -- `lifecycle` - Pause or resume a Palette Virtual Cluster. - - -- `list` - List Palette Virtual Clusters. - - -- `resize` - Resize a Palette Virtual Cluster. - -## Project - -Use the `project` command to manage projects, the project scope for the CLI, and list all available projects. The `project` command supports the following subcommands. - - -
- - * `deactivate` - Deactivate your active Palette project. This command requires you to have tenant admin privileges. - - - * `list` - List Palette projects. Only projects you have access to are listed. - - - * `switch` - Switch your active Palette project. You can only switch to projects you have access to. - -
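As a usage sketch that ties these commands together, the sequence below lists the projects and cluster groups you can access and inspects a virtual cluster subcommand before using it. All subcommands shown are listed above; use `--help` on any of them to review required flags.

```shell
# Review the projects and cluster groups you can access
palette project list
palette pde cluster-group list

# Inspect the options a subcommand accepts before running it
palette pde virtual-cluster create --help

# List existing virtual clusters
palette pde virtual-cluster list
```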
diff --git a/content/docs/12.5-vertex.md b/content/docs/12.5-vertex.md deleted file mode 100644 index f03e75458c..0000000000 --- a/content/docs/12.5-vertex.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: "Palette VerteX" -metaTitle: "Palette VerteX" -metaDescription: "Learn how Palette VerteX enables regulated industries to meet stringent security requirements." -icon: "shield" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -Palette VerteX brings simplicity, security, and scale to production Kubernetes for regulated industries, such as government and public sector organizations that handle sensitive and classified information. - -# FIPS-Compliant - -Palette VerteX integrates validated Federal Information Processing Standards (FIPS) 140-2 cryptographic modules in Kubernetes clusters it deploys to ensure robust data protection for your organization’s infrastructure and applications. To learn more about our FIPS 140-2 certification, review [Spectro Cloud Cryptographic Module](https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/4349). FIPS modules, which are accessible in our private artifact repository, extend Palette’s existing security features that include security scans, powerful RBAC, and tamper-proof edge device images. Palette VerteX protects sensitive data in clusters across edge, bare metal, on-prem data centers, air-gapped environments, and cloud. - - -To learn more about FIPS in Palette VerteX, check out the [FIPS](/vertex/fips) section. - -# Access Palette VerteX - -To set up a Palette VerteX account, contact our support team by sending an email to support@spectrocloud.com. Include the following information in your email: - -- Your full name -- Organization name (if applicable) -- Email address -- Phone number (optional) -- Target Platform (VMware or Kubernetes) -- A brief description of your intended use of VerteX - - -Our dedicated support team will promptly get in touch with you to provide the necessary assistance and share the installer image, credentials, and an endpoint URL to access the FIPS registry. - -# Resources - -- [FIPS](/vertex/fips) - - -- [Installation](/vertex/install-palette-vertex) - - -- [System Management](/vertex/system-management) - - - - -
- -
- - diff --git a/content/docs/12.5-vertex/00-fips.md b/content/docs/12.5-vertex/00-fips.md deleted file mode 100644 index b6f93f0fbf..0000000000 --- a/content/docs/12.5-vertex/00-fips.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: "FIPS" -metaTitle: "FIPS" -metaDescription: "Learn about FIPS compliance in Palette VerteX." -icon: "" -hideToC: false -fullWidth: false ---- - -# Overview - -Palette VerteX is FIPS 140-2 compliant. This means that Palette VerteX uses FIPS 140-2 compliant algorithms and encryption methods. With its additional security scanning capabilities, Palette VerteX is designed to meet the stringent requirements of regulated industries. Palette VerteX operates on FIPS-compliant Ubuntu Pro versions. - - -## Non-FIPS Enablement - -You can deploy non-FIPS-compliant components in your Palette VerteX environment by enabling non-FIPS settings. Refer to the [Enable non-FIPS Settings](/vertex/system-management/enable-non-fips-settings) guide for more information. - - - -Something to note when using RKE2 and K3s: - -
- - - -- When we scan the binaries, which we consume directly from Rancher's RKE2 repository, issues are reported for the following components. These components were compiled with a Go compiler that is not FIPS-compliant. - - - container-suseconnect - - container-suseconnect-zypp - - susecloud - -
- - Since these components are unrelated to Kubernetes and are instead used to access SUSE’s repositories during the Docker build process, RKE2 itself remains fully compliant. - - RKE2 is designated as FIPS-compliant per official Rancher [FIPS 140-2 Enablement](https://docs.rke2.io/security/fips_support) security documentation. Therefore, Palette VerteX designates RKE2 as FIPS-compliant. - - - - - - - -- Although K3s is not available as a FIPS-certified distribution, Palette VerteX supports K3s as a Kubernetes distribution for Edge clusters. - -Palette VerteX uses icons to show FIPS compliance status. For information about Palette VerteX status icons, review [FIPS Status Icons](/vertex/fips/fips-status-icons). - - -## Legal Notice - -Spectro Cloud has performed a categorization under FIPS 199 with (client/tenant) for the data types (in accordance with NIST 800-60 Vol. 2 Revision 1) to be stored, processed, and/or transmitted by the Palette Vertex environment. (client/tenant) maintains ownership and responsibility for the data and data types to be ingested by the Palette Vertex SaaS in accordance with the agreed upon Palette Vertex FIPS 199 categorization. - - -## Resources - -- [FIPS Status Icons](/vertex/fips/fips-status-icons) - - -- [FIPS-Compliant Components](/vertex/fips/fips-compliant-components) - - -- [RKE2 FIPS 140-2 Enablement](https://docs.rke2.io/security/fips_support) \ No newline at end of file diff --git a/content/docs/12.5-vertex/00-fips/01-fips-compliant-components.md b/content/docs/12.5-vertex/00-fips/01-fips-compliant-components.md deleted file mode 100644 index 4415a9438f..0000000000 --- a/content/docs/12.5-vertex/00-fips/01-fips-compliant-components.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -title: "FIPS-Compliant Components" -metaTitle: "FIPS-Compliant Components" -metaDescription: "Learn about FIPS-Component Components supported by Palette VerteX." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -Federal Information Processing Standards (FIPS) is a series of standards developed by the National Institute of Standards and Technology (NIST) in the United States for computer security and encryption algorithms. - -FIPS 140-2 is a specific standard for security requirements for cryptographic modules. It outlines the criteria these modules must meet to ensure their security and integrity. - - -## FIPS Support in Clusters - -Palette VerteX provides FIPS-compliant infrastructure components in Kubernetes clusters it deploys. These components are: - -
- -- Operating System (OS) - - Ubuntu Pro - - -- Kubernetes - - Palette eXtended Kubernetes (PXK) - - Palette eXtended Kubernetes - Edge (PXK-E) - - -- Container Network Interface (CNI) - - Calico - - -- Container Storage Interface (CSI) - - vSphere CSI - - -## Management Plane - -All services in the management plane are FIPS compiled with Go using [BoringCrypto libraries](https://pkg.go.dev/crypto/internal/boring) and static linking. Refer to the [Spectro Cloud Cryptographic Module](https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/4349) resource to learn about our NIST certificate. - -
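As background on what static linking with BoringCrypto implies, Go binaries built this way embed markers that standard tooling can surface. The commands below are a generic inspection sketch for illustration only, not an official verification procedure, and the binary path is a placeholder.

```shell
# Print the build settings embedded in a Go binary (path is a placeholder)
go version -m ./example-service | grep -i boring

# Statically linked BoringCrypto builds also expose recognizable symbols
strings ./example-service | grep -i goboringcrypto | head
```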
- -## FIPS-Compliant Kubernetes - -Our customized version of Kubernetes is FIPS-compliant. Both [Palette eXtended Kubernetes (PXK)](/integrations/kubernetes) and [Palette eXtended Kubernetes-Edge (PXK-E)](/integrations/kubernetes-edge) are compiled with FIPS-compliant compiler and libraries. - -
- - - - -Refer to the [Palette eXtended Kubernetes (PXK)](/integrations/kubernetes) and [Palette eXtended Kubernetes-Edge (PXK-E)](/integrations/kubernetes-edge) documentation to learn more about each Kubernetes distribution. - - - - -All PXK and PXK-E components and supporting open-source components are compiled in their native programming language using language-specific FIPS-compliant libraries and static linking. If a component is not available in the form of a FIPS-compliant binary, we compile it with a FIPS-compliant compiler and libraries. The following tables list the FIPS-compliant components in PXK and PXK-E: - -
- - -### Core Kubernetes Components - -| **Component** | **Description** | -| --- | --- | -| API Server | The API server is the central management entity that receives all REST requests for the cluster. | -| Controller Manager | The controller manager is a daemon that embeds the core control loops shipped with Kubernetes. | -| Scheduler | The scheduler is a daemon that finds the best node for a pod, based on the scheduling requirements you specify. | -| Kubelet | The kubelet is the primary *node agent* that is deployed on each node. | -| Kube-proxy | The kube-proxy is a network proxy that runs on each node in your cluster, implementing part of the Kubernetes Service concept. | -| Kubeadm | Kubeadm is a tool built to provide best-practice “fast paths” for creating Kubernetes clusters. | -| Kubectl | Kubectl is a command line interface for issuing commands against Kubernetes clusters. | - - -### Auxiliary Kubernetes Components - -| **Component** | **Description** | -| --- | --- | -| CoreDNS | CoreDNS is a Domain Name System (DNS) server deployed as a cluster DNS service. | -| Etcd | Etcd is a distributed key-value store used as Kubernetes’ backing store for all cluster data. | -| Metrics Server | Metrics Server is a scalable, efficient source of container resource metrics for Kubernetes built-in autoscaling pipelines. | -| Ingress Controller| Nginx is used as the ingress controller. An ingress controller is a piece of software that provides reverse proxy, configurable traffic routing, and Transport Layer Security (TLS) termination for Kubernetes services. | -| Nginx Server| The Nginx server is a web server that can also be used as a reverse proxy, load balancer, mail proxy, and HTTP cache. | -| Nginx Ingress Controller| The Nginx ingress controller uses ConfigMap to store the Nginx configuration. | - - -### Runtime Components - -| **Component** | **Description** | -| --- | --- | -| containerd | Containerd is an industry-standard container runtime with an emphasis on simplicity, robustness, and portability. | -| containerd-shim | Containerd-shim is a shim used by containerd to launch containers. | -| containerd-shim-runc-v1 | Containerd-shim-runc-v1 is a shim used by containerd to launch containers. | -| containerd-shim-runc-v2 | Containerd-shim-runc-v2 is a shim used by containerd to launch containers. | -| ctr| Ctr is a command line interface for containerd. | -| crictl | Crictl is a command line interface for CRI-compatible container runtimes. | -| runc | Runc is a CLI tool for spawning and running containers according to the OCI specification. | - - -### Container Network Interface Components - -| **Component** | **Description** | -| --- | --- | -| Calico | Calico is a Container Network Interface plugin that provides networking and network policy for Kubernetes clusters. | - -### Container Storage Interface Components - -| **Component** | **Description** | -| --- | --- | -| AWS EBS CSI | AWS EBS CSI is a CSI plugin that provides storage for Kubernetes clusters. | -| vSphere CSI | vSphere CSI is a CSI plugin that provides storage for Kubernetes clusters. | -| Longhorn CSI | Longhorn CSI is a CSI plugin that provides storage for Kubernetes clusters. Longhorn is the only supported CSI for PXKE. | - -
- - -##### AWS EBS CSI Components - -| **Component** | **Description** | -| --- | --- | -| Driver| The driver is a CSI plugin that provides storage for Kubernetes clusters. | -| External Attacher | The external attacher is a CSI plugin that attaches volumes to nodes. | -| External Provisioner | The external provisioner is a CSI plugin that provisions volumes. | -| External Resizer | The external resizer is a CSI plugin that resizes volumes. | -| External Snapshotter | The external snapshotter is a CSI plugin that takes snapshots of volumes. | -| Liveness Probe | The liveness probe is a CSI plugin that checks the health of the driver. | -| Node Driver Registrar | The node driver registrar is a CSI plugin that registers the driver with the kubelet. | - -
- -##### Longhorn CSI Components - -| **Component** | **Description** | -|---------------------------|--------------| -| Backing image manager | Manages backing images for Longhorn volumes. | -| Attacher | Handles attaching and detaching of volumes to nodes. | -| Provisioner | Manages provisioning and de-provisioning of storage resources. | -| Resizer | Enables resizing of storage volumes. | -| Snapshotter | Manages snapshots of Longhorn volumes. | -| Node driver registrar | Registers the CSI driver with the Kubernetes node. | -| Liveness probe | Monitors health of CSI components. | -| Longhorn engine | Core component that handles read and write operations to the storage backend. | -| Longhorn instance manager | Manages Longhorn engine and replica instances. | -| Longhorn share manager | Manages shared volumes and exposes them via protocols like Network File System (NFS). | -| Longhorn UI | User interface for managing Longhorn components and resources. | -| Longhorn support bundle kit| Collects logs and system information for debugging. | - - - - - -The Longhorn Manager component is partially FIPS-compliant. This component uses utilities that do not use a FIPS-compliant version of OpenSSL. The following utilities are not FIPS-compliant: - -- openssl -- curl -- nfs-utils -- bind-tools - - - - -
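If you need to confirm how such utilities are built on a cluster node, standard Linux tooling shows the OpenSSL build they link against. This is a generic inspection sketch rather than a Spectro Cloud procedure, and the utility path is illustrative.

```shell
# Report the OpenSSL version available on the node
openssl version

# Show which SSL/TLS libraries a utility is linked against (illustrative path)
ldd /usr/bin/curl | grep -i -E "ssl|crypto"
```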
- - diff --git a/content/docs/12.5-vertex/00-fips/03-fips-status-icons.md b/content/docs/12.5-vertex/00-fips/03-fips-status-icons.md deleted file mode 100644 index 7e20af609c..0000000000 --- a/content/docs/12.5-vertex/00-fips/03-fips-status-icons.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: "FIPS Status Icons" -metaTitle: "FIPS Status Icons" -metaDescription: "Learn how icons can help you identify FIPS compliance when you consume features that are not FIPS compliant." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -While Palette VerteX brings FIPS 140-2 cryptographic modules to the Palette management platform and deployed clusters, it also provides the capability to consume features that are not FIPS compliant. For example, when the cluster import option is enabled, it allows users to import any type of Kubernetes cluster, including some that are not fully FIPS compliant. -Similarly, when the option to add non-FIPS add-on packs is enabled, users can add packs in cluster profiles that are not FIPS compliant. For more information about these tenant-level settings, refer to [Enable non-FIPS Settings](/vertex/system-management/enable-non-fips-settings). - -To avoid confusion and compliance issues, Palette VerteX displays icons to indicate the FIPS compliance status of clusters, profiles, and packs. - -The table lists icons used to indicate FIPS compliance status. The partial FIPS compliance icon applies only to clusters and profiles because these may contain packs with an *Unknown* or *Not FIPS-compliant* status. - -| **Icon** | **Description** | **Applies to Clusters** | **Applies to Profiles** | **Applies to Packs** | -|---------------|------------|----------------|----------------|----------------| -| ![Full FIPS compliance](/vertex_fips-status-icons_compliant.png) | Full FIPS compliance. All packs in the cluster are FIPS-compliant.| ✅ | ✅ | ✅ | -| ![Partial FIPS compliance](/vertex_fips-status-icons_partial.png) | Partial FIPS compliance. Some packs are FIPS compliant, but there is at least one that is not.| ✅ | ✅ | ❌ | -| ![Not FIPS-compliant](/vertex_fips-status-icons_not-compliant.png) | Not FIPS-compliant. None of the packs in the cluster are FIPS-compliant.| ✅ | ✅ | ✅ | -|![Unknown FIPS state](/vertex_fips-status-icons_unknown.png) | Unknown state of FIPS compliance. This applies to imported clusters that were not deployed by Palette. | ✅ | ✅ | ✅ | - - - -The screenshots below show how Palette VerteX applies FIPS status icons. - -When you create a profile, icons display next to packs. - -![Diagram showing FIPS status icons on profile page.](/vertex_fips-status-icons_icons-on-profile-page.png) - - - -
- -Icons appear next to each profile layer to indicate FIPS compliance. - -![Diagram showing FIPS-compliant icons in profile stack.](/vertex_fips-status-icons_icons-in-profile-stack.png) - -
- -In this screenshot, Palette VerteX shows FIPS status for the cluster is partially compliant because one pack in the profile is not FIPS-compliant. - -![Diagram showing FIPS status icons on Cluster Overview page.](/vertex_fips-status-icons_icons-in-cluster-overview.png) - - -
- -
\ No newline at end of file diff --git a/content/docs/12.5-vertex/02-install-palette-vertex.md b/content/docs/12.5-vertex/02-install-palette-vertex.md deleted file mode 100644 index 4ef881ccb6..0000000000 --- a/content/docs/12.5-vertex/02-install-palette-vertex.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: "Installation" -metaTitle: "Installation" -metaDescription: "Review Palette VerteX system requirements." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -Palette VerteX is available as a self-hosted application that you install in your environment. The self-hosted version is a dedicated Palette VerteX environment hosted on VMware instances or in an existing Kubernetes cluster. Palette VerteX is available in the following modes: - -| **Supported Platform** | **Description** | -|------------------------|------------------------------------| -| VMware | Install Palette VerteX in VMware environment. | -| Kubernetes | Install Palette VerteX using a Helm Chart in an existing Kubernetes cluster. | - -The next sections describe specific requirements for installing Palette VerteX. - -# Proxy Requirements - -- A proxy used for outgoing connections should support both HTTP and HTTPS traffic. - - -- Allow connectivity to domains and ports in the table. - -
- - | **Top-Level Domain** | **Port** | **Description** | - |----------------------------|----------|-------------------------------------------------| - | spectrocloud.com | 443 | Spectro Cloud content repository and pack registry | - | s3.amazonaws.com | 443 | Spectro Cloud VMware OVA files | - | gcr.io | 443 | Spectro Cloud and common third party container images | - | ghcr.io | 443 | Kubernetes VIP images | - | docker.io | 443 | Common third party content | - | googleapis.com | 443 | For pulling Spectro Cloud images | - | docker.com | 443 | Common third party container images | - | raw.githubusercontent.com | 443 | Common third party content | - | projectcalico.org | 443 | Calico container images | - | quay.io | 443 | Common 3rd party container images | - | grafana.com | 443 | Grafana container images and manifests | - | github.com | 443 | Common third party content | - - -# Size Guidelines - -This section lists resource requirements for Palette VerteX for various capacity levels. In Palette VerteX, the terms *small*, *medium*, and *large* are used to describe the instance size of worker pools that Palette VerteX is installed on. The following table lists the resource requirements for each size. - - -
- - - -The recommended maximum number of deployed nodes and clusters in the environment should not be exceeded. We have tested the performance of Palette VerteX with the recommended maximum number of deployed nodes and clusters. Exceeding these limits can negatively impact performance and result in instability. The active workload limit refers to the maximum number of active nodes and pods at any given time. - - - -
- - - -| **Size** | **Nodes**| **CPU**| **Memory**| **Storage**| **MongoDB Storage Limit**| **MongoDB Memory Limit**| **MongoDB CPU Limit** |**Total Deployed Nodes**| **Deployed Clusters with 10 Nodes**| -|----------|----------|--------|-----------|------------|--------------------|-------------------|------------------|----------------------------|----------------------| -| Small | 3 | 8 | 16 GB | 60 GB | 20 GB | 4 GB | 2 | 1000 | 100 | -| Medium (Recommended) | 3 | 16 | 32 GB | 100 GB | 60 GB | 8 GB | 4 | 3000 | 300 | -| Large | 3 | 32 | 64 GB | 120 GB | 80 GB | 12 GB | 6 | 5000 | 500 | - - -#### Instance Sizing - -| **Configuration** | **Active Workload Limit** | -|---------------------|---------------------------------------------------| -| Small | Up to 1000 Nodes each with 30 Pods (30,000 Pods) | -| Medium (Recommended) | Up to 3000 Nodes each with 30 Pods (90,000 Pods)| -| Large | Up to 5000 Nodes each with 30 Pods (150,000 Pods) | - - -
- -# Resources - -- [Install on VMware vSphere](/vertex/install-palette-vertex/install-on-vmware) - - -- [Install Using Helm Chart](/vertex/install-palette-vertex/install-on-kubernetes/install) - - - - - -
- -
\ No newline at end of file diff --git a/content/docs/12.5-vertex/02-install-palette-vertex/10-install-on-vmware.md b/content/docs/12.5-vertex/02-install-palette-vertex/10-install-on-vmware.md deleted file mode 100644 index e6147c7298..0000000000 --- a/content/docs/12.5-vertex/02-install-palette-vertex/10-install-on-vmware.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "VMware" -metaTitle: "Install Palette VerteX on VMware" -metaDescription: "Learn how to install Palette VerteX on VMware." -icon: "" -hideToC: false -fullWidth: false ---- - - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -Palette VerteX can be installed on VMware vSphere with internet connectivity or an airgap environment. When you install Palette VerteX, a three-node cluster is created. You use the interactive Palette CLI to install Palette VerteX on VMware vSphere. Refer to [Access Palette VerteX](/vertex#accesspalettevertex) for instructions on requesting repository access. - -# Resources - -- [Install on VMware](/vertex/install-palette-vertex/install-on-vmware/install) - - - - - -- [VMware System Requirements](/vertex/install-palette-vertex/install-on-vmware/vmware-system-requirements) - -
- -
- \ No newline at end of file diff --git a/content/docs/12.5-vertex/02-install-palette-vertex/10-install-on-vmware/10-install.md b/content/docs/12.5-vertex/02-install-palette-vertex/10-install-on-vmware/10-install.md deleted file mode 100644 index ec169d110e..0000000000 --- a/content/docs/12.5-vertex/02-install-palette-vertex/10-install-on-vmware/10-install.md +++ /dev/null @@ -1,341 +0,0 @@ ---- -title: "Instructions" -metaTitle: "Install Palette VerteX on VMware" -metaDescription: "Learn how to deploy Palette VerteX on VMware." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Install Palette VerteX on VMware vSphere - -You install Palette VerteX using the Palette Command Line Interface (CLI), which guides you through creating a configuration file and deploying a three-node enterprise cluster for high availability (HA). You can invoke the Palette CLI on any Linux x86-64 system with the Docker daemon installed and connectivity to the VMware vSphere environment where Palette VerteX will be deployed. - - -# Prerequisites - -- An AMD64 Linux environment with connectivity to the VMware vSphere environment. - - - -- [Docker](https://docs.docker.com/engine/install/) or equivalent container runtime installed and available on the Linux host. - - - -- Palette CLI installed and available. Refer to the Palette CLI [Install](/palette-cli/install-palette-cli#downloadandsetup) page for guidance. - - -- An Ubuntu Pro Subscription and token. Ubuntu Pro provides access to FIPS 140-2 certified cryptographic packages. - - -- Review required VMware vSphere environment [permissions](/vertex/install-palette-vertex/install-on-vmware/vmware-system-requirements). - - - -- We recommend the following resources for Palette VerteX. Refer to the [Palette VerteX size guidelines](/vertex/install-palette-vertex#sizeguidelines) for additional sizing information. - - - 8 CPUs per VM. - - - 16 GB Memory per VM. - - - 100 GB Disk Space per VM. - - -- The following network ports must be accessible for Palette VerteX to operate successfully. - - - TCP/443: Inbound to and outbound from the Palette VerteX management cluster. - - - TCP/6443: Outbound traffic from the Palette VerteX management cluster to the deployed cluster's Kubernetes API server. - - -- Ensure you have an SSL certificate that matches the domain name you will assign to Palette VerteX. You will need this to enable HTTPS encryption for Palette VerteX. Reach out to your network administrator or security team to obtain the SSL certificate. You need the following files: - - - x509 SSL certificate file in base64 format. - - - x509 SSL certificate key file in base64 format. - - - x509 SSL certificate authority file in base64 format. This file is optional. - - -- Zone tagging is required for dynamic storage allocation across fault domains when provisioning workloads that require persistent storage. Refer to [Zone Tagging](/vertex/install-palette-vertex/install-on-vmware/vmware-system-requirements#zonetagging) for information. - - -- Assigned IP addresses for application workload services, such as Load Balancer services. - - -- Shared Storage between vSphere hosts. - - -
- - - -Self-hosted Palette VerteX installations provide a system Private Cloud Gateway (PCG) out-of-the-box and typically do not require a separate, user-installed PCG. However, you can create additional PCGs as needed to support provisioning into remote data centers that do not have a direct incoming connection from the Palette console. To learn how to install a PCG on VMware, check out the [VMware](/clusters/data-center/vmware) guide. - - - - -# Install the Enterprise Cluster - -The video below provides a demonstration of the installation wizard and the prompts you will encounter. Take a moment to watch the video before you begin the installation process. Make sure to use values that are appropriate for your environment. Use the **three-dot Menu** in the lower right corner of the video to expand the video to full screen and to change the playback speed. - -
- - `video: title: "vertex-cli-install": /./vertex-install.mp4` - -Use the following steps to install Palette VerteX. - - -
- -1. Open a terminal window and invoke the Palette CLI by using the `ec` command to install the enterprise cluster. The interactive CLI prompts you for configuration details and then initiates the installation. For more information about the `ec` subcommand, refer to [Palette Commands](/palette-cli/commands#ec). - -
- - ```bash - palette ec install - ``` - -2. At the **Enterprise Cluster Type** prompt, choose **Palette VerteX**. - - -3. Type `y` to enable Ubuntu Pro, and provide your Ubuntu Pro token when prompted. - -
- - - - To ensure FIPS compliance, be sure to enter your Ubuntu Pro token. - - - -
- - -4. Provide the FIPS repository URL you received from our support team. - - -5. Enter the FIPS repository credentials. - - -6. Choose `VMware vSphere` as the cloud type. This is the default. - - -7. Type an enterprise cluster name. - - -8. When prompted, enter the information listed in each of the following tables. - -
- - #### Environment Configuration - - |**Parameter**| **Description**| - |:-------------|----------------| - |**HTTPS Proxy**|Leave this blank unless you are using an HTTPS Proxy. This setting will be propagated to all EC nodes and all of its target cluster nodes. Example: `https://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| - |**HTTP Proxy**|Leave this blank unless you are using an HTTP Proxy. This setting will be propagated to all EC nodes and all of its target cluster nodes. Example: `http://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| - |**No Proxy**|The default is blank. You can add a comma-separated list of local network CIDR addresses, hostnames, and domain names that should be excluded from being a proxy. This setting will be propagated to all the nodes to bypass the proxy server. Example if you have a self-hosted environment: `maas.company.com,10.10.0.0/16`.| - |**Proxy CA Certificate Filepath**|The default is blank. You can provide the filepath of a CA certificate on the installer host. If provided, this CA certificate will be copied to each host in the PCG cluster during deployment. The provided path will be used on the PCG cluster hosts. Example: `/usr/local/share/ca-certificates/ca.crt`.| - |**Pod CIDR**|Enter the CIDR pool IP that will be used to assign IP addresses to pods in the EC cluster. The pod IP addresses should be unique and not overlap with any machine IPs in the environment.| - |**Service IP Range**|Enter the IP address range that will be used to assign IP addresses to services in the EC cluster. The service IP addresses should be unique and not overlap with any machine IPs in the environment.| - -
- - - -9. Select the OCI registry type and provide the configuration values. Review the following table for more information. - -
- - #### Pack & Image Registry Configuration - - | **Parameter** | **Description** | - |---------------------------|-----------------------------------------| - | **Registry Type** | Specify the type of registry. Allowed values are `OCI` or `OCI ECR`. | - | **Registry Name** | Enter the name of the registry. | - | **Registry Endpoint** | Enter the registry endpoint. | - | **Registry Base Path** | Enter the registry base path. | - |**Allow Insecure Connection** | Bypasses x509 verification. Type `Y` if using a VMware vSphere instance with self-signed Transport Layer Security (TLS) certificates. Otherwise, type `n`.| - | **Registry Username** or **Registry Access Key** | Enter the registry username or the access key if using `OCI ECR`. | - | **Registry Password** or **Registry Secret Key** | Enter the registry password or the secret key if using `OCI ECR`. | - | **Registry Region** | Enter the registry region. This option is only available if you are using `OCI ECR`. | - | **ECR Registry Private** | Type `y` if the registry is private. Otherwise, type `n`. | - | **Use Public Registry for Images** | Type `y` to use a public registry for images. Type `n` to use a different registry for images. If you are using another registry for images, you will be prompted to enter the registry URL, base path, username, and password. | -
- -10. Next, specify the database storage size to allocate for Palette VerteX. The default is 20 GB. Refer to the [size guidelines](/vertex/install-palette-vertex#sizeguidelines) for additional information. - - - -11. The next set of prompts is for the VMware vSphere account information. Enter the information listed in the following table. - -
- - #### VMware vSphere Account Information - - |**Parameter** | **Description**| - |-----------------------------------------|----------------| - |**vSphere Endpoint** | VMware vSphere endpoint. Must be a fully qualified domain name (FQDN) or IP address without a scheme - that is, without an IP protocol, such as `https://`. Example: `vcenter.mycompany.com`.| - |**vSphere Username** | VMware vSphere account username.| - |**vSphere Password**| VMware vSphere account password.| - |**Allow Insecure Connection** | Bypasses x509 verification. Type `Y` if using a VMware vSphere instance with self-signed Transport Layer Security (TLS) certificates. Otherwise, type `n`.| - -
- - #### VMware vSphere Cluster Configuration - - This information determines where Palette VerteX will be deployed in your VMware vSphere environment. The Palette CLI will use the provided VMware credentials to retrieve information from your VMware vSphere environment and present options for you to select from. - -
- - |**Parameter** | **Description**| - |-----------------------------------------|----------------| - |**Datacenter**| The installer retrieves the Datacenter automatically. | - |**Folder** | Select the folder that contains the VM instance. | - | **Cluster** | Select the cluster where you want to deploy Palette VerteX. | - | **Network** | Select the network where you want to deploy Palette VerteX. | - | **Resource Pool** | Select the resource pool where you want to deploy Palette VerteX. | - | **Datastore** | Select the datastore where you want to deploy Palette VerteX. | - |**Fault Domains** | Configure one or more fault domains by selecting values for these properties: Cluster, Network (with network connectivity), Resource Pool, and Storage Type (Datastore or VM Storage Policy). Note that when configuring the Network, if you are using a distributed switch, choose the network that contains the switch. | - |**NTP Servers** | You can provide a list of Network Time Protocol (NTP) servers. | - |**SSH Public Keys** | Provide any public SSH keys to access your Palette VerteX VMs. This option opens up your system's default text editor. Vi is the default text editor for most Linux distributions. To review basic vi commands, check out the [vi Commands](https://www.cs.colostate.edu/helpdocs/vi.html) reference. | - - -12. Specify the IP pool configuration. The placement type can be Static or Dynamic Domain Name Server (DDNS). Choosing static placement creates an IP pool from which VMs are assigned IP addresses. Choosing DDNS assigns IP addresses using DNS. - -
- - #### Static Placement Configuration - | **Parameter** | **Description** | - |---------------------------|-----------------------------------------| - | **IP Start range** | Enter the first address in the EC IP pool range. | - | **IP End range** | Enter the last address in the EC IP pool range. | - | **Network Prefix** | Enter the network prefix for the IP pool range. Valid values are in [0, 32]. Example: `18`. | - | **Gateway IP Address** | Enter the IP address of the static IP gateway. | - | **Name servers** | Comma-separated list of DNS name server IP addresses. | - | **Name server search suffixes** | An optional comma-separated list of DNS search domains. | - - -
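If you plan to provide a value at the **SSH Public Keys** prompt and do not already have a key pair, you can generate one on the installer host beforehand. The file name and comment below are illustrative.

```bash
# Generate an SSH key pair; paste the contents of the .pub file at the prompt
ssh-keygen -t ed25519 -f ~/.ssh/palette-vertex -C "palette-vertex-ec"
cat ~/.ssh/palette-vertex.pub
```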
- - -13. The last set of prompts is for the VMware vSphere machine configuration. Enter the information listed in the following table. - -
- - #### vSphere Machine Configuration - - |**Parameter** | **Description**| - |-----------------------------------------|----------------| - | **Number of CPUs** | The number of CPUs allocated to each VM node instance.| - | **Memory** | The amount of memory allocated to each VM node instance.| - | **Disk Size** | The size of the disk allocated to each VM node instance.| - - -
- - - The installation process stands up a [kind](https://kind.sigs.k8s.io/) cluster locally that will orchestrate the remainder of the installation. The installation takes some time. - -
- - Upon completion, the enterprise cluster configuration file named `ec.yaml` contains the information you provided, and its location is displayed in the terminal. Credentials and tokens are encrypted in the YAML file. - -
- - ```bash hideClipboard - ==== Enterprise Cluster config saved ==== - Location: :/home/spectro/.palette/ec/ec-20230706150945/ec.yaml - ``` - -
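If you need to re-run the installation later, for example to retry after a failure or to rotate passwords, you can reuse the saved configuration with the flags documented for the `ec install` subcommand. The timestamped path below is illustrative.

```bash
# Re-run the installer with the saved configuration
palette ec install --config-file ~/.palette/ec/ec-20230706150945/ec.yaml

# Update only the passwords recorded in the configuration
palette ec install --config-file ~/.palette/ec/ec-20230706150945/ec.yaml --update-passwords
```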
- - When the installation is complete, Enterprise Cluster Details that include a URL and default credentials are displayed in the terminal. You will use these to access the Palette VerteX System Console. - -
- - ```bash hideClipboard - ==================================== - ==== Enterprise Cluster Details ==== - ==================================== - Console URL: https://10.10.189.100/system - Username: ********** - Password: ********** - ``` - - -14. Copy the URL to the browser to access the System Console. You will be prompted to reset the password. - -
- - - - The first time you visit the Palette VerteX system console, a warning message about an untrusted SSL certificate may appear. This is expected, as you have not yet uploaded your SSL certificate to Palette VerteX. You can ignore this warning message and proceed. - - - -
- - ![Screenshot of the Palette VerteX system console showing Username and Password fields.](/vertex_installation_install-on-vmware_vertex-system-console.png) - -
- - -15. Log in to the System Console using the credentials provided in the Enterprise Cluster Details output. After login, you will be prompted to create a new password. Enter a new password and save your changes. You will be redirected to the Palette VerteX system console. - - -16. After login, a Summary page is displayed. Palette VerteX is installed with a self-signed SSL certificate. To assign a different SSL certificate you must upload the SSL certificate, SSL certificate key, and SSL certificate authority files to Palette VerteX. You can upload the files using the Palette VerteX system console. Refer to the [Configure HTTPS Encryption](/vertex/system-management/ssl-certificate-management) page for instructions on how to upload the SSL certificate files to Palette VerteX. - - -17. The last step is to start setting up a tenant. To learn how to create a tenant, check out the [Tenant Management](/vertex/system-management/tenant-management) guide. - -
- - ![Screenshot of the Summary page showing where to click Go to Tenant Management button.](/vertex_installation_install-on-vmware_goto-tenant-management.png) - - -# Validate - -You can verify the installation is successful if you can access the system console using the IP address provided in Enterprise Cluster Details and if the Summary page displays the **Go to Tenant Management** button. - -You can also validate that a three-node Kubernetes cluster is launched and Palette VerteX is deployed on it. - -
- -1. Log in to the vCenter Server by using vSphere Client. - - -2. Navigate to the Datacenter and locate your VM instance. - - -3. Select the VM to access its details page, and verify three nodes are listed. - - -4. Open a web browser session, and use the IP address provided in Enterprise Cluster Details at the completion of the installation to connect to the Palette VerteX System Console. Copy the IP address to the address bar and append `/system`. - - -5. Log in using your credentials. - - -6. A **Summary** page will be displayed that contains a tile with a **Go to Tenant Management** button. After initial installation, the **Summary** page shows there are zero tenants. - - -# Next Steps - -You have successfully installed Palette VerteX in vSphere. Your next steps are to configure Palette VerteX for your organization. Start by creating the first tenant to host your users. Refer to [Create a Tenant](/vertex/system-management/tenant-management) for instructions. - -After you create the tenant, you are ready to configure authentication types in tenant settings and create users and teams. - -# Resources - -- [Create a Tenant](/vertex/system-management/tenant-management) - -
- \ No newline at end of file diff --git a/content/docs/12.5-vertex/02-install-palette-vertex/10-install-on-vmware/30-vmware-system-requirements.md b/content/docs/12.5-vertex/02-install-palette-vertex/10-install-on-vmware/30-vmware-system-requirements.md deleted file mode 100644 index 649c8afc8d..0000000000 --- a/content/docs/12.5-vertex/02-install-palette-vertex/10-install-on-vmware/30-vmware-system-requirements.md +++ /dev/null @@ -1,282 +0,0 @@ ---- -title: "VMware System and Permission Requirements" -metaTitle: "VMware System and Permission Requirements" -metaDescription: "Review VMware system requirements and cloud account permissions." -icon: "" -hideToC: false -fullWidth: false ---- - - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -The sections below describe system requirements and cloud account permissions for VMware vSphere environments hosting Palette VerteX. - - - -# VMware Cloud Account Permissions - -The vSphere user account that deploys Palette VerteX must have the minimum root-level VMware vSphere privileges listed in the table below. The **Administrator** role provides superuser access to all vSphere objects. For users without the **Administrator** role, one or more custom roles can be created based on tasks the user will perform. Permissions and privileges vary depending on the vSphere version you are using. - -Select the tab for your vSphere version. - -
- - - -If the network is a Distributed Port Group under a vSphere Distributed Switch (VDS), ReadOnly access to the VDS without “Propagate to children” is required. - - - -
- - - - - -## Root-Level Role Privileges - -Root-level role privileges are only applied to root objects and data center objects. - -| **vSphere Object** | **Privilege** | -|--------------------|-----------------------------------------| -| CNS | Searchable | -| Datastore | Browse datastore | -| Host | Configuration
Storage partition configuration | -| vSphere Tagging | Create vSphere Tag
Edit vSphere Tag | -| Network | Assign network | -| Sessions | Validate session | -| VM Storage Policies| View VM storage policies | -| Storage views | View | - - -## Spectro Role Privileges - -Spectro role privileges listed in the table must be applied to the spectro-template folder, hosts, clusters, templates, datastore, network objects, and Virtual Machines (VMs). A separate table lists Spectro role privileges for VMs by category. - -
- - - -Palette downloads images and Open Virtual Appliance (OVA) files to the spectro-templates folder and clones images from it to create nodes. - - - - -| **vSphere Object**| **Privileges** | -|-------------------|---------------------------------------------| -| CNS | Searchable | -| Datastore | Allocate space
Browse datastore
Low-level file operations
Remove file
Update VM files
Update VM metadata | -| Folder | Create Folder
Delete folder
Move folder
Rename folder| -| Host | Local operations: Reconfigure VM | -| Network | Assign network | -| Resource | Apply recommendation
Assign VM to resource pool
Migrate powered off VM
Migrate powered on VM
Query vMotion | -| Sessions | Validate sessions | -| Storage policies | View access for VM storage policies is required.
Ensure ``StorageProfile.View`` is available. | -| spectro-templates | Read only | -| Storage views | View | -| Tasks | Create task
Update task | -| vApp | Import
View OVF environment
 Configure vApp application
Configure vApp instance | -| vSphere tagging | Assign or Unassign vSphere Tag
Create vSphere Tag
Delete vSphere Tag
Edit vSphere Tag | - - -The following table lists Spectro Cloud role privileges for VMs by category. - -| **vSphere Object**| **Category** | **Privileges** | -|-------------------|----------------------|--------------------| -| Virtual Machines | Change Configuration | Acquire disk lease
Add existing disk
Add new disk
Add or remove device
Advanced configuration
Change CPU count
Change memory
Change settings
Change swapfile placement
Change resource
Change host USB device
Configure raw device
Configure managedBy
Display connection settings
Extend virtual disk
Modify device settings
 Query fault tolerance compatibility
Query unowned files
Reload from path
Remove disk
Rename
Reset guest information
Set annotation
Toggle disk change tracking
Toggle fork parent
Upgrade VM compatibility| -| | Edit Inventory | Create from existing
Create new
Move
Register
Remove
Unregister | -| | Guest Operations | Alias modification
Alias query
Modify guest operations
Invoke programs
Queries | -| | Interaction | Console Interaction
Power on/off | -| | Provisioning | Allow disk access
Allow file access
Allow read-only disk access
Allow VM download
Allow VM files upload
Clone template
Clone VM
Create template from VM
Customize guest
Deploy template
Mark as template
Mark as VM
Modify customization specification
Promote disks
Read customization specifications | -| | Service Configuration| Allow notifications
Allow polling of global event notifications
Manage service configurations
Modify service configurations
Query service configurations
Read service configurations | -| | Snapshot Management | Create snapshot
Remove snapshot
Rename snapshot
Revert to snapshot | -| | vSphere Replication | Configure replication
Manage replication
Monitor replication | -| | vSAN | Cluster: ShallowRekey | - -
- - - - - - -## Root-Level Role Privileges - -Root-level role privileges are only applied to root objects and Data center objects. - -| **vSphere Object**| **Privileges** | -|-------------------|---------------------------------------------| -| CNS | Searchable | -| Datastore | Browse datastore | -| Host | Configuration
Storage partition configuration| -| vSphere tagging | Create vSphere Tag
Edit vSphere Tag | -| Network | Assign network | -| Profile-driven storage | View | -| Sessions | Validate session | -| Storage views | View | - - -## Spectro Role Privileges - -The Spectro role privileges listed in the table must be applied to the spectro-templates folder, hosts, clusters, templates, datastore, network objects, and Virtual Machines (VMs). A separate table lists Spectro role privileges for VMs by category. - -
- - - -Palette downloads images and Open Virtual Appliance (OVA) files to the spectro-templates folder and clones images from it to create nodes. - - - -
- -| **vSphere Object**| **Privileges** | -|-------------------|---------------------------------------------| -| CNS | Searchable | -| Datastore | Allocate space
Browse datastore
Low-level file operations
Remove file
Update VM files
Update VM metadata | -| Folder | Create Folder
Delete folder
Move folder
Rename folder| -| Host | Local operations: Reconfigure VM | -| Network | Assign network | -| Resource | Apply recommendation
Assign VM to resource pool
Migrate powered off VM
Migrate powered on VM
Query vMotion | -| Profile-driven storage | Profile-driven storage view | -| Sessions | Validate session | -| spectro-templates | Read only | -| Storage views | Configure service
View | -| Tasks | Create task
Update task | -| vApp | Import
View OVF environment
Configure vApp applications
Configure vApp instances | -| vSphere tagging | Assign or Unassign vSphere Tag
Create vSphere Tag
Delete vSphere Tag
Edit vSphere Tag | - -
- -The following table lists Spectro role privileges for VMs by category. - -| **vSphere Object**| **Category** | **Privileges** | -|-------------------|----------------------|--------------------| -| Virtual Machines | Change Configuration | Acquire disk lease
Add existing disk
Add new disk
Add or remove device
Advanced configuration
Change CPU count
Change memory
Change Settings
Change Swapfile placement
Change resource
Change host USB device
Configure Raw device
Configure managedBy
Display connection settings
Extend virtual disk
Modify device settings
Query fault tolerance compatibility
Query unowned files
Reload from path
Remove disk
Rename
Reset guest information
Set annotation
Toggle disk change tracking
Toggle fork parent
Upgrade VM compatibility| -| | Edit Inventory | Create from existing
Create new
Move
Register
Remove
Unregister | -| | Guest Operations | Alias modification
Alias query
Modify guest operations
Invoke programs
Query guest operations | -| | Interaction | Console Interaction
Power on/off | -| | Provisioning | Allow disk access
Allow file access
Allow read-only disk access
Allow VM download
Allow VM upload
Clone template
Clone VM
Create template from VM
Customize guest
Deploy template
Mark as template
Modify customization specifications
Promote disks
Read customization specifications | -| | Service Configuration| Allow notifications
Allow polling of global event notifications
Manage service configurations
Modify service configurations
Query service configurations
Read service configurations | -| | Snapshot Management | Create snapshot
Remove snapshot
Rename snapshot
Revert to snapshot | -| | vSphere Replication | Configure replication
Manage replication
Monitor replication | -| | vSAN | Cluster
ShallowRekey | - - -
- - - - - -## Root-Level Role Privileges - -Root-level role privileges are only applied to root objects and Data center objects. - -| **vSphere Object**| **Privileges** | -|-------------------|---------------------------------------------| -| CNS | Searchable | -| Datastore | Browse datastore | -| Host | Configuration
Storage partition configuration| -| vSphere tagging | Create vSphere Tag
Edit vSphere Tag | -| Network | Assign network | -| Profile-driven storage | Profile-driven storage view | -| Sessions | Validate session | -| Storage views | View | - - -## Spectro Role Privileges - -The Spectro role privileges listed in the table must be applied to the spectro-templates folder, hosts, clusters, templates, datastore, network objects, and Virtual Machines (VMs). A separate table lists Spectro role privileges for VMs by category. - -
- - - -Palette downloads images and Open Virtual Appliance (OVA) files to the spectro-templates folder and clones images from it to create nodes. - - - - -| **vSphere Object**| **Privileges** | -|-------------------|---------------------------------------------| -| CNS | Searchable | -| Datastore | Allocate space
Browse datastore
Low-level file operations
Remove file
Update VM files
Update VM metadata | -| Folder | Create Folder
Delete folder
Move folder
Rename folder| -| Host | Local operations: Reconfigure VM | -| Network | Assign network | -| Profile-driven storage | Profile-driven storage view | -| Resource | Apply recommendation
Assign VM to resource pool
Migrate powered off VM
Migrate powered on VM
Query vMotion | -| Sessions | Validate session | -| spectro-templates | Read only | -| Storage views | View | -| Tasks | Create task
Update task | -| vApp | Import
View OVF environment
Configure vApp applications
Configure vApp instances | -| vSphere tagging | Assign or Unassign vSphere Tag
Create vSphere Tag
Delete vSphere Tag
Edit vSphere Tag | - -
- -The following table lists Spectro role privileges for VMs by category. - -| **vSphere Object**| **Category** | **Privileges** | -|-------------------|----------------------|--------------------| -| Virtual Machines | Change Configuration | Acquire disk lease
Add existing disk
Add new disk
Add or remove device
Advanced configuration
Change CPU count
Change memory
Change Settings
Change Swapfile placement
Change resource
Change host USB device
Configure Raw device
Configure managedBy
Display connection settings
Extend virtual disk
Modify device settings
Query fault tolerance compatibility
Query unowned files
Reload from path
Remove disk
Rename
Reset guest information
Set annotation
Toggle disk change tracking
Toggle fork parent
Upgrade VM compatibility| -| | Edit Inventory | Create from existing
Create new
Move
Register
Remove
Unregister | -| | Guest Operations | Alias modification
Alias query
Modify guest operations
Invoke programs
Query guest operations | -| | Interaction | Console Interaction
Power on/off | -| | Provisioning | Allow disk access
Allow file access
Allow read-only disk access
Allow VM download
Allow VM upload
Clone template
Clone VM
Create template from VM
Customize guest
Deploy template
Mark as template
Modify customization specifications
Promote disks
Read customization specifications | -| | Service Configuration| Allow notifications
Allow polling of global event notifications
Manage service configurations
Modify service configurations
Query service configurations
Read service configurations | -| | Snapshot Management | Create snapshot
Remove snapshot
Rename snapshot
Revert to snapshot | -| | vSphere Replication | Configure replication
Manage replication
Monitor replication | -| | vSAN | Cluster
ShallowRekey | - -
- - -
- -
- -
- - -# Zone Tagging - -Zone tagging is required for dynamic storage allocation across fault domains when provisioning workloads that require persistent storage. This is required to install the Palette Platform itself and is also helpful for workloads deployed in the tenant clusters if they have persistent storage needs. Use vSphere tags on data centers(k8s-region) and compute clusters (k8s-zone) to create distinct zones in your environment. - -For example, assume your vCenter environment includes three compute clusters, cluster-1, cluster-2, and cluster-3, that are part of vSphere Object, Tag Category, and Tag value as shown in the table. - -| **vSphere Object** | **Tag Category** | **Tag Value** | -|--------------------|------------------|---------------| -| dc-1 | k8s-region | region1 | -| cluster-1 | k8s-zone | az1 | -| cluster-2 | k8s-zone | az2 | -| cluster-3 | k8s-zone | az3 | - - - - - -The exact values for the k8s-region and k8s-zone tags can be different from the ones described in the above example, if they are unique. - - - -## Naming Conventions for vSphere Region and Zone Tags - -The following requirements apply to tags: - -
- -- A valid tag must consist of alphanumeric characters. - - -- The tag must start and end with an alphanumeric characters. - - -- The regex used for validation is ``(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?`` \ No newline at end of file diff --git a/content/docs/12.5-vertex/02-install-palette-vertex/20-install-on-kubernetes.md b/content/docs/12.5-vertex/02-install-palette-vertex/20-install-on-kubernetes.md deleted file mode 100644 index ade151709e..0000000000 --- a/content/docs/12.5-vertex/02-install-palette-vertex/20-install-on-kubernetes.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: "Kubernetes" -metaTitle: "Kubernetes" -metaDescription: "Learn how to install Palette VerteX on Kubernetes." -icon: "" -hideToC: false -fullWidth: false ---- - -# Overview - -Palette VerteX can be installed on Kubernetes with internet connectivity or an airgap environment. When you install Palette VerteX, a three-node cluster is created. You use a Helm chart our support team provides to install Palette VerteX on Kubernetes. Refer to [Access Palette VerteX](/vertex#accesspalettevertex) for instructions on requesting access to the Helm Chart. - - -To get started with Palette VerteX on Kubernetes, refer to the [Install Instructions](/vertex/install-palette-vertex/install-on-kubernetes/install) guide. - -# Resources - -- [Install Instructions](/vertex/install-palette-vertex/install-on-kubernetes/install) - - - - - -- [Helm Configuration Reference](/vertex/install-palette-vertex/install-on-kubernetes/vertex-helm-ref) diff --git a/content/docs/12.5-vertex/02-install-palette-vertex/20-install-on-kubernetes/10-install.md b/content/docs/12.5-vertex/02-install-palette-vertex/20-install-on-kubernetes/10-install.md deleted file mode 100644 index 0b87947519..0000000000 --- a/content/docs/12.5-vertex/02-install-palette-vertex/20-install-on-kubernetes/10-install.md +++ /dev/null @@ -1,323 +0,0 @@ ---- -title: "Instructions" -metaTitle: "Install VerteX" -metaDescription: "Learn how to install Palette VerteX on VMware vSphere." -icon: "" -hideToC: false -fullWidth: false ---- - -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import Tabs from 'shared/components/ui/Tabs'; - -# Overview - -Use the Palette VerteX Helm Chart to install Palette VerteX in a multi-node Kubernetes cluster in your production environment. Palette VerteX is a FIPS-compliant product that must be installed in a FIPS-compliant environment. This means that Operating System (OS) the Kubernetes cluster you are installing Palette VerteX into must be FIPS-compliant. - -Review our [architecture diagrams](/architecture/networking-ports) to ensure your Kubernetes cluster has the necessary network connectivity for Palette to operate successfully. - -# Prerequisites - -- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) is installed and available. - - -- [Helm](https://helm.sh/docs/intro/install/) is installed and available. - - -- Access to the target Kubernetes cluster's kubeconfig file. You must be able to interact with the cluster using `kubectl` commands and have sufficient permissions to install Palette VerteX. We recommend using a role with cluster-admin permissions to install Palette VerteX. - - -- The Kubernetes cluster must be set up on a supported version of Kubernetes, which includes versions v1.25 to v1.27. - - - -- Ensure the Kubernetes cluster does not have Cert Manager installed. 
Palette VerteX requires a unique Cert Manager configuration to be installed as part of the installation process. If Cert Manager is already installed, you must uninstall it before installing Palette VerteX. - - -- The Kubernetes cluster must have a Container Storage Interface (CSI) installed and configured. Palette VerteX requires a CSI to store persistent data. You may install any CSI that is compatible with your Kubernetes cluster. - - - -- We recommend the following resources for Palette VerteX. Refer to the [Palette VerteX size guidelines](/vertex/install-palette-vertex#sizeguidelines) for additional sizing information. - - - 8 CPUs per node. - - - 16 GB Memory per node. - - - 100 GB Disk Space per node. - - - A Container Storage Interface (CSI) for persistent data. - - - A minimum of three worker nodes or three untainted control plane nodes. - -
- - - - Refer to the Palette VerteX [size guidelines](/vertex/install-palette-vertex#sizeguidelines) resource for additional sizing information. - - - - -- The following network ports must be accessible for Palette VerteX to operate successfully. - - - TCP/443: Inbound and outbound to and from the Palette VerteX management cluster. - - - TCP/6443: Outbound traffic from the Palette VerteX management cluster to the deployed clusters' Kubernetes API server. - - -- Ensure you have an SSL certificate that matches the domain name you will assign to Palette VerteX. You will need this to enable HTTPS encryption for Palette VerteX. Reach out to your network administrator or security team to obtain the SSL certificate. You need the following files: - - - x509 SSL certificate file in base64 format. - - - x509 SSL certificate key file in base64 format. - - - x509 SSL certificate authority file in base64 format. - - -- Ensure the OS and Kubernetes cluster you are installing Palette VerteX onto is FIPS-compliant. Otherwise, Palette VerteX and its operations will not be FIPS-compliant. - - -- A custom domain and the ability to update Domain Name System (DNS) records. You will need this to enable HTTPS encryption for Palette VerteX. - - -- Access to the Palette Helm Charts. Refer to the [Access Palette VerteX](/vertex#accesspalettevertex) for instructions on how to request access to the Helm Chart. - - - -
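-
-If you need the base64-encoded values for the certificate files listed above, one way to produce them is shown below. This is a minimal sketch that assumes the `base64` utility from GNU coreutils and uses placeholder file names; the flag that disables line wrapping differs on some platforms, such as macOS.
-
-```shell
-# Encode the certificate, key, and certificate authority files as single-line base64 strings (GNU coreutils syntax).
-base64 -w0 tls.crt
-base64 -w0 tls.key
-base64 -w0 ca.crt
-```
-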
- - - -Do not use a Palette-managed Kubernetes cluster when installing Palette VerteX. Palette-managed clusters contain the Palette agent and Palette-created Kubernetes resources that will interfere with the installation of Palette VerteX. - - - - -# Install Palette VerteX - -Use the following steps to install Palette VerteX on Kubernetes. - -
- - - -The following instructions are written agnostic to the Kubernetes distribution you are using. Depending on the underlying infrastructure provider and your Kubernetes distribution, you may need to modify the instructions to match your environment. Reach out to our support team if you need assistance. - - - - -1. Open a terminal session and navigate to the directory where you downloaded the Palette VerteX Helm Charts provided by our support team. We recommend you place all the downloaded files within the same directory. You should have the following Helm Charts: - -
- - - Spectro Management Plane Helm Chart. - -
- - - Cert Manager Helm Chart. - - -2. Extract each Helm Chart into its directory. Use the commands below as a reference. Do this for all the provided Helm Charts. - -
- - ```shell - tar xzvf spectro-mgmt-plane-*.tgz - ``` - -
- - ```shell - tar xzvf cert-manager-*.tgz - ``` - - -3. Install Cert Manager using the following command. Replace the actual file name of the Cert Manager Helm Chart with the one you downloaded, as the version number may be different. - -
- - ```shell - helm upgrade --values cert-manager/values.yaml cert-manager cert-manager-1.11.0.tgz --install - ``` - -
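-
-   Optionally, before continuing, you can confirm that the Cert Manager pods reach the *Running* state. The `cert-manager` namespace used below is an assumption based on common chart defaults; adjust it if the chart provided by our support team installs into a different namespace.
-
-   ```shell
-   kubectl get pods --namespace cert-manager
-   ```
-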
- - - - The Cert Manager Helm Chart provided by our support team is configured for Palette VerteX. Do not modify the **values.yaml** file unless instructed to do so by our support team. - - - - -4. Open the **values.yaml** in the **spectro-mgmt-plane** folder with a text editor of your choice. The **values.yaml** contains the default values for the Palette VerteX installation parameters. You must populate the following parameters in the YAML file before installing Palette VerteX. - -
- - | **Parameter** | **Description** | **Type** | - | --- | --- | --- | - | `env.rootDomain` | The URL name or IP address you will use for the Palette VerteX installation. | string | - | `ociPackRegistry` or `ociPackEcrRegistry` | The OCI registry credentials for Palette VerteX FIPS packs.| object | - | `scar` | The Spectro Cloud Artifact Repository (SCAR) credentials for Palette VerteX FIPS images. These credentials are provided by our support team. | object | - -
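-
-   The following fragment is a minimal, hypothetical sketch of how these parameters are laid out in the **values.yaml** file, based on the structures described in the [Helm Configuration Reference](/vertex/install-palette-vertex/install-on-kubernetes/vertex-helm-ref) page. The chart version you received may differ slightly, the domain shown is only an example, and the registry and SCAR values are placeholders for the credentials provided by our support team.
-
-   ```yaml
-   config:
-     env:
-       rootDomain: "vertex.example.com"
-     ociPackEcrRegistry:
-       endpoint: ""
-       name: ""
-       accessKey: ""
-       secretKey: ""
-     scar:
-       endpoint: ""
-       username: ""
-       password: ""
-   ```
-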
- - Save the **values.yaml** file after you have populated the required parameters listed in the table. - -
- - - - You can learn more about the parameters in the **values.yaml** file in the [Helm Configuration Reference](/vertex/install-palette-vertex/install-on-kubernetes/vertex-helm-ref) page. - - - - - -5. Install the Palette VerteX Helm Chart using the following command. - -
- - ```shell - helm upgrade --values spectro-mgmt-plane/values.yaml hubble spectro-mgmt-plane-0.0.0.tgz --install - ``` - - -6. Track the installation process using the command below. Palette VerteX is ready when the deployments in the namespaces `cp-system`, `hubble-system`, `ingress-nginx`, `jet-system` , and `ui-system` reach the *Ready* state. The installation takes between two to three minutes to complete. - -
- - ```shell - kubectl get pods --all-namespaces --watch - ``` - - -7. Create a DNS CNAME record that is mapped to the Palette VerteX `ingress-nginx-controller` load balancer. You can use the following command to retrieve the load balancer IP address. You may require the assistance of your network administrator to create the DNS record. - -
- - ```shell - kubectl get service ingress-nginx-controller --namespace ingress-nginx --output jsonpath='{.status.loadBalancer.ingress[0].hostname}' - ``` - -
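-
-   The command above prints a hostname, which is what you typically get when the provider fronts the service with a DNS-based load balancer. If your environment assigns a plain IP address to the load balancer instead, the `ip` field of the same status object is usually the one populated, so a variation along these lines may be more useful.
-
-   ```shell
-   kubectl get service ingress-nginx-controller --namespace ingress-nginx --output jsonpath='{.status.loadBalancer.ingress[0].ip}'
-   ```
-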
- - - - As you create tenants in Palette VerteX, the tenant name is prefixed to the domain name you assigned to Palette VerteX. For example, if you create a tenant named `tenant1` and the domain name you assigned to Palette VerteX is `vertex.example.com`, the tenant URL will be `tenant1.vertex.example.com`. You can create an additional wildcard DNS record to map all tenant URLs to the Palette VerteX load balancer. - - - - -8. Use the custom domain name or the IP address of the load balancer to visit the Palette VerteX system console. To access the system console, open a web browser and paste the custom domain URL in the address bar and append the value `/system`. Replace the domain name in the URL with your custom domain name or the IP address of the load balancer. Alternatively, you can use the load balancer IP address with the appended value `/`system` to access the system console. - -
- - - - The first time you visit the Palette VerteX system console, a warning message about an untrusted SSL certificate may appear. This is expected, as you have not yet uploaded your SSL certificate to Palette VerteX. You can ignore this warning message and proceed. - - - -
- - ![A view of the Palette system console login screen.](/vertex_install-on-kubernetes_install_system-console.png) - - -9. Log in to the system console using the following default credentials. - -
- - | **Parameter** | **Value** | - | --- | --- | - | Username | `admin` | - | Password | `admin` | - -
- - After login, you will be prompted to create a new password. Enter a new password and save your changes. You will be redirected to the Palette VerteX system console. - -
- -10. After login, a summary page is displayed. Palette VerteX is installed with a self-signed SSL certificate. To assign a different SSL certificate you must upload the SSL certificate, SSL certificate key, and SSL certificate authority files to Palette VerteX. You can upload the files using the Palette VerteX system console. Refer to the [Configure HTTPS Encryption](/vertex/system-management/ssl-certificate-management) page for instructions on how to upload the SSL certificate files to Palette VerteX. - -
- - - -If you are planning to deploy host clusters into different networks, you may require a reverse proxy. Check out the [Configure Reverse Proxy](/vertex/system-management/reverse-proxy) guide for instructions on configuring a reverse proxy for Palette VerteX. - - - - -You now have a self-hosted instance of Palette VerteX installed in a Kubernetes cluster. Make sure you retain the **values.yaml** file as you may need it for future upgrades. - - -# Validate - -Use the following steps to validate the Palette VerteX installation. - -
- - -1. Open a web browser and navigate to the Palette VerteX system console. Paste the custom domain URL in the address bar and append the value `/system`. Replace the domain name in the URL with your custom domain name or the IP address of the load balancer. - - - -2. Log in using the credentials you received from our support team. After login, you will be prompted to create a new password. Enter a new password and save your changes. You will be redirected to the Palette VerteX system console. - - -3. Open a terminal session and issue the following command to verify the Palette VerteX installation. The command should return a list of deployments in the `cp-system`, `hubble-system`, `ingress-nginx`, `jet-system`, and `ui-system` namespaces. - -
- - ```shell - kubectl get pods --all-namespaces --output custom-columns="NAMESPACE:metadata.namespace,NAME:metadata.name,STATUS:status.phase" \ - | grep -E '^(cp-system|hubble-system|ingress-nginx|jet-system|ui-system)\s' - ``` - - Your output should look similar to the following. - - ```shell hideClipboard - cp-system spectro-cp-ui-689984f88d-54wsw Running - hubble-system auth-85b748cbf4-6drkn Running - hubble-system auth-85b748cbf4-dwhw2 Running - hubble-system cloud-fb74b8558-lqjq5 Running - hubble-system cloud-fb74b8558-zkfp5 Running - hubble-system configserver-685fcc5b6d-t8f8h Running - hubble-system event-68568f54c7-jzx5t Running - hubble-system event-68568f54c7-w9rnh Running - hubble-system foreq-6b689f54fb-vxjts Running - hubble-system hashboard-897bc9884-pxpvn Running - hubble-system hashboard-897bc9884-rmn69 Running - hubble-system hutil-6d7c478c96-td8q4 Running - hubble-system hutil-6d7c478c96-zjhk4 Running - hubble-system mgmt-85dbf6bf9c-jbggc Running - hubble-system mongo-0 Running - hubble-system mongo-1 Running - hubble-system mongo-2 Running - hubble-system msgbroker-6c9b9fbf8b-mcsn5 Running - hubble-system oci-proxy-7789cf9bd8-qcjkl Running - hubble-system packsync-28205220-bmzcg Succeeded - hubble-system spectrocluster-6c57f5775d-dcm2q Running - hubble-system spectrocluster-6c57f5775d-gmdt2 Running - hubble-system spectrocluster-6c57f5775d-sxks5 Running - hubble-system system-686d77b947-8949z Running - hubble-system system-686d77b947-cgzx6 Running - hubble-system timeseries-7865bc9c56-5q87l Running - hubble-system timeseries-7865bc9c56-scncb Running - hubble-system timeseries-7865bc9c56-sxmgb Running - hubble-system user-5c9f6c6f4b-9dgqz Running - hubble-system user-5c9f6c6f4b-hxkj6 Running - ingress-nginx ingress-nginx-controller-2txsv Running - ingress-nginx ingress-nginx-controller-55pk2 Running - ingress-nginx ingress-nginx-controller-gmps9 Running - jet-system jet-6599b9856d-t9mr4 Running - ui-system spectro-ui-76ffdf67fb-rkgx8 Running - ``` - - -# Next Steps - -You have successfully installed Palette VerteX in a Kubernetes cluster. Your next steps are to configure Palette VerteX for your organization. Start by creating the first tenant to host your users. Use the [Create a Tenant](/vertex/system-management/tenant-management#createatenant) page for instructions on how to create a tenant. diff --git a/content/docs/12.5-vertex/02-install-palette-vertex/20-install-on-kubernetes/20-vertex-helm-ref.md b/content/docs/12.5-vertex/02-install-palette-vertex/20-install-on-kubernetes/20-vertex-helm-ref.md deleted file mode 100644 index 0a9a99773e..0000000000 --- a/content/docs/12.5-vertex/02-install-palette-vertex/20-install-on-kubernetes/20-vertex-helm-ref.md +++ /dev/null @@ -1,419 +0,0 @@ ---- -title: "Helm Configuration Reference" -metaTitle: "Helm Configuration Reference" -metaDescription: "Reference resource for the Palette VerteX Helm Chart installation parameters." -icon: "" -hideToC: false -fullWidth: false ---- - -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import Tabs from 'shared/components/ui/Tabs'; - -# Overview - -You can use the Palette VerteX Helm Chart to install Palette VerteX in a multi-node Kubernetes cluster in your production environment. The Helm chart allows you to customize values in the **values.yaml** file. This reference page lists and describes parameters available in the **values.yaml** file from the Helm Chart for your installation. 
- -To learn how to install Palette VerteX using the Helm Chart, refer to the Kubernetes [Instructions](/vertex/install-palette-vertex/install-on-kubernetes/install). - -## Required Parameters - -The following parameters are required for a successful installation of Palette VerteX. - - -| **Parameters** | **Description** | **Type** | -| --- | --- | --- | -| `config.env.rootDomain` | Used to configure the domain for the Palette installation. We recommend you create a CNAME DNS record that supports multiple subdomains. You can achieve this using a wild card prefix, `*.vertex.abc.com`. Review the [Environment parameters](/vertex/install-palette-vertex/install-on-kubernetes/vertex-helm-ref#environment) to learn more. | String | -| `config.env.ociPackRegistry` or `config.env.ociPackEcrRegistry`| Specifies the FIPS image registry for Palette VerteX. You can use an a self-hosted OCI registry or a public OCI registry we maintain and support. For more information, refer to the [Registry](#registries) section. | Object | -| `scar`| The Spectro Cloud Artifact Repository (SCAR) credentials for Palette VerteX FIPS images. Our support team provides these credentials. For more information, refer to the [Registry](#registries) section. | Object | - - - - -If you are installing an air-gapped version of Palette VerteX, you must provide the image swap configuration. For more information, refer to the [Image Swap Configuration](#imageswapconfiguration) section. - - - - - -## MongoDB - -Palette VerteX uses MongoDB Enterprise as its internal database and supports two modes of deployment:

- -- MongoDB Enterprise deployed and active inside the cluster. - - -- MongoDB Enterprise is hosted on a software-as-a-service (SaaS) platform, such as MongoDB Atlas. - -The table below lists the parameters used to configure a MongoDB deployment. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `internal` | Specifies the MongoDB deployment either in-cluster or using Mongo Atlas. | Boolean | `true` | -| `databaseUrl`| The URL for MongoDB Enterprise. If using a remote MongoDB Enterprise instance, provide the remote URL. This parameter must be updated if `mongo.internal` is set to `false`. | String | `mongo-0.mongo,mongo-1.mongo,mongo-2.mongo` | -| `databasePassword`| The base64-encoded MongoDB Enterprise password. If you don't provide a value, a random password will be auto-generated. | String | `""` | -| `replicas`| The number of MongoDB replicas to start. | Integer | `3` | -| `memoryLimit`| Specifies the memory limit for each MongoDB Enterprise replica.| String | `4Gi` | -| `cpuLimit` | Specifies the CPU limit for each MongoDB Enterprise member.| String | `2000m` | -| `pvcSize`| The storage settings for the MongoDB Enterprise database. Use increments of `5Gi` when specifying the storage size. The storage size applies to each replica instance. The total storage size for the cluster is `replicas` * `pvcSize`. | string | `20Gi`| -| `storageClass`| The storage class for the MongoDB Enterprise database. | String | `""` | - - -```yaml -mongo: - internal: true - databaseUrl: "mongo-0.mongo,mongo-1.mongo,mongo-2.mongo" - databasePassword: "" - replicas: 3 - cpuLimit: "2000m" - memoryLimit: "4Gi" - pvcSize: "20Gi" - storageClass: "" -``` - -## Config - -Review the following parameters to configure Palette VerteX for your environment. The `config` section contains the following subsections: - -### SSO - -You can configure Palette VerteX to use Single Sign-On (SSO) for user authentication. Configure the SSO parameters to enable SSO for Palette VerteX. You can also configure different SSO providers for each tenant post-install, check out the [SAML & SSO Setup](/user-management/saml-sso) documentation for additional guidance. - -To configure SSO, you must provide the following parameters. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | --- | -| `saml.enabled` | Specifies whether to enable SSO SAML configuration by setting it to true. | Boolean | `false` | -| `saml.acsUrlRoot` | The root URL of the Assertion Consumer Service (ACS).| String | `myfirstpalette.spectrocloud.com`| -| `saml.acsUrlScheme` | The URL scheme of the ACS: `http` or `https`. | String | `https` | -| `saml.audienceUrl` | The URL of the intended audience for the SAML response.| String| `https://www.spectrocloud.com` | -| `saml.entityID` | The Entity ID of the Service Provider.| String | `https://www.spectrocloud.com`| -| `saml.apiVersion` | Specify the SSO SAML API version to use.| String | `v1` | - -```yaml -config: - sso: - saml: - enabled: false - acsUrlRoot: "myfirstpalette.spectrocloud.com" - acsUrlScheme: "https" - audienceUrl: "https://www.spectrocloud.com" - entityId: "https://www.spectrocloud.com" - apiVersion: "v1" -``` - -### Email - -Palette VerteX uses email to send notifications to users. The email notification is used when inviting new users to the platform, password resets, and when [webhook alerts](/clusters/cluster-management/health-alerts#overview) are triggered. 
Use the following parameters to configure email settings for Palette VerteX. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `enabled` | Specifies whether to enable email configuration. | Boolean| `false`| -| `emailID ` | The email address for sending mail.| String| `noreply@spectrocloud.com` | -| `smtpServer` | Simple Mail Transfer Protocol (SMTP) server used for sending mail. | String | `smtp.gmail.com` | -| `smtpPort` | SMTP port used for sending mail.| Integer | `587` | -| `insecureSkipVerifyTLS` | Specifies whether to skip Transport Layer Security (TLS) verification for the SMTP connection.| Boolean | `true` | -| `fromEmailID` | Email address of the ***From*** address.| String | `noreply@spectrocloud.com` | -| `password` | The base64-encoded SMTP password when sending emails.| String | `""` | - -```yaml -config: - email: - enabled: false - emailId: "noreply@spectrocloud.com" - smtpServer: "smtp.gmail.com" - smtpPort: 587 - insecureSkipVerifyTls: true - fromEmailId: "noreply@spectrocloud.com" - password: "" -``` - -### Environment - -The following parameters are used to configure the environment. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `env.rootDomain` | Specifies the URL name assigned to Palette Vertex. The value assigned should have a Domain Name System (DNS) CNAME record mapped to exposed IP address or the load balancer URL of the service *ingress-nginx-controller*. Optionally, if `ingress.ingressStaticIP` is provided with a value you can use same assigned static IP address as the value to this parameter.| String| `""` | -| `env.installerMode` | Specifies the installer mode. Do not modify the value.| String| `self-hosted` | -| `env.installerCloud` | Specifies the cloud provider. Leave this parameter empty if you are installing a self-hosted Palette VerteX. | String | `""` | - -```yaml -config: - env: - rootDomain: "" -``` -
- - - -As you create tenants in Palette VerteX, the tenant name is prefixed to the domain name you assigned to Palette VerteX. For example, if you create a tenant named tenant1 and the domain name you assigned to Palette VerteX is `vertex.example.com`, the tenant URL will be `tenant1.vertex.example.com`. We recommend you create an additional wildcard DNS record to map all tenant URLs to the Palette VerteX load balancer. For example, `*.vertex.example.com`. - - - -### Cluster - -Use the following parameters to configure the Kubernetes cluster. - - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `stableEndpointAccess` | Set to `true` if the Kubernetes cluster is deployed in a public endpoint. If the cluster is deployed in a private network through a stable private endpoint, set to `false`. | Boolean | `false` | - -```yaml -config: - cluster: - stableEndpointAccess: false -``` - -## Registries - -Palette VerteX requires credentials to access the required Palette VerteX images. You can configure different types of registries for Palette VerteX to download the required images. You must configure at least one Open Container Initiative (OCI) registry for Palette VerteX. You must also provide the credentials for the Spectro Cloud Artifact Repository (SCAR) to download the required FIPS images. - -
- -### OCI Registry - - -Palette VerteX requires access to an OCI registry that contains all the required FIPS packs. You can host your own OCI registry and configure Palette VerteX to reference the registry. Alternatively, you can use the public OCI registry provided by us, refer to the [`ociPackRegistry`](#ociecrregistry) section to learn more about the publicly available OCI registry. - - -
- - - -If you are using a self-hosted OCI registry, you must provide the required FIPS packs to the registry. Contact support for additional guidance on how to add the required FIPS packs to your OCI registry. - - - - - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `ociPackRegistry.endpoint` | The endpoint URL for the registry. | String| `""` | -| `ociPackRegistry.name` | The name of the registry. | String| `""` | -| `ociPackRegistry.password` | The base64-encoded password for the registry. | String| `""` | -| `ociPackRegistry.username` | The username for the registry. | String| `""` | -| `ociPackRegistry.baseContentPath`| The base path for the registry. | String | `""` | -| `ociPackRegistry.insecureSkipVerify` | Specifies whether to skip Transport Layer Security (TLS) verification for the registry connection. | Boolean | `false` | -| `ociPackRegistry.caCert` | The registry's base64-encoded certificate authority (CA) certificate. | String | `""` | - - -```yaml -config: - ociPackRegistry: - endpoint: "" - name: "" - password: "" - username: "" - baseContentPath: "" - insecureSkipVerify: false - caCert: "" -``` - -### OCI ECR Registry - -We expose a public OCI ECR registry that you can configure Palette VerteX to reference. If you want to host your own OCI registry, refer to the [OCI Registry](#oci-registry) section. -The OCI Elastic Container Registry (ECR) is hosted in an AWS ECR registry. Our support team provides the credentials for the OCI ECR registry. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `ociPackEcrRegistry.endpoint` | The endpoint URL for the registry. | String| `""` | -| `ociPackEcrRegistry.name` | The name of the registry. | String| `""` | -| `ociPackEcrRegistry.accessKey` | The base64-encoded access key for the registry. | String| `""` | -| `ociPackEcrRegistry.secretKey` | The base64-encoded secret key for the registry. | String| `""` | -| `ociPackEcrRegistry.baseContentPath`| The base path for the registry. | String | `""` | -| `ociPackEcrRegistry.isPrivate` | Specifies whether the registry is private. | Boolean | `true` | -| `ociPackEcrRegistry.insecureSkipVerify` | Specifies whether to skip Transport Layer Security (TLS) verification for the registry connection. | Boolean | `false` | -| `ociPackEcrRegistry.caCert` | The registry's base64-encoded certificate authority (CA) certificate. | String | `""` | - -```yaml -config: - ociPackEcrRegistry: - endpoint: "" - name: "" - accessKey: "" - secretKey: "" - baseContentPath: "" - isPrivate: true - insecureSkipVerify: false - caCert: "" -``` - -### Spectro Cloud Artifact Repository (SCAR) - -SCAR credentials are required to download the necessary FIPS manifests. Our support team provides the SCAR credentials. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `scar.endpoint` | The endpoint URL of SCAR. | String| `""` | -| `scar.username` |The username for SCAR. | String| `""` | -| `scar.password` | The base64-encoded password for the SCAR. | String| `""` | -| `scar.insecureSkipVerify` | Specifies whether to skip Transport Layer Security (TLS) verification for the SCAR connection. | Boolean | `false` | -| `scar.caCert` | The base64-encoded certificate authority (CA) certificate for SCAR. | String | `""` | - -
- - ```yaml - config: - scar: - endpoint: "" - username: "" - password: "" - insecureSkipVerify: false - caCert: "" - ``` - -### Image Swap Configuration - -You can configure Palette VerteX to use image swap to download the required images. This is an advanced configuration option, and it is only required for air-gapped deployments. You must also install the Palette VerteX Image Swap Helm chart to use this option, otherwise, Palette VerteX will ignore the configuration. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `imageSwapInitImage` | The image swap init image. | String | `gcr.io/spectro-images-public/thewebroot/imageswap-init:v1.5.2` | -| `imageSwapImage` | The image swap image. | String | `gcr.io/spectro-images-public/thewebroot/imageswap:v1.5.2` | -| `imageSwapConfig`| The image swap configuration for specific environments. | String | `""` | -| `imageSwapConfig.isEKSCluster` | Specifies whether the cluster is an Amazon EKS cluster. Set to `false` if the Kubernetes cluster is not an EKS cluster. | Boolean | `true` | - -
- - ```yaml - config: - imageSwapImages: - imageSwapInitImage: "gcr.io/spectro-images-public/thewebroot/imageswap-init:v1.5.2" - imageSwapImage: "gcr.io/spectro-images-public/thewebroot/imageswap:v1.5.2" - - imageSwapConfig: - isEKSCluster: true - ``` - -## gRPC - -gRPC is used for communication between Palette VerteX components. You can enable the deployment of an additional load balancer for gRPC. Host clusters deployed by Palette VerteX use the load balancer to communicate with the Palette VerteX control plane. This is an advanced configuration option, and it is not required for most deployments. Speak with your support representative before enabling this option. - -If you want to use an external gRPC endpoint, you must provide a domain name for the gRPC endpoint and a valid x509 certificate. Additionally, you must provide a custom domain name for the endpoint. A CNAME DNS record must point to the IP address of the gRPC load balancer. For example, if your Palette VerteX domain name is `vertex.example.com`, you could create a CNAME DNS record for `grpc.vertex.example.com` that points to the IP address of the load balancer dedicated to gRPC. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `external`| Specifies whether to use an external gRPC endpoint. | Boolean | `false` | -| `endpoint`| The gRPC endpoint. | String | `""` | -| `caCertificateBase64`| The base64-encoded certificate authority (CA) certificate for the gRPC endpoint. | String | `""` | -| `serverCrtBase64`| The base64-encoded server certificate for the gRPC endpoint. | String | `""` | -| `serverKeyBase64`| The base64-encoded server key for the gRPC endpoint. | String | `""` | -| `insecureSkipVerify`| Specifies whether to skip Transport Layer Security (TLS) verification for the gRPC endpoint. | Boolean | `false` | - - - - -```yaml -grpc: - external: false - endpoint: "" - caCertificateBase64: "" - serverCrtBase64: "" - serverKeyBase64: "" - insecureSkipVerify: false -``` - -## Ingress - -Palette VerteX deploys an Nginx Ingress Controller. This controller is used to route traffic to the Palette VerteX control plane. You can change the default behavior and omit the deployment of an Nginx Ingress Controller. - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `enabled`| Specifies whether to deploy an Nginx controller. Set to `false` if you do not want an Nginx controller deployed. | Boolean | `true` | -| `ingress.internal`| Specifies whether to deploy a load balancer or use the host network. | Boolean | `false` | -| `ingress.certificate`| Specify the base64-encoded x509 SSL certificate for the Nginx Ingress Controller. If left blank, the Nginx Ingress Controller will generate a self-signed certificate. | String | `""` | -| `ingress.key`| Specify the base64-encoded x509 SSL certificate key for the Nginx Ingress Controller. | String | `""` | -| `ingress.annotations`| A map of key-value pairs that specifies load balancer annotations for ingress. You can use annotations to change the behavior of the load balancer and the Nginx configuration. This is an advanced setting. We recommend you consult with your assigned support team representative prior to modification. | Object | `{}` | -| `ingress.ingressStaticIP`| Specify a static IP address for the ingress load balancer service. If empty, a dynamic IP address will be assigned to the load balancer. 
| String | `""` | -| `ingress.terminateHTTPSAtLoadBalancer`| Specifies whether to terminate HTTPS at the load balancer. | Boolean | `false` | - - -```yaml -ingress: - enabled: true - ingress: - internal: false - certificate: "" - key: "" - annotations: {} - ingressStaticIP: "" - terminateHTTPSAtLoadBalancer: false -``` - -## Spectro Proxy - -You can specify a reverse proxy server that clusters deployed through Palette VerteX can use to facilitate network connectivity to the cluster's Kubernetes API server. Host clusters deployed in private networks can use the [Spectro Proxy pack](/integrations/frp) to expose the cluster's Kubernetes API to downstream clients that are not in the same network. Check out the [Reverse Proxy](/vertex/system-management/reverse-proxy) documentation to learn more about setting up a reverse proxy server for Palette VerteX. - - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `frps.enabled`| Specifies whether to enable the Spectro server-side proxy. | Boolean | `false` | -| `frps.frpHostURL`| The Spectro server-side proxy URL. | String | `""` | -| `frps.server.crt`| The base64-encoded server certificate for the Spectro server-side proxy. | String | `""` | -| `frps.server.key`| The base64-encoded server key for the Spectro server-side proxy. | String | `""` | -| `frps.ca.crt`| The base64-encoded certificate authority (CA) certificate for the Spectro server-side proxy. | String | `""` | - -```yaml -frps: - frps: - enabled: false - frpHostURL: "" - server: - crt: "" - key: "" - ca: - crt : "" -``` - -## UI System - -The table lists parameters to configure the Palette VerteX User Interface (UI) behavior. You can disable the UI or the Network Operations Center (NOC) UI. You can also specify the MapBox access token and style layer ID for the NOC UI. MapBox is a third-party service that provides mapping and location services. To learn more about MapBox and how to obtain an access token, refer to the [MapBox Access tokens](https://docs.mapbox.com/help/getting-started/access-tokens) guide. - - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `enabled`| Specifies whether to enable the Palette VerteX UI. | Boolean | `true` | -| `ui.nocUI.enable`| Specifies whether to enable the Palette VerteX Network Operations Center (NOC) UI. Enabling this parameter requires the `ui.nocUI.mapBoxAccessToken`. Once enabled, all cluster locations will be reported to MapBox. This feature is not FIPS compliant. | Boolean | `false` | -| `ui.nocUI.mapBoxAccessToken`| The MapBox access token for the Palette VerteX NOC UI. | String | `""` | -| `ui.nocUI.mapBoxStyledLayerID`| The MapBox style layer ID for the Palette VerteX NOC UI. | String | `""` | - - - -```yaml -ui-system: - enabled: true - ui: - nocUI: - enable: false - mapBoxAccessToken: "" - mapBoxStyledLayerID: "" -``` - - - - -# Reach System - -You can configure Palette VerteX to use a proxy server to access the internet. Set the parameter `reach-system.reachSystem.enabled` to `true` to enable the proxy server. Proxy settings are configured in the `reach-system.reachSystem.proxySettings` section. - - -| **Parameters** | **Description** | **Type** | **Default value** | -| --- | --- | --- | --- | -| `reachSystem.enabled`| Specifies whether to enable the usage of a proxy server for Palette VerteX. | Boolean | `false` | -| `reachSystem.proxySettings.http_proxy`| The HTTP proxy server URL. 
| String | `""` | -| `reachSystem.proxySettings.https_proxy`| The HTTPS proxy server URL. | String | `""` | -| `reachSystem.proxySettings.no_proxy`| A list of hostnames or IP addresses that should not be proxied. | String | `""` | - - - ```yaml - reach-system: - reachSystem: - enabled: false - proxySettings: - http_proxy: "" - https_proxy: "" - no_proxy: - ``` diff --git a/content/docs/12.5-vertex/60-system-management.md b/content/docs/12.5-vertex/60-system-management.md deleted file mode 100644 index e564fc65ed..0000000000 --- a/content/docs/12.5-vertex/60-system-management.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: "System Management" -metaTitle: "System Management" -metaDescription: "Manage your Palette VerteX system settings." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -## Overview - -Palette VerteX contains many system settings you can configure to meet your organization's needs. These settings are available at the system level and are applied to all [tenants](/glossary-all#tenant) in the system. - -You can access the system setting by visiting the IP address or the custom domain name assigned to your Palette VerteX cluster and appending the `/system` path to the URL. For example, if your Palette VerteX cluster is hosted at `https://vertex.abc.com`, you can access the system settings at `https://vertex.abc.com/system`. - - -
- -![View of the VerteX system console landing page.](/vertex_system-management_overview-system-console.png) - -
- - - -Exercise caution when changing system settings as the changes will be applied to all tenants in the system. - - - - -# Resources - -* [Enable non-FIPS Settings](/vertex/system-management/enable-non-fips-settings) - - -* [Tenant Management](/vertex/system-management/tenant-management) - - -* [SSL Certificate Management](/vertex/system-management/ssl-certificate-management) - -
- -
\ No newline at end of file diff --git a/content/docs/12.5-vertex/60-system-management/05-enable-non-fips-settings.md b/content/docs/12.5-vertex/60-system-management/05-enable-non-fips-settings.md deleted file mode 100644 index 50d702a228..0000000000 --- a/content/docs/12.5-vertex/60-system-management/05-enable-non-fips-settings.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: "Enable non-FIPS Settings" -metaTitle: "Enable non-FIPS Settings" -metaDescription: "Enable settings in Palette VerteX that allow you to use non-FIPS resources and perform non-FIPS compliant actions." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -Palette VerteX is FIPS-enforced by default, incorporating the Spectro Cloud Cryptographic Module into the Kubernetes Management Platform and the infrastructure components of target clusters. To learn more about our cryptographic library, check out [FIPS 140-2 Certification](/compliance#fips140-2). - -If desired, you can allow the consumption of certain non-FIPS functionality in Palette VerteX at the tenant level. **Platform Settings** at the tenant level provide toggles to allow non-FIPS-compliant add-on packs and non-FIPS features such as scans, backup, and restore. You can also allow importing clusters created external to Palette. - - -# Resources - -- [Use non-FIPS Add-On Packs](/vertex/system-management/enable-non-fips-settings/use-non-fips-addon-packs) - - -- [Use non-FIPS Features](/vertex/system-management/enable-non-fips-settings/use-non-fips-features) - - -- [Allow Cluster Import](/vertex/system-management/enable-non-fips-settings/allow-cluster-import) - - -- [Spectro Cloud FIPS 140-2 Certification](/compliance#fips140-2) - -
- diff --git a/content/docs/12.5-vertex/60-system-management/05-enable-non-fips-settings/03-use-non-fips-addon-packs.md b/content/docs/12.5-vertex/60-system-management/05-enable-non-fips-settings/03-use-non-fips-addon-packs.md deleted file mode 100644 index cc4d7d52fc..0000000000 --- a/content/docs/12.5-vertex/60-system-management/05-enable-non-fips-settings/03-use-non-fips-addon-packs.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: "Use non-FIPS Add-On Packs" -metaTitle: "Use non-FIPS Add-On Packs" -metaDescription: "Add non-FIPS add-on packs to VerteX cluster profiles." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -Palette VerteX provides the following FIPS-compliant infrastructure components in Kubernetes clusters it deploys. Review [FIPS-Compliant Components](/vertex/fips/fips-compliant-components) to learn more. - -
- - -- Operating System (OS) - -- Kubernetes - -- Container Network Interface (CNI) - -- Container Storage Interface (CSI) - -As shown in the screenshot below, the FIPS-compliant icon used to indicate full FIPS compliance is displayed next to Palette VerteX infrastructure components in the cluster profile stack. To learn about other icons Palette VerteX applies, refer to [FIPS Status Icons](/vertex/fips/fips-status-icons). - -![Diagram showing FIPS-compliant icons in profile stack.](/vertex_fips-status-icons_icons-in-profile-stack.png) - -You can allow tenant users to customize their cluster profiles by using add-on packs, which *may not* be FIPS compliant. Add-on packs enhance cluster functionality by adding profile layers such as system apps, authentication, security, monitoring, logging, and more. - - -# Prerequisites - -- You need tenant admin permission to enable this feature. - - -# Allow Non-FIPS Add-On Packs - - -1. Log in to [Palette VerteX](https://console.spectrocloud.com/) as a tenant admin. - - -2. Navigate to the left **Main Menu** and click on **Tenant Settings**. - - -3. On the **Tenant Settings Menu**, select **Platform Settings**. - - -4. Enable the **Allow non-FIPS add-on packs** option. When you enable this option, you are prompted to confirm the use of non-FIPS add-on packs for the tenant. - - -![Diagram showing the Allow non-FIPS add-on packs toggle enabled.](/vertex_use-non-fips-settings_nonFips-addon-packs.png) - - -To disable the setting, toggle this option off and confirm you want to disable it. - -When you or other users add a pack to a cluster profile. Palette VerteX will apply the appropriate icon next to packs and imported clusters to indicate their FIPS compliance status. - - -# Validate - - -1. Log in to [Palette VerteX](https://console.spectrocloud.com/). - - -2. Navigate to the left **Main Menu** and select **Profiles**. When you select a profile, the **Add New Pack** option is available. - - - -3. Navigate back to the **Main Menu** and re-select **Profiles**. - - -4. Click the **Add Cluster Profile** button. The **Add-on** option is available in the wizard. - - -Palette VerteX will display the appropriate FIPS status icon next to the pack layer and in the profile stack. - - -# Resources - -- [Packs List](/integrations) - - -- [Create an Add-on Profile](/cluster-profiles/create-add-on-profile) - - -- [FIPS Status Icons](/vertex/fips/fips-status-icons) - - - diff --git a/content/docs/12.5-vertex/60-system-management/05-enable-non-fips-settings/06-use-non-fips-features.md b/content/docs/12.5-vertex/60-system-management/05-enable-non-fips-settings/06-use-non-fips-features.md deleted file mode 100644 index e8724e3b8f..0000000000 --- a/content/docs/12.5-vertex/60-system-management/05-enable-non-fips-settings/06-use-non-fips-features.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: "Use non-FIPS Features" -metaTitle: "Use non-FIPS Features" -metaDescription: "Use non-FIPS features such as backup, restore, and scans." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -You can allow tenant users access to Palette features that are *not* FIPS-compliant, such as tenant cluster backup and restore or various scanning capabilities for compliance, security, validation, and software bill of materials (SBOM). 

Prior to enabling non-FIPS features, the **Scan** and **Backups** tabs are not displayed on the **Cluster Overview** page. - -# Prerequisites - -- You need tenant admin permission to enable this feature. - - -- Palette can back up clusters to several locations. To learn about backup requirements, review [Backup-Restore](/clusters/cluster-management/backup-restore). - - -- There are no prerequisites for restoring clusters or performing scans. - - -# Allow non-FIPS Features - - -1. Log in to [Palette VerteX](https://console.spectrocloud.com/) as a tenant admin. - - -2. Navigate to the left **Main Menu** and click on **Tenant Settings**. - - -3. On the **Tenant Settings Menu**, select **Platform Settings**. - - -4. Enable the **Allow non-FIPS features** option. When you enable this option, you are prompted to confirm the use of non-FIPS features for the tenant. - -![Diagram showing the Allow non-FIPS features toggle enabled.](/vertex_use-non-fips-settings_nonFips-features.png) - - -To disable the setting, toggle this option off and confirm you want to disable it. - -# Validate - - -1. Log in to [Palette VerteX](https://console.spectrocloud.com/). - - -2. Navigate to the left **Main Menu** and click on **Clusters**. - - -3. Select your cluster in the list. The **Scan** and **Backups** tabs are now displayed on the **Cluster Overview** page. - - -# Resources - -- [Cluster Backup and Restore](/clusters/cluster-management/backup-restore) - - -- [Scans](/clusters/cluster-management/compliance-scan) - diff --git a/content/docs/12.5-vertex/60-system-management/05-enable-non-fips-settings/09-allow-cluster-import.md b/content/docs/12.5-vertex/60-system-management/05-enable-non-fips-settings/09-allow-cluster-import.md deleted file mode 100644 index 94daa1b6a9..0000000000 --- a/content/docs/12.5-vertex/60-system-management/05-enable-non-fips-settings/09-allow-cluster-import.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: "Allow Cluster Import" -metaTitle: "Allow Cluster Import" -metaDescription: "Learn how to import clusters to Palette VerteX." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Overview - -You can allow tenant users to import Kubernetes clusters that were not deployed through Palette, including some that *are not* FIPS compliant or are only *partially* compliant. Prior to enabling cluster import, the **Import Cluster** option is not available. - -Palette VerteX displays icons next to clusters to indicate their FIPS compliance status or when FIPS compliance cannot be confirmed. To learn about icons that Palette VerteX applies, refer to [FIPS Status Icons](/vertex/fips/fips-status-icons). - - - -# Prerequisites - -- You need tenant admin permission to enable this feature. - - -- Refer to [Cluster Import Prerequisites](/clusters/imported-clusters/cluster-import#prerequisites). - - -# Allow non-FIPS Cluster Import - - -1. Log in to [Palette VerteX](https://console.spectrocloud.com/) as a tenant admin. - - -2. Navigate to the left **Main Menu** and click on **Tenant Settings**. Next, on the **Tenant Settings Menu**, select **Platform Settings**. - - -3. Enable the **Allow non-FIPS cluster import** option. When you enable this option, you are prompted to confirm importing clusters into the tenant that may not be FIPS-compliant. 
- -![Diagram showing the Allow non-FIPS cluster import toggle enabled.](/vertex_use-non-fips-settings_nonFips-cluster-import.png) - -To disable the setting, toggle this option off and confirm you want to disable it. - -Refer to [Import a Cluster](/clusters/imported-clusters/cluster-import) for guidance. Check out [Import Modes](/clusters/imported-clusters#importmodes) to learn about various import modes and limitations to be aware of. - - -# Validate - -1. Log in to [Palette VerteX](https://console.spectrocloud.com/). - - -2. Navigate to the left **Main Menu** and select **Clusters**. - - -3. Click on **Add New Cluster**. The **Import Cluster** option is now displayed on the **Create a New Cluster** page. - - -# Resources - -- [Import a Cluster](/clusters/imported-clusters/cluster-import) - - -- [Import Modes](/clusters/imported-clusters#importmodes) - - -- [Cluster Import Limitations](/clusters/imported-clusters#limitations) - - diff --git a/content/docs/12.5-vertex/60-system-management/10-tenant-management.md b/content/docs/12.5-vertex/60-system-management/10-tenant-management.md deleted file mode 100644 index b9c559ac9a..0000000000 --- a/content/docs/12.5-vertex/60-system-management/10-tenant-management.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: "Tenant Management" -metaTitle: "Tenant Management" -metaDescription: "Learn how to create and remove tenants in Palette VerteX." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -Tenants are isolated environments in Palette VerteX that contain their own clusters, users, and resources. You can create multiple tenants in Palette VerteX to support multiple teams or projects. Instructions for creating and removing tenants are provided below. - - -
- -# Create a Tenant - -You can create a tenant in Palette VerteX by following these steps. - - -## Prerequisites - -* Access to the Palette VerteX system console. - - -## Enablement - -1. Log in to the Palette VerteX system console. - - -2. Navigate to the left **Main Menu** and select **Tenant Management**. - - -3. Click **Create New Tenant**. - - -4. Fill out the **Org Name** and the properties of the admin user by providing the **First Name**, **Last Name**, and **Email**. - - -5. Confirm your changes. - - -6. From the tenant list view, find your newly created tenant and click on the **three dots Menu**. Select **Activate** to activate the tenant. - -
- - ![View of a tenant activation option](/vertex_system-management_tenant-management_activate-tenant.png) - -
- -7. A pop-up box will present you with an activation URL. Copy the URL and paste it into your browser to activate the tenant. - - -8. Provide the admin user with a new password. - - -9. Log in to the tenant console using the admin user credentials. - - -## Validate - -1. Log in to Palette VerteX. - - -2. Verify you can access the tenant as the admin user. - - - -# Remove a Tenant - -You can remove a tenant in Palette VerteX using the following steps. - -## Prerequisites - -* Access to the Palette VerteX system console. - -## Removal - -1. Log in to the Palette VerteX system console. - - -2. Navigate to the left **Main Menu** and select **Tenant Management**. - - -3. From the tenant list view, select the tenant you want to remove and click on the **three dots Menu**. - - -4. Select **Delete** to prepare the tenant for removal. - - -5. Click on your tenant's **three dots Menu** and select **Clean up** to remove all the tenant's resources. - -
- - ![View of a tenant deletion option](/vertex_system-management_tenant-management_remove-tenant.png) - - -
- - - - If you do not clean up the tenant's resources, such as clusters and Private Cloud Gateways (PCGs), the tenant will remain in a **Deleting** state. You can use **Force Cleanup & Delete** to proceed with deletion without manually cleaning up tenant resources. - - - - -After the cleanup process completes, the tenant will be removed from the tenant list view. - -## Validate - - -1. Log in to the Palette VerteX system console. - - -2. Navigate to the left **Main Menu** and select **Tenant Management**. - - -3. Validate that the tenant was removed by checking the tenant list view. \ No newline at end of file diff --git a/content/docs/12.5-vertex/60-system-management/30-ssl-certificate-management.md b/content/docs/12.5-vertex/60-system-management/30-ssl-certificate-management.md deleted file mode 100644 index 35d7d8fc45..0000000000 --- a/content/docs/12.5-vertex/60-system-management/30-ssl-certificate-management.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: "SSL Certificate Management" -metaTitle: "SSL Certificate" -metaDescription: "Upload and manage SSL certificates in Palette VerteX." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -When you install Palette VerteX, a self-signed certificate is generated and used by default. You can upload your own SSL certificate to replace the default certificate. - -Palette VerteX uses SSL certificates to secure external communication. Palette VerteX's internal communication is default secured by default and uses HTTPS. External communication with Palette VerteX, such as the system console, gRPC endpoint, and API endpoint, requires you to upload an SSL certificate to enable HTTPS. - -
- - - -Enabling HTTPS is a non-disruptive operation. You can enable HTTPS at any time without affecting the system's functionality. - - - - -# Upload an SSL Certificate - -You can upload an SSL certificate in Palette VerteX by using the following steps. - - -## Prerequisites - -- Access to the Palette VerteX system console. - - -- You need to have an x509 certificate and a key file in PEM format. The certificate file must contain the full certificate chain. Reach out to your network administrator or security team if you do not have these files. - - -- Ensure the certificate is created for the custom domain name you specified for your Palette VerteX installation. If you did not specify a custom domain name, the certificate must be created for the Palette VerteX system console's IP address. You can also specify a load balancer's IP address if you are using a load balancer to access Palette VerteX. - - -## Enablement - -1. Log in to the Palette VerteX system console. - - -2. Navigate to the left **Main Menu** and select **Administration**. - - -3. Select the tab titled **Certificates**. - - -4. Copy and paste the certificate into the **Certificate** field. - - -5. Copy and paste the certificate key into the **Key** field. - - -6. Copy and paste the certificate authority into the **Certificate authority** field. - - -
- - ![A view of the certificate upload screen](/vertex_system-management_ssl-certifiacte-management_certificate-upload.png) - -
- -7. Save your changes. - -If the certificate is invalid, you will receive an error message. Once the certificate is uploaded successfully, Palette VerteX will refresh its listening ports and start using the new certificate. - - -## Validate - -You can validate that your certificate is uploaded correctly by using the following steps. - -
- - -1. Log out of the Palette VerteX system console. If you are already logged in, log out and close your browser session. Browsers cache connections and may not use the newly enabled HTTPS connection. Closing your existing browser session avoids issues related to your browser caching an HTTP connection. - - -2. Log back into the Palette VerteX system console. Ensure the connection is secure by checking the URL. The URL should start with `https://`. - - -Palette VerteX is now using your uploaded certificate to create a secure HTTPS connection with external clients. Users can now securely access the system console, gRPC endpoint, and API endpoint. \ No newline at end of file diff --git a/content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md b/content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md deleted file mode 100644 index 952bf18e23..0000000000 --- a/content/docs/12.5-vertex/60-system-management/50-reverse-proxy.md +++ /dev/null @@ -1,258 +0,0 @@ ---- -title: "Configure Reverse Proxy" -metaTitle: "Configure Reverse Proxy" -metaDescription: "Learn how to configure a reverse proxy for Palette VerteX." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Overview - -You can configure a reverse proxy for Palette VerteX. The reverse proxy can be used by host clusters deployed in a private network. Host clusters deployed in a private network are not accessible from the public internet or by users in different networks. You can use a reverse proxy to access the cluster's Kubernetes API server from a different network. - -When you configure reverse proxy server for Palette VerteX, clusters that use the [Spectro Proxy pack](/integrations/frp) will use the reverse proxy server address in the kubeconfig file. Clusters not using the Spectro Proxy pack will use the default cluster address in the kubeconfig file. - - -Use the following steps to configure a reverse proxy server for Palette VerteX. - -# Prerequisites - - -- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) is installed and available. - - -- [Helm](https://helm.sh/docs/intro/install/) is installed and available. - - -- Access to the kubeconfig file of the Palette VerteX Kubernetes cluster. You can download the kubeconfig file from the Palette VerteX system console. Navigate to **Enterprise System Migration**, select the Palette VerteX cluster, and click the **Download Kubeconfig** button for the cluster. - - -- A domain name that you can use for the reverse proxy server. You will also need access to the DNS records for the domain so that you can create a CNAME DNS record for the reverse proxy server load balancer. - - -- Ensure you have an SSL certificate that matches the domain name you will assign to Spectro Proxy. You will need this to enable HTTPS encryption for the Spectro Proxy. Contact your network administrator or security team to obtain the SSL certificate. You need the following files: - - x509 SSL certificate file in base64 format. - - - x509 SSL certificate key file in base64 format. - - - x509 SSL certificate authority file in base64 format. - - -- The Spectro Proxy server must have internet access and network connectivity to the private network where the Kubernetes clusters are deployed. - - -# Enablement - -1. Open a terminal session and navigate to the directory where you stored the **values.yaml** for the Palette VerteX installation. 
- - -2. Use a text editor and open the **values.yaml** file. Locate the `frps` section and update the following values in the **values.yaml** file. Refer to the [Spectro Proxy Helm Configuration](/enterprise-version/helm-chart-install-reference/#spectroproxy) to learn more about the configuration options. - -
- 
-   | **Parameter** | **Description** | **Type** |
-   | --- | --- | --- |
-   | `enabled`| Set to `true` to enable the Spectro Proxy server. | boolean |
-   | `frps.frpHostURL`| The domain name you will use for the Spectro Proxy server. For example, `frps.example.com`. | string |
-   | `server.crt`| The x509 SSL certificate file in base64 format. | string |
-   | `server.key`| The x509 SSL certificate key file in base64 format. | string |
-   | `ca.crt`| The x509 SSL certificate authority file in base64 format. | string |
- 
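- 
-   The certificate, key, and certificate authority values must be supplied as base64-encoded strings. The following is a minimal sketch of producing those values, assuming your PEM files are named **server.crt**, **server.key**, and **ca.crt** (hypothetical file names) and that you are on a Linux host with GNU coreutils available:
- 
-   ```bash
-   # Encode each PEM file as a single-line base64 string for the values.yaml fields above.
-   base64 --wrap=0 server.crt
-   base64 --wrap=0 server.key
-   base64 --wrap=0 ca.crt
-   ```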
- - The following is an example of the `frps` section in the **values.yaml** file. The SSL certificate files are truncated for brevity. - -
- 
-   ```yaml
-   frps:
-     frps:
-       enabled: true
-       frpHostURL: "frps.vertex.example.com"
-       server:
-         crt: "LS0tLS1CRU...........tCg=="
-         key: "LS0tLS1CRU...........tCg=="
-         ca:
-           crt: "LS0tLS1CRU...........tCg=="
-   ```
- 
- 
-3. Issue the `helm upgrade` command to update the Palette VerteX Kubernetes configuration. The command below assumes you are in the folder that contains the **values.yaml** file and the Palette VerteX Helm chart. Change the directory path if needed.
- 
- - ```bash - helm upgrade --values values.yaml hubble spectro-mgmt-plane-0.0.0.tgz --install - ``` - - -4. After the new configurations are accepted, use the following command to get the Spectro Proxy server's load balancer IP address. - -
- - ```bash - kubectl get svc --namespace proxy-system spectro-proxy-svc - ``` -5. Update the DNS records for the domain name you used for the Spectro Proxy server. Create a CNAME record that points to the Spectro Proxy server's load balancer IP address. - - -6. Log in to the Palette VerteX System API by using the `/v1/auth/syslogin` endpoint. Use the `curl` command below and replace the URL with the custom domain URL you assigned to Palette VerteX or use the IP address. Ensure you replace the credentials below with your system console credentials. - -
- - ```bash - curl --insecure --location 'https://vertex.example.com/v1/auth/syslogin' \ - --header 'Content-Type: application/json' \ - --data '{ - "password": "**********", - "username": "**********" - }' - ``` - Output - ```json hideClipboard - { - "Authorization": "**********.", - "IsPasswordReset": true - } - ``` - -7. Using the output you received, copy the authorization value to your clipboard and assign it to a shell variable. Replace the authorization value below with the value from the output. - -
- - ```shell hideClipboard - TOKEN=********** - ``` - -8. Next, prepare a payload for the`/v1/system/config/` endpoint. This endpoint is used to configure Palette VerteX to use a reverse proxy. The payload requires the following parameters: - -
- - | **Parameter** | **Description** | **Type** | - | --- | --- | --- | - | `caCert`| The x509 SSL certificate authority file in base64 format. | string | - | `clientCert`| The x509 SSL certificate file in base64 format. | string | - | `clientKey`| The x509 SSL certificate key file in base64 format. | string | - | `port` | The port number for the reverse proxy server. We recommend using port `443`. | integer | - | `protocol` | The protocol to use for the reverse proxy server. We recommend using `https`. | string | - | `server`| The domain name you will use for the Spectro Proxy server. For example, `frps.example.com`. Don't include the HTTP schema in the value. | string | - - The following is an example payload. The SSL certificate files are truncated for brevity. - -
- - ```json hideClipboard - { - "caCert": "-----BEGIN CERTIFICATE-----\n.............\n-----END CERTIFICATE-----", - "clientCert": "-----BEGIN CERTIFICATE-----\n..........\n-----END CERTIFICATE-----", - "clientKey": "-----BEGIN RSA PRIVATE KEY-----\n........\n-----END RSA PRIVATE KEY-----", - "port": 443, - "protocol": "https", - "server": "frps.vertex.example.com.com" - } - ``` - - - - You can save the payload to a file and use the `cat` command to read the file contents into the `curl` command. For example, if you save the payload to a file named `payload.json`, you can use the following command to read the file contents into the `curl` command. You can also save the payload as a shell variable and use the variable in the `curl` command. - - - - -
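- 
-   For example, the following is a minimal sketch that sends the payload from a file, assuming you saved it as **payload.json** (a hypothetical file name) and that the `TOKEN` variable from the earlier step is still set. Replace the URL with the custom domain URL you assigned to Palette VerteX or use the IP address. Alternatively, `curl` can read the file directly with `--data @payload.json`.
- 
-   ```bash
-   # Load the payload into a shell variable and submit it with curl.
-   PAYLOAD=$(cat payload.json)
-
-   curl --insecure --location --request PUT 'https://vertex.example.com/v1/system/config/reverseproxy' \
-    --header "Authorization: $TOKEN" \
-    --header 'Content-Type: application/json' \
-    --data "$PAYLOAD"
-   ```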
- -9. Issue a PUT request using the following `curl` command. Replace the URL with the custom domain URL you assigned to Palette VerteX or use the IP address. You can use the `TOKEN` variable you created earlier for the authorization header. Ensure you replace the payload below with the payload you created in the previous step. - -
- - ```bash - curl --insecure --silent --include --output /dev/null -w "%{http_code}" --location --request PUT 'https://vertex.example.com/v1/system/config/reverseproxy' \ - --header "Authorization: $TOKEN" \ - --header 'Content-Type: application/json' \ - --data ' { - "caCert": "-----BEGIN CERTIFICATE-----\n................\n-----END CERTIFICATE-----\n", - "clientCert": "-----BEGIN CERTIFICATE-----\n.............\n-----END CERTIFICATE-----", - "clientKey": "-----BEGIN RSA PRIVATE KEY-----\n............\n-----END RSA PRIVATE KEY-----\n", - "port": 443, - "protocol": "https", - "server": "frps.vertex.example.com.com" - }' - ``` - - A successful response returns a `204` status code. - - Output - ```shell hideClipboard - 204 - ``` - -You now have a Spectro Proxy server that you can use to access Palette VerteX clusters deployed in a different network. Make sure you add the [Spectro Proxy pack](/integrations/frp) to the clusters you want to access using the Spectro Proxy server. - - -# Validate - -Use the following command to validate that the Spectro Proxy server is active. - -
- - - -1. Open a terminal session. - - -2. Log in to the Palette VerteX System API by using the `/v1/auth/syslogin` endpoint. Use the `curl` command below and replace the URL with the custom domain URL you assigned to Palette VerteX or use the IP address. Ensure you replace the credentials below with your system console credentials. - -
- - ```bash - curl --insecure --location 'https://vertex.example.com/v1/auth/syslogin' \ - --header 'Content-Type: application/json' \ - --data '{ - "password": "**********", - "username": "**********" - }' - ``` - Output - ```json hideClipboard - { - "Authorization": "**********.", - "IsPasswordReset": true - } - ``` - -3. Using the output you received, copy the authorization value to your clipboard and assign it to a shell variable. Replace the authorization value below with the value from the output. - -
- - ```shell hideClipboard - TOKEN=********** - ``` - -4. Query the system API endpoint `/v1/system/config/reverseproxy` to verify the current reverse proxy settings applied to Palette VerteX. Use the `curl` command below and replace the URL with the custom domain URL you assigned to Palette VerteX or use the IP address. You can use the `TOKEN` variable you created earlier for the authorization header. - -
- - ```bash - curl --location --request GET 'https://vertex.example.com/v1/system/config/reverseproxy' \ - --header "Authorization: $TOKEN" - ``` - - If the proxy server is configured correctly, you will receive an output similar to the following containing your settings. The SSL certificate outputs are truncated for brevity. - -
- - ```json hideClipboard - { - "caCert": "-----BEGIN CERTIFICATE-----\n...............\n-----END CERTIFICATE-----\n", - "clientCert": "-----BEGIN CERTIFICATE-----\n...........\n-----END CERTIFICATE-----", - "clientKey": "-----BEGIN RSA PRIVATE KEY-----\n........\n-----END RSA PRIVATE KEY-----\n", - "port": 443, - "protocol": "https", - "server": "frps.vertex.example.com" - } - ``` \ No newline at end of file diff --git a/content/docs/13-terraform.md b/content/docs/13-terraform.md deleted file mode 100644 index 83067743bd..0000000000 --- a/content/docs/13-terraform.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: "Palette Terraform Support" -metaTitle: "Palette Terraform Support" -metaDescription: "Understanding, installing and operating Spectro Cloud's Terraform Provider." -icon: "terraform" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Overview - -Palette supports the open-source Infrastructure as Code (IaC) software tool, [Terraform](https://www.terraform.io/), to provide consistent CLI workflow support to multiple cloud services. - -Terraform organizes cloud APIs into declarative, configuration files. Terraform supports the ability to write configuration files, checks whether the execution plan for a configuration matches your expectations (before deployment), and applies the changes to all the managed resources. - -# Spectro Cloud Provider - -Spectro Cloud Palette's SaaS and On-Premise management API can be used with the Spectro Cloud Terraform provider. The provider is available in the HashiCorp Terraform registry as [Spectro Cloud Provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs). -
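- 
-As a rough sketch of the workflow, once you have a Terraform configuration that declares the `spectrocloud` provider, the standard Terraform commands download the provider from the registry and show what was installed:
- 
-```bash
-# Download the providers declared in the configuration of the current working directory.
-terraform init
-
-# List the providers required by the configuration and the installed versions.
-terraform providers
-```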
- -### Release Notes -Information about the latest changes in the Spectro Cloud provider can be found in the [release notes](https://github.com/spectrocloud/terraform-provider-spectrocloud/releases). -
- -### Provider Documentation -Detailed documentation on supported data sources and resources are available in the Terraform Spectro Cloud Provider [documentation](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) page. -
- -## Prerequisites -The Spectro Cloud provider has the following requirements: -* Spectro Cloud Palette account - [Sign up for a free trial account](https://www.spectrocloud.com/free-trial) -* Terraform (minimum version 0.13+) -* Kubernetes/kubectl CLI (minimum version 1.16+) -
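- 
-As a quick sanity check against these requirements, assuming both CLIs are installed locally and available on your `PATH`:
- 
-```bash
-# Confirm the installed Terraform version meets the 0.13+ requirement.
-terraform version
-
-# Confirm the installed kubectl client version meets the 1.16+ requirement.
-kubectl version --client
-```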
- -## Usage - -For an end-to-end cluster provisioning example, check out the [end-to-end examples](https://github.com/spectrocloud/terraform-provider-spectrocloud/tree/main/examples/e2e). - -You can find resource examples in the [resource directory](https://registry.terraform.io/providers/spectrocloud/spectrocloud). - - -## Modules - -There are two modules available to help you provision Spectro Cloud infrastructure resources. - -- [Palette Edge Native Terraform Module](https://registry.terraform.io/modules/spectrocloud/edge/spectrocloud/latest) -- [Spectro Cloud Terraform Modules](https://registry.terraform.io/modules/spectrocloud/modules/spectrocloud/latest) - -Review the [Spectro Cloud modules readme](https://github.com/spectrocloud/terraform-spectrocloud-modules#module-resources--requirements) document to learn more about supported provider versions and other requirements. diff --git a/content/docs/14-troubleshooting.md b/content/docs/14-troubleshooting.md deleted file mode 100644 index b4fb8b7df3..0000000000 --- a/content/docs/14-troubleshooting.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: "Troubleshooting" -metaTitle: "Common issues and their solutions" -metaDescription: "Common issues and their solutions in the deployment of Spectro Cloud Clusters" -icon: "tools" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Troubleshooting - -Use the following troubleshooting resources to help you address issues that may arise. You can also reach out to our support team by opening up a ticket through our [support page](http://support.spectrocloud.io/). - -
- 
-- [Kubernetes Debugging](/troubleshooting/kubernetes-tips)
-
-
-- [Cluster Deployment](/troubleshooting/cluster-deployment)
-
-
-- [Nodes & Clusters](/troubleshooting/nodes)
-
-
-- [Packs](/troubleshooting/pack-issues)
-
-
-- [Palette Dev Engine](/troubleshooting/palette-dev-engine)
-
-
-- [Edge](/troubleshooting/edge)
-
-
-- [Private Cloud Gateway](/troubleshooting/pcg)
-
-
-- [Palette Upgrade](/troubleshooting/palette-upgrade)
-
-
-
-## Download Cluster Logs
-At times, you might need to work with the Spectro Cloud support team to troubleshoot an issue. Spectro Cloud provides the ability to aggregate logs from the clusters it manages. Problems that occur during the orchestration life cycle may require access to the various container, node, and Kube system logs. Spectro Cloud automates this log collection process and provides an easy download option from the Spectro Cloud UI console. This reduces the burden on the operator, who would otherwise have to log in to the various cluster nodes individually and fetch these logs.
-
-Follow the link for more details: [Download Cluster Logs](/clusters/#downloadclusterlogs)
-
-## Event Stream
-
-Spectro Cloud maintains an event stream with low-level details of the various orchestration tasks being performed. This event stream is a good source for identifying issues when an operation does not complete for a long time.
- 
- - - - Due to Spectro Cloud’s reconciliation logic, intermittent errors show up in the event stream. As an example, after launching a node, errors might show up in the event stream regarding being unable to reach the node. However, the errors clear up once the node comes up.

- Error messages that persist over a long time or errors indicating issues with underlying infrastructure are an indication of a real problem. - -
- 
- 
-## Lifecycle Behaviors
-
-Typically, when a cluster life cycle action such as provisioning, upgrade, or deletion runs into a failure, it does not result in an outright error on the cluster. The Spectro Cloud orchestration engine follows the reconciliation pattern wherein the system repeatedly tries to perform various orchestration tasks to bring the cluster to its desired state until it succeeds. Initial cluster provisioning or subsequent updates can run into a variety of issues related to cloud infrastructure availability, lack of resources, networking issues, and so on.
-
-## Cluster Conditions
-
-Spectro Cloud maintains specific milestones in a life cycle and presents them as “conditions”. Examples include: Creating Infrastructure, Adding Control Plane Node, Customizing Image, and so on. The active condition indicates what task Spectro Cloud’s orchestration system is trying to perform. If a task results in failures, the condition is marked as failed, with relevant error messages. Reconciliation, however, continues behind the scenes, and continuous attempts are made to perform the task. Failed conditions are a great source for troubleshooting provisioning issues.
-
-For example, a failure to create a virtual machine in AWS due to the vCPU limit being exceeded causes this error to be shown to the end users. They could choose to bring down some workloads in the AWS cloud to free up space. The next time a VM creation task is attempted, it would succeed and the condition would be marked as a success.
- 
\ No newline at end of file diff --git a/content/docs/14-troubleshooting/02-kubernetes-tips.md b/content/docs/14-troubleshooting/02-kubernetes-tips.md deleted file mode 100644 index ea831f3a4e..0000000000 --- a/content/docs/14-troubleshooting/02-kubernetes-tips.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "Kubernetes Debugging" -metaTitle: "Kubernetes Debugging Tips" -metaDescription: "Learn tips and tricks related to Kubernetes dubbging." -icon: "" -hideToC: false -fullWidth: false ---- -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Kubernetes Debug - -Spectro Cloud provisions standard, upstream Kubernetes clusters using `kubeadm` and `cluster-api`. The official Kubernetes documentation related to support and troubleshooting are great troubleshooting resources that you should also consider reviewing. The official Kubernetes [debugging guide](https://kubernetes.io/docs/tasks/debug-application-cluster/debug-cluster) is about cluster troubleshooting and offers excellent advice on how to resolve common issues that may arise. - - -## Log Tips - -The table below displays useful Kubernetes log types that can aid you in the debugging process. The [Kubernetes Logging Architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/) page is a good resource you can review to help gain a better understanding of the logging architecture. - -| **Log Type** | **Access Method** | -|----------|---------------| -|Kubelet |`journalctl -u kubelet`| -|Container | `kubectl logs` OR `/var/log/containers` and `/var/log/pods` | -| Previous Container| `kubectl logs --previous` \ No newline at end of file diff --git a/content/docs/14-troubleshooting/04-cluster-deployment.md b/content/docs/14-troubleshooting/04-cluster-deployment.md deleted file mode 100644 index 19c36852c4..0000000000 --- a/content/docs/14-troubleshooting/04-cluster-deployment.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: "Cluster Deployment" -metaTitle: "Troubleshooting steps for errors during a cluster deployment" -metaDescription: "Troubleshooting steps for errors during a cluster deployment." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Cluster Deployment Errors Scenarios - -The following steps will help you troubleshoot errors in the event issues arise while deploying a cluster. - - -## Scenario - Instances Continuously Delete Every 30 Minutes - -An instance is launched and terminated every 30 minutes prior to completion of its deployment, and the **Events Tab** lists errors with the following message: - -
- -```bash hideClipboard -Failed to update kubeadmControlPlane Connection timeout connecting to Kubernetes Endpoint -``` - -This behavior can occur when Kubernetes services for the launched instance fail to start properly. -Common reasons for why a service may fail are: - -- The specified image could not be pulled from the image repository. -- The cloud init process failed. - -### Debug Steps - -1. Initiate an SSH session with the Kubernetes instance using the SSH key provided during provisioning, and log in as user `spectro`. If you are initiating an SSH session into an installer instance, log in as user `ubuntu`. - - ```shell - ssh --identity_file <_pathToYourSSHkey_> spectro@X.X.X.X - ``` - -2. Elevate the user access. - - ```shell - sudo -i - ``` -3. Verify the Kubelet service is operational. - ```shell - systemctl status kubelet.service - ``` - -4. If the Kubelet service does not work as expected, do the following. If the service operates correctly, you can skip this step. - 1. Navigate to the **/var/log/** folder. - ```shell - cd /var/log/ - ``` - 2. Scan the **cloud-init-output** file for any errors. Take note of any errors and address them. - ``` - cat cloud-init-output.log - ``` - -5. If the kubelet service works as expected, do the following. - - Export the kubeconfig file. - - ```shell - export KUBECONFIG=/etc/kubernetes/admin.conf - ``` - - Connect with the cluster's Kubernetes API. - - ```shell - kubectl get pods --all-namespaces - ``` - - When the connection is established, verify the pods are in a *Running* state. Take note of any pods that are not in *Running* state. - - ```shell - kubectl get pods -o wide - ``` - - - If all the pods are operating correctly, verify their connection with the Palette API. - - For clusters using Gateway, verify the connection between the Installer and Gateway instance: - ```shell - curl -k https://:6443 - ``` - - For Public Clouds that do not use Gateway, verify the connection between the public Internet and the Kube endpoint: - ```shell - curl -k https://:6443 - ``` - - - You can obtain the URL for the Kubernetes API using this command: kubectl cluster-info - - -6. Check stdout for errors. You can also open a support ticket. Visit our [support page](http://support.spectrocloud.io/). - - -## Gateway Installer Registration Failures - -There are a couple reasons the Gateway Installer might fail: - -- A bootstrap error might have occurred. When the Gateway Installer VM is powered on, it initiates a bootstrap process and registers itself with the tenant portal. This process typically takes 5 to 10 minutes. If the installer fails to register with the tenant portal during this time, it indicates a bootstrapping error. - - To address the issue, SSH into the Installer virtual machine using the key provided during OVA import and inspect the log file located at *'/var/log/cloud-init-output.log'*. - - The log file contains error messages about any failures that occur while connecting to the Spectro Cloud management platform portal, authenticating, or downloading installation artifacts. - - A common cause for these errors is that the Spectro Cloud management platform console endpoint or the pairing code is typed incorrectly. Ensure that the tenant portal console endpoint does not have a trailing slash. If these properties were incorrectly specified, power down and delete the installer VM and re-launch with the correct values. - - -- The VM may not have an outbound connection. 
The Gateway Installer VM requires outbound connectivity directly or using a proxy. Adjust proxy settings, if applicable, to fix the connectivity or power down and delete the Installer VM, then relaunch it in a network that enables outbound connections. - -If these steps do not resolve the Gateway Installer issues, copy the following script to the Installer VM to generate a logs archive. Open a support ticket by visiting our [support page](http://support.spectrocloud.io/). Attach the logs archive so the Spectro Cloud Support team can troubleshoot the issue and provide further guidance: - -```bash -#!/bin/bash - -DESTDIR="/tmp/" - -CONTAINER_LOGS_DIR="/var/log/containers/" -CLOUD_INIT_OUTPUT_LOG="/var/log/cloud-init-output.log" -CLOUD_INIT_LOG="/var/log/cloud-init.log" -KERN_LOG="/var/log/kern.log" -KUBELET_LOG="/tmp/kubelet.log" -SYSLOGS="/var/log/syslog*" - -FILENAME=spectro-logs-$(date +%-Y%-m%-d)-$(date +%-HH%-MM%-SS).tgz - - -journalctl -u kubelet > $KUBELET_LOG - -tar --create --gzip -h --file=$DESTDIR$FILENAME $CONTAINER_LOGS_DIR $CLOUD_INIT_LOG $CLOUD_INIT_OUTPUT_LOG $KERN_LOG $KUBELET_LOG $SYSLOGS - -retVal=$? -if [ $retVal -eq 1 ]; then - echo "Error creating spectro logs package" -else - echo "Successfully extracted spectro cloud logs: $DESTDIR$FILENAME" -fi -``` - -## Gateway Cluster Provisioning Failures - -Installation of the Gateway cluster may run into errors or get stuck in the provisioning state for various reasons like lack of infrastructure resources, lack of availability of IP addresses, inability to perform NTP sync, etc. - -While these are the most common failures, some other issues might be related to the underlying VMware environment. The **Cluster Details** page, which you can access by clicking anywhere on the Gateway widget, contains details of every orchestration step, including an indication of the current task. - -Intermittent errors are displayed on the **Cluster Details** page next to the relevant orchestration task. The **Events** tab on this page also provides helpful insights into lower-level operations currently being performed. Suppose you believe the orchestration is stuck or failed due to an invalid selection of infrastructure resources or an intermittent problem with the infrastructure. You may reset the Gateway by clicking on the **Reset** button on the Gateway widget. The Gateway state will transition to Pending. A Gateway in the Pending state allows you to reconfigure the Gateway and start provisioning a new Gateway cluster. If the problem persists, don't hesitate to contact Spectro support via the Service Desk or our [support page](http://support.spectrocloud.io/). - -
\ No newline at end of file diff --git a/content/docs/14-troubleshooting/06-nodes.md b/content/docs/14-troubleshooting/06-nodes.md deleted file mode 100644 index c75bd9cbb7..0000000000 --- a/content/docs/14-troubleshooting/06-nodes.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: "Nodes & Clusters" -metaTitle: "Troubleshooting steps for Kubernetes nodes and clusters" -metaDescription: "Troubleshooting steps for Kubernetes nodes and clusters when managed by Palette." -icon: "" -hideToC: false -fullWidth: false ---- - -# Nodes & Clusters - -This page covers common debugging scenarios for nodes and clusters after they have been deployed. - -# Nodes - -## Scenario - Repaved Nodes - -Palette performs a rolling upgrade on nodes when it detects a change in the `kubeadm` config. Below are some actions that cause the `kubeadm` configuration to change and result in nodes being upgraded: -* OS layer changes -* Kubernetes layer changes -* Kubernetes version upgrade -* Kubernetes control plane upsize -* Machine pool updates for disk size -* Changes in availability zones -* Changes in instance types -* Certificate renewal - -Logs are provided in Palette for traceability. However, these logs may be lost when the pods are relaunched. To ensure that the cause and context is persisted across repaving, a field titled **upgrades** is available in the status section of [SpectroCluster object](https://docs.spectrocloud.com/api/v1/clusters/). This field is represented in the Palette UI so that you can understand why and when repaving happened. - -For detailed information, review the cluster upgrades [page](/clusters/#clusterupgradedetails). - -
- 
- 
-# Clusters
-
-## Scenario - vSphere Cluster and Stale ARP Table
-
-Sometimes vSphere clusters encounter issues where nodes that do not hold the assigned Virtual IP Address (VIP) cannot contact the node that holds the VIP. The problem is caused by Address Resolution Protocol (ARP) entries becoming stale on non-VIP nodes.
-
-To minimize this situation, vSphere clusters deployed through Palette now have a daemon set that cleans the ARP entry cache every five minutes. The cleaning process forces the nodes to periodically re-request an ARP entry of the VIP node. This is done automatically without any user action.
-
-You can verify the cleaning process by issuing the following command on non-VIP nodes and observing that the ARP cache is never older than 300 seconds.
- 
- -```shell -watch ip -statistics neighbour -``` - - -## EKS Cluster Worker Pool Failures - -If your EKS cluster worker pool ends up in `Failed`, `Create Failed` or `Error nodes failed to join` state, refer to the Amazon EKS [Runbook](https://docs.aws.amazon.com/systems-manager-automation-runbooks/latest/userguide/automation-awssupport-troubleshooteksworkernode.html -) for troubleshooting guidance. - -
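- 
-Before working through the runbook, a quick check from the cluster side can help narrow down the problem. This is a minimal sketch, assuming you can reach the cluster with `kubectl`:
- 
-```bash
-# List the nodes and check whether worker nodes are missing or stuck in a NotReady state.
-kubectl get nodes --output wide
-```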
- 
-## Palette Agents Workload Payload Size Issue
-
-
-A cluster comprised of many nodes can create a situation where the workload report data the agent sends to Palette exceeds the 1 MB threshold, and the messages fail to be delivered. If the agent encounters too many failed workload report deliveries, the agent container may transition into a *CrashLoopBackOff* state.
-
-If you encounter this scenario, you can configure the cluster to stop sending workload reports to Palette. To disable the workload report feature, create a *configMap* with the following configuration. Use a cluster profile manifest layer to create the configMap.
- 
- 
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: palette-agent-config
-  namespace: "cluster-{{ .spectro.system.cluster.uid }}"
-data:
-  feature.workloads: disable
-```
- 
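- 
-After the profile update rolls out, you can confirm the configMap exists on the cluster. This is a minimal check, assuming you have `kubectl` access to the affected cluster. Because the namespace name is generated from the cluster UID, searching across all namespaces is the simplest approach:
- 
-```bash
-# Look for the palette-agent-config configMap in any namespace.
-kubectl get configmaps --all-namespaces | grep palette-agent-config
-```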
\ No newline at end of file diff --git a/content/docs/14-troubleshooting/08-pack-issues.md b/content/docs/14-troubleshooting/08-pack-issues.md deleted file mode 100644 index 9eb0bbcce9..0000000000 --- a/content/docs/14-troubleshooting/08-pack-issues.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: "Packs" -metaTitle: "Troubleshooting steps for errors during a cluster deployment" -metaDescription: "Troubleshooting steps for errors during a cluster deployment." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Packs - -Packs documentation contains usage and other related documentation, such as troubleshooting steps. If any issue is encountered with Pack, visit the respective Pack documentation for troubleshooting steps. - -
diff --git a/content/docs/14-troubleshooting/10-palette-dev-engine.md b/content/docs/14-troubleshooting/10-palette-dev-engine.md deleted file mode 100644 index d61f4409a7..0000000000 --- a/content/docs/14-troubleshooting/10-palette-dev-engine.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: "Palette Dev Engine" -metaTitle: "Palette Dev Engine" -metaDescription: "Troubleshooting steps for errors encountered with Palette Dev Engine." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Palette Dev Engine (PDE) - -Use the following content to help you troubleshoot issues you may encounter when using Palette Dev Engine (PDE). - -
- - -# Resource Requests - -All [Cluster Groups](/clusters/cluster-groups) are configured with a default [*LimitRange*](https://kubernetes.io/docs/concepts/policy/limit-range/). The LimitRange configuration is in the Cluster Group's Virtual Cluster configuration section. Packs deployed to a virtual cluster should have the `resources:` section defined in the **values.yaml** file. Pack authors must specify the `requests` and `limits` or omit the section entirely to let the system manage the resources. - - -If you specify `requests` but not `limits`, the default limits imposed by the LimitRange will likely be lower than the requests, causing the following error. - -
- -```shell hideClipboard -Invalid value: "300m": must be less than or equal to CPU limit spec.containers[0].resources.requests: Invalid value: "512Mi": must be less than or equal to memory limit] -``` -
- -The workaround is to define both the `requests` and `limits`. - - -
\ No newline at end of file diff --git a/content/docs/14-troubleshooting/12-edge.mdx b/content/docs/14-troubleshooting/12-edge.mdx deleted file mode 100644 index d774af4d60..0000000000 --- a/content/docs/14-troubleshooting/12-edge.mdx +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: "Edge" -metaTitle: "Edge" -metaDescription: "Troubleshooting steps for common Edge scenarios." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Edge - -The following are common scenarios that you may encounter when using Edge. - -# Scenario - Override or Reconfigure Read-only File System Stage - -If you need to override or reconfigure the read-only file system, you can do so using the following steps. - -## Debug Steps - -
- -1. Power on the Edge host. - - -2. Press the keyboard key `E` after highlighting the menu in `grubmenu`. - - -3. Type `rd.cos.debugrw` and press `Enter`. - -![The grubmenu displays with the command rd.cos.debugrw typed in the terminal.](/troubleshooting_edge_grub-menu.png) - - -4. Press `Ctrl+X` to boot the system. - - -5. Make the required changes to the image. - - -6. Reboot the system to resume the default read-only file system. - -
\ No newline at end of file diff --git a/content/docs/14-troubleshooting/30-pcg.md b/content/docs/14-troubleshooting/30-pcg.md deleted file mode 100644 index 3eec895ba7..0000000000 --- a/content/docs/14-troubleshooting/30-pcg.md +++ /dev/null @@ -1,404 +0,0 @@ ---- -title: "Private Cloud Gateway" -metaTitle: "Private Cloud Gateway" -metaDescription: "Troubleshooting steps for deploying a Private Cloud Gateway." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Private Cloud Gateway - -When you deploy a Kubernetes cluster in a private data center environment, you must already have a Private Cloud Gateway (PCG) cluster deployed in the data center environment. A PCG enables secure communication between Palette and the private data center environment. - -The following are the high-level steps of deploying a PCG in a private data center environment: -
- -1. Initiate the installation in Palette. In this step, you get a pairing code and an installer image. -2. Deploy the PCG installer in the data center environment. -3. Configure the cloud gateway in Palette, and launch the PCG cluster. - -While deploying a PCG, you may encounter one of the following scenarios during the above-mentioned steps. Some scenarios below apply to all data center environments, whereas others apply to a specific data center environment, such as VMware. Each scenario covers a specific problem, including an overview, possible causes, and debugging steps. -
- -# Scenario - Jet CrashLoopBackOff - -After you finish configuring the PCG in Palette, Palette starts provisioning the PCG cluster. During the provisioning, one of the internal Palette components may undergo a *CrashLoopBackOff* state. - -The internal component, *Jet*, will transition to a healthy state once the PCG cluster is successfully registered with Palette. -
- -## Debug Steps - -Wait 10-15 minutes for the PCG installation to finish so that the internal component receives the required authorization token from Palette. Once the internal component is authorized, the PCG cluster will complete the initialization successfully. -
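- 
-If you prefer to confirm the recovery rather than only wait, the following is a minimal sketch, assuming you have `kubectl` access to the PCG cluster and that the pod name contains the component name:
- 
-```bash
-# Check whether the component's pod has left the CrashLoopBackOff state and reports Running.
-kubectl get pods --all-namespaces | grep --ignore-case jet
-```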
- -# Scenario - PCG Installer VM Unable to Register With Palette - -When deploying the PCG installer in VMware vSphere, you use an OVF template and then power on the PCG installer Virtual Machine (VM). After powering it on, the PCG installer goes through a bootstrap process and attempts to register with Palette. This process typically takes between five to ten minutes. - -If the installer fails to register with Palette within the expected timeframe, it could indicate a bootstrapping error. The error can occur due to network connectivity issues, incorrect pairing code, or an incorrect endpoint configuration for Palette in the PCG installer template settings. -
- -## Debug Steps - -
- -1. SSH into the PCG installer VM using the username `ubuntu` and the SSH key you provided during the OVA import. - - -2. Inspect the log file located at **/var/log/cloud-init-output.log**. - -
- - ```bash - cat /var/log/cloud-init-output.log - ``` - - The **cloud-init-output.log** file will contain error messages if there are failures with connecting to Palette, authenticating, or downloading installation artifacts. A common cause for these errors is incorrect values provided to the OVF template deployment wizard, such as the Palette endpoint or a mistyped pairing code. - - The screenshot below highlights the OVF template properties you must carefully configure and verify before deploying a PCG installer VM. - - ![A screenshot displaying the OVF template properties you configure while deploying the PCG installer VM](/troubleshooting-pcg-template_properties.png) - - -3. Double-check the accuracy of the pairing code used for the PCG installer VM. A pairing code is a unique authentication code Palette generates for each PCG installer instance. Confirm that it matches the value you copied from Palette. - - -4. Ensure the Palette endpoint is correct and has no trailing slash `/`. If you use Palette SaaS, the default endpoint is `https://console.spectrocloud.com`. If you are using a self-hosted Palette instance, use the domain name as applicable to you. If the Palette endpoint is incorrectly specified, relaunch a new PCG installer VM with the correct values. - - -5. Another potential issue may be a lack of outbound connectivity from the PCG installer VM to Palette. The installer VM needs to have outbound connectivity directly or via a proxy to download the installation artifacts from Spectro Cloud. Check for any network restrictions or firewall rules in the network settings that may block communication. Adjust the proxy settings, if applicable, to fix the connectivity. Alternatively, you can relaunch a new PCG installer VM in a network that supports outbound connections to Palette. - - -6. If the problem persists, issue the following command in the PCG installer VM to create a script to generate a log bundle. -
- - ``` bash - cat > pcg-debug.sh << 'EOF' - #!/bin/bash - DESTDIR="/tmp/" - CONTAINER_LOGS_DIR="/var/log/containers/" - CLOUD_INIT_OUTPUT_LOG="/var/log/cloud-init-output.log" - CLOUD_INIT_LOG="/var/log/cloud-init.log" - KERN_LOG="/var/log/kern.log" - KUBELET_LOG="/tmp/kubelet.log" - SYSLOGS="/var/log/syslog*" - FILENAME=spectro-logs-$(date +%-Y%-m%-d)-$(date +%-HH%-MM%-SS).tgz - journalctl -u kubelet > $KUBELET_LOG - tar --create --gzip -h --file=$DESTDIR$FILENAME $CONTAINER_LOGS_DIR $CLOUD_INIT_LOG $CLOUD_INIT_OUTPUT_LOG $KERN_LOG $KUBELET_LOG $SYSLOGS - retVal=$? - if [ $retVal -eq 1 ]; then - echo "Error creating spectro logs package" - else - echo "Successfully extracted spectro cloud logs: $DESTDIR$FILENAME" - fi - EOF - ``` - - - -7. Start the script to generate a log archive. By default, the script places the log archive in the **/tmp/** folder. The log archive file name starts with the prefix **spectro-logs-** followed by a timestamp value. -
- - ```shell - chmod +x pcg-debug.sh && ./pcg-debug.sh - ``` - - -8. Contact our support team by emailing [support@spectrocloud.com](mailto:support@spectrocloud.com) and attach the logs archive to the ticket so the support team can troubleshoot the issue and provide you with further guidance. - -
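- 
-Before attaching the archive to the ticket, you can confirm it was created. This is a minimal check to run on the PCG installer VM:
- 
-```bash
-# The script writes the bundle to the /tmp/ folder with a spectro-logs- prefix and a timestamp.
-ls -lh /tmp/spectro-logs-*.tgz
-```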
- -# Scenario - PCG Installer VM IP Address Assignment Error - -When deploying the PCG installer in VMware vSphere, you use an OVF template and then power on the PCG installer VM. After powering it on, the PCG installer VM may fail to get an IP address. - -If the PCG installer VM fails to get an IP address assigned, it implies a networking error or an incomplete cloud-init. The selected IP allocation scheme specified in the network settings of the PCG installer OVF template assigns an IP address to the PCG installer VM. The IP allocation scheme offers two options - static IP or DHCP. You must check the selected IP allocation scheme for troubleshooting. -
- -## Debug Steps -
- -1. If you chose the static IP allocation scheme, ensure you have correctly provided the values for the gateway IP address, DNS addresses, and static IP subnet prefix. Check that the subnet prefix you provided allows the creation of an IP pool with sufficient IP addresses to allocate to the new PCG installer VM. - - -2. If you chose the DHCP allocation scheme, check that the DHCP service is available on the DHCP server. Restart the service if it's not in an active state. - - -3. If the DHCP server is active, recheck the DHCP scope and the DHCP reservations. The DHCP scope defines the range of IP addresses that the DHCP server allocates on the selected network. You must have sufficient IP addresses from the DHCP scope for dynamic allocation. - - -4. If you chose the DHCP allocation scheme, ensure Dynamic DNS is enabled in the DHCP server. A Dynamic DNS is only required if you are using DHCP. Dynamic DNS is not required for a static IP allocation scheme. - - -5. If there are no network-related issues, SSH into the PCG installer VM using the username `ubuntu` and the SSH public key you provided during the OVA import step. Alternatively, you can open the web console of the PCG installer VM. - - -6. Inspect the log files in the **/var/log** directory. - - -7. Examine the cloud-init logs for potential errors or warnings related to the IP address assignment. - - -8. If the problem persists, email the log files to our support team at [support@spectrocloud.com](mailto:support@spectrocloud.com). - -
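- 
-As a shortcut for steps 5 through 7 above, the following minimal sketch, run from the PCG installer VM's console or an SSH session, shows whether an address was assigned and surfaces networking errors reported by cloud-init:
- 
-```bash
-# Check whether the primary network interface received an IP address.
-ip address show
-
-# Scan the cloud-init logs for errors or warnings related to the IP address assignment.
-sudo grep --ignore-case --extended-regexp "error|warn" /var/log/cloud-init.log /var/log/cloud-init-output.log
-```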
- -# Scenario - PCG Installer Deployment Failed - -When deploying the PCG installer in VMware, you deploy the OVF template and power on the PCG installer VM. If the VM instance is supposed to receive a public IP address and the deployment fails, you cannot configure the cloud gateway in Palette. - -The PCG installer deployment can fail due to internet connectivity or internal misconfigurations, such as an incorrect pairing code. -
- -## Debug Steps - -If the PCG installer VM has a public IP address assigned, you can access the PCG installer's deployment status and system logs from the monitoring console. Follow the steps below to review the deployment status and logs. -
- -1. Open a web browser on your local machine and visit the `https://[IP-ADDRESS]:5080` URL. Replace the `[IP-ADDRESS]` placeholder with your PCG installer VM's public IP address. - - -2. Provide the username and password when prompted. You can use the default installation credentials: - - username: admin - - password: admin - - -3. Once you are logged in, review the PCG installer's deployment status, system logs, and diagnostic tasks, as highlighted in the screenshot below. The monitoring console allows you to check the high-level status and download the individual log files. - - ![A screenshot of the monitoring console of the PCG installer.](/troubleshooting-pcg-monitoring_console.png) - - -4. If any of the statuses is not **Done** after waiting for a while, download the concerned logs. The screenshot below displays the **Logs** tab in the monitoring console. - - ![A screenshot of the logs in the monitoring console of the PCG installer.](/troubleshooting-pcg-monitoring_logs.png) - - -5. Examine the log files for potential errors and root causes. - - -6. Check if the deployment failed due to a lack of outbound internet connectivity from the PCG installer VM. Use the following steps to check outbound internet connectivity: - - SSH into the PCG installer VM using the username `ubuntu` and the public SSH key you provided during the OVA import. - - Use the ping command to check if the VM can reach a public IP address. For example, ping well-known public IPs like Google's public DNS server (8.8.8.8) or any other public IP address. -
- - ```bash - ping 8.8.8.8 - ``` - - If you receive responses from the ping requests, it indicates that the VM has outbound internet connectivity. - - Suppose you do not receive a response from the ping requests. In that case, go to the next step for further troubleshooting steps. - - -7. Check for any network restrictions or firewall rules in the data center's network settings that may block communication. Adjust the proxy settings, if applicable, to fix the connectivity. Alternatively, you can power down and delete the PCG installer VM and relaunch a new one in a network that supports outbound internet connections. - - -8. If the problem persists, email the log files to our support team at [support@spectrocloud.com](mailto:support@spectrocloud.com). -
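- 
-If the monitoring console itself does not load in the browser, you can first confirm the endpoint is reachable from your workstation. This is a minimal sketch; replace `[IP-ADDRESS]` with the PCG installer VM's public IP address as described in step 1:
- 
-```bash
-# A real HTTP status code means the console endpoint is reachable.
-# A 000 code or a curl error points to a network or firewall issue.
-curl --insecure --silent --output /dev/null --write-out "%{http_code}\n" https://[IP-ADDRESS]:5080
-```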
- -# Scenario - PCG Cluster Provisioning Stalled or Failed - -After you finish configuring the cloud gateway in Palette, the PCG cluster provisioning process may take up to 15 minutes to finish the PCG cluster deployment. - -However, if the PCG cluster provisioning gets stuck, it could hint at incorrect cloud gateway configurations, unavailable IP addresses for the worker nodes, or the inability to perform a Network Time Protocol (NTP) sync. -
- -## Debug Steps -
- -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the left **Main Menu** and select **Tenant Settings**. From the **Tenant settings** menu, select **Private Cloud Gateways**. - - -3. Click on the newly provisioned PCG cluster to review its details. - - -4. Click on the **Events** tab. - - -5. Examine all events in the **Events** tab to identify specific errors or issues. Each event will have a status, timestamp, associated service name, and orchestration details. - - -6. If you encounter one of the following error events - `Failed to deploy image: Failed to create govomiClient` or `No route to host`, refer to the remediation steps outlined in the [Scenario - Failed to Deploy Image](#scenario-failedtodeployimage) or the [Scenario - No Route to the Kubernetes API Server](#scenario-noroutetothekubernetesapiserver) section, respectively. - - -7. If you encounter errors other than the ones mentioned in the previous step, it is possible that the cluster configuration or the DNS settings are not set correctly. You can review and edit the cluster configuration in the cluster settings. The screenshot below highlights the cluster configuration section in the cluster settings blade. - ![A screenshot highlighting the cluster configuration section in the cluster settings blade.](/troubleshooting-pcg-cluster_settings.png) - - -8. If the cluster settings look correct, ensure the search domain is correctly defined in the fault domain's DNS settings. The screenshot below highlights how you can review and edit the DNS mapping of an existing PCG cluster. - ![A screenshot highlighting the DNS mapping settings.](/troubleshooting-pcg-dns.png) - - -9. If the problem persists, download the cluster logs from Palette. The screenshot below will help you locate the button to download logs from the cluster details page. - - ![A screenshot highlighting how to download the cluster logs from Palette.](/troubleshooting-pcg-download_logs.png) - - -10. Share the logs with our support team at [support@spectrocloud.com](mailto:support@spectrocloud.com). -
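- 
-Because an inability to perform an NTP sync is one of the possible causes mentioned above, it can also be worth checking the clock state directly on a cluster node. This is a minimal sketch, assuming you can open an SSH session to one of the PCG cluster nodes:
- 
-```bash
-# Verify that the system clock is synchronized and that an NTP service is active.
-timedatectl status
-```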
- - -# Scenario - No Progress After Creating the Container Manager -After you finish configuring the cloud gateway in Palette, Palette starts provisioning the PCG cluster. The PCG cluster provisioning process may take up to 15 minutes to finish the PCG cluster deployment. You can navigate to the cluster details page and review the progressive events in the **Events** tab while the cluster is provisioning. Suppose the PCG events display no progress after the specific event, `Created container manager`. - -This issue can occur when the PCG installer VM fails to connect to the Palette API endpoint and download the installation artifacts. Another potential reason is that the PCG installer may not have the required permissions to store the installation artifacts in the **spectro-templates** folder. The installer downloads the images for the worker nodes and stores them in the **spectro-templates** folder during the cluster provisioning. -
- -## Debug Steps -
- -1. Check the outbound internet connectivity from the PCG installer VM. Internet connectivity is needed to communicate with the Palette API endpoint, `https://api.spectrocloud.com`, or your self-hosted Palette's API endpoint. Use the following steps to check the outbound internet connectivity: - - SSH into the PCG installer VM using the username `ubuntu` and the public SSH key you provided during the OVA import. - - Use the ping command to check if the VM can reach a public IP address. For example, ping well-known public IPs like Google's public DNS server (8.8.8.8) or any other public IP address. -
- - ```bash - ping 8.8.8.8 - ``` - - If you receive responses from the ping requests, it indicates that the VM has outbound internet connectivity. - - If you do not receive a response to the ping requests, the VM does not have outbound internet connectivity. Go to the next step to continue troubleshooting. - - -2. Check for any network restrictions or firewall rules in the data center's network settings that may block communication. Adjust the proxy settings, if applicable, to fix the connectivity. Alternatively, you can power down and delete the PCG installer VM and relaunch a new one in a network that supports outbound internet connections. - - -3. Ensure you have the necessary write permissions for the **spectro-templates** folder in the data center environment. -
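Because this scenario hinges on reaching the Palette API endpoint, two further checks can help narrow things down: whether the endpoint's hostname resolves from the VM, and whether a proxy is configured on the VM. This is a sketch; adjust the hostname if you use a self-hosted Palette endpoint, and use `getent hosts` if `nslookup` is not installed:

```bash
# Check whether the Palette API endpoint resolves from the installer VM.
nslookup api.spectrocloud.com

# Check whether a proxy is configured in the VM's environment.
env | grep --ignore-case proxy
```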
- -# Scenario - Failed to Deploy Image - -After you finish configuring the cloud gateway in Palette, Palette starts provisioning the PCG cluster. The PCG cluster provisioning process may take up to 15 minutes to finish the PCG cluster deployment. You can navigate to the cluster details page and review the progressive events in the **Events** tab while the cluster is provisioning. Suppose one of the events displays the `Failed to deploy image: Failed to create govomiClient` error. - -The error can occur if there is a preceding "https://" or "http://" string in the vCenter server URL or if the PCG installer VM lacks outbound internet connectivity. -
- -## Debug Steps -
- -1. Log in to [Palette](https://console.spectrocloud.com). - - -2. Navigate to the **Tenant Settings** > **Private Cloud Gateways** page. - - -3. Click on the newly provisioned PCG cluster to review its details. - - -4. Click on the **Events** tab. - - -5. In the **Events** tab, search the `Failed to deploy image: Failed to create govomiClient` error. If the error has occurred due to a preceding "https://" or "http://" string in the vCenter server URL, the error details will mention "https://" twice, as highlighted in the screenshot below. - - ![A screenshot highlighting the "https://" prepended twice to the data center server URL.](/troubleshooting-pcg-http_error.png) - -6. Palette does not allow you to edit the vCenter server URL you used for authentication. Therefore, you must redeploy the PCG cluster with the following considerations: - - - Check the VMware vCenter server field. The field expects a URL or an IP address for authentication. If you use a URL, ensure the URL does not include the preceding "http://" or "https://" string. Also, select the **Use Qualified Network Name** checkbox if you use a URL. The screenshot below displays the vCenter server field you configure in Palette. - ![A screenshot displaying the vCenter server field you configure in Palette](/troubleshooting-pcg-cluster_config_1.png) - - - Ensure the VMware cloud properties are specified correctly in the cloud gateway configuration. You must use the vSphere data center and the folder where you have permission to create resources. - - - If you choose the DHCP option, enable the Dynamic DNS in your DNS server. The screenshot below displays the VMware cloud properties you configure in Palette. - ![A screenshot displaying the VMware cloud properties you configure in Palette](/troubleshooting-pcg-cluster_config_2.png) - - -7. If the steps above do not resolve the issue, check if the deployment failed due to a lack of outbound internet connectivity from the PCG installer VM. Use the following steps to check outbound internet connectivity: - - SSH into the PCG installer VM using the username `ubuntu` and the public SSH key you provided during the OVA import. - - Use the ping command to check if the VM can reach a public IP address. For example, ping well-known public IPs like Google's public DNS server (8.8.8.8) or any other public IP address. -
- - ```bash - ping 8.8.8.8 - ``` - - If you receive responses from the ping requests, it indicates that the VM has outbound internet connectivity. - - If you do not receive a response to the ping requests, the VM does not have outbound internet connectivity. Go to the next step to continue troubleshooting. - - -8. Check for any network restrictions or firewall rules in the data center's network settings that may block communication. Adjust the proxy settings, if applicable, to fix the connectivity. Alternatively, you can power down and delete the PCG installer VM and relaunch a new one in a network that supports outbound internet connections. -
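Referring back to step 6, the vCenter server field should contain only the bare hostname or IP address, without a scheme. A hypothetical example of an incorrect versus a correct value:

```text
Incorrect: https://vcenter.example.com
Correct:   vcenter.example.com
```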
- - -# Scenario - No Route to the Kubernetes API Server - -After you finish configuring the cloud gateway in Palette, Palette starts provisioning the PCG cluster. The PCG cluster provisioning process may take up to 15 minutes to finish the PCG cluster deployment. You can navigate to the cluster details page and review the progressive events in the **Events** tab while the cluster is provisioning. Suppose one of the events displays the `No route to host` error. - -The error indicates an issue with the PCG cluster nodes attempting to connect to the cluster's Kubernetes API server. This issue can occur due to improper networking configuration or an error in the cloud-init process. -
- -## Debug Steps -
- -1. Check the data center network settings. Ensure no network restrictions, firewalls, or security groups block communication between the nodes and the API server. - - -2. If you use the DHCP allocation scheme, check that the DHCP service is available on the DHCP server. Restart the service if it's not in an active state. - - -3. If you use the DHCP allocation scheme, ensure Dynamic DNS is enabled in the DHCP server. Dynamic DNS is only required for the DHCP allocation scheme, not for a static IP allocation scheme. - - -4. Check the Kubernetes API server status. The Kubernetes API server must be active and healthy on the control plane node. Use the following steps to check the status. - - - Switch to [Palette](https://console.spectrocloud.com). - - - Navigate to the **Tenant Settings** > **Private Cloud Gateways** page. - - - Click on the newly provisioned PCG cluster to review its details. - - - Download the PCG cluster's kubeconfig file from the **Overview** tab. Click on the kubeconfig file name to download it to your local machine, as highlighted in the screenshot below. - - ![A screenshot highlighting the kubeconfig file to download from Palette.](/troubleshooting-pcg-download_kubeconfig.png) - - - After you download the PCG cluster's kubeconfig file, use the following commands to make a GET request to one of the [Kubernetes API server endpoints](https://kubernetes.io/docs/reference/using-api/health-checks/#api-endpoints-for-health), `/readyz` or `/livez`. Replace the `[path_to_kubeconfig]` placeholder with the path to the kubeconfig file you downloaded in the previous step. A status code `ok` or `200` indicates the Kubernetes API server is healthy. -
- - ```bash - kubectl --kubeconfig [path_to_kubeconfig] get --raw='/readyz' - ``` - - - If the previous command does not return an `ok`, use the command below to make a verbose GET request by specifying the `verbose` parameter. The output will display the individual health checks so you can decide on further debugging steps based on the failed checks. -
- - ```bash - kubectl --kubeconfig [path_to_kubeconfig] get --raw='/readyz?verbose' - ``` -
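If the health endpoint responds, you can optionally use the same kubeconfig to confirm that the control plane node reports a `Ready` status:

```bash
# Lists the cluster nodes and their status using the downloaded kubeconfig.
kubectl --kubeconfig [path_to_kubeconfig] get nodes
```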
- - -5. If the PCG installer VM has a public IP address assigned, SSH into the VM using the username `ubuntu` and the public SSH key you provided during the OVA import. - - -6. Navigate to the **/var/log** directory containing the log files. - - -7. Examine the cloud-init and system logs for potential errors or warnings. -
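A few log locations that are typically worth reviewing on the installer VM are sketched below; exact file names can vary by image, and the kubelet unit only exists if the node got far enough to start it:

```bash
# Cloud-init output and general system messages.
sudo tail --lines 200 /var/log/cloud-init-output.log
sudo tail --lines 200 /var/log/syslog

# Kubelet service logs, if the kubelet was started on this node.
sudo journalctl --unit kubelet --no-pager | tail --lines 100
```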
- -# Scenario - Permission Denied to Provision - -After you finish configuring the cloud gateway in Palette, Palette starts provisioning the PCG cluster. The PCG cluster provisioning process may take up to 15 minutes to finish the PCG cluster deployment. You can navigate to the cluster details page and review the progressive events in the **Events** tab while the cluster is provisioning. Suppose one of the events displays the `Permission to perform this operation denied` error. - - -You must have the necessary permissions to provision a PCG cluster in the VMware environment. If you do not have adequate permissions, the PCG cluster provisioning will fail, and you will get the above-mentioned error in the events log. -
- -## Debug Steps -
- -1. Ensure you have all the permissions listed in the [VMware Privileges](/clusters/data-center/vmware/#vmwareprivileges) section before proceeding to provision a PCG cluster. - - -2. Contact your VMware administrator if you are missing any of the required permissions. - - -3. Delete the existing PCG cluster and redeploy a new one so that the new permissions take effect. - -
\ No newline at end of file diff --git a/content/docs/14-troubleshooting/90-palette-upgrade.md b/content/docs/14-troubleshooting/90-palette-upgrade.md deleted file mode 100644 index 22f043ecff..0000000000 --- a/content/docs/14-troubleshooting/90-palette-upgrade.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: "Palette Upgrade" -metaTitle: "Palette Upgrade" -metaDescription: "Troubleshooting steps for errors encountered with upgrade actions." -icon: "" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Palette Upgrades - -We recommend you review the [Release Notes](/release-notes) and the [Upgrade Notes](/enterprise-version/upgrade) before attempting to upgrade Palette. Use this information to address common issues that may occur during an upgrade. - - - -# Ingress Errors - -If you receive the following error message when attempting to upgrade to Palette versions greater than Palette 3.4.X in a Kubernetes environment, use the debugging steps to address the issue. - -
- -```text hideClipboard -Error: UPGRADE FAILED: failed to create resource: admission webhook "validate.nginx.ingress.kubernetes.io" denied the request: host "_" and path "/v1/oidc" is already defined in ingress default/hubble-auth-oidc-ingress-resource -``` - - -## Debug Steps - -1. Connect to the cluster using the cluster's kubeconfig file. Refer to the [Access Cluster with CLI](/clusters/cluster-management/palette-webctl) for additional guidance. - - - -2. Identify all Ingress resources that belong to *Hubble* - an internal Palette component. - -
- - ```shell - kubectl get ingress --namespace default - ``` - -3. Remove each Ingress resource listed in the output that starts with the name Hubble. Use the following command to delete an Ingress resource. Replace `REPLACE_ME` with the name of the Ingress resource you are removing. - -
- - ```shell - kubectl delete ingress REPLACE_ME --namespace default - ``` - - -4. Restart the upgrade process. - -
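If several Hubble Ingress resources are listed in step 2, one possible way to remove them in a single pass is to filter by name and pipe the result to `kubectl delete`. This is a sketch; review the list from step 2 before deleting anything:

```shell
kubectl get ingress --namespace default --output name | grep hubble | xargs --no-run-if-empty kubectl delete --namespace default
```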
- diff --git a/content/docs/15-compliance.md b/content/docs/15-compliance.md deleted file mode 100644 index 5538e291c5..0000000000 --- a/content/docs/15-compliance.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Compliance" -metaTitle: "Certification of Compliance" -metaDescription: "Certification of Compliance" -icon: "user-shield" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Compliance - -Spectro Cloud has SOC 2 certification and a FIPS certificate for its Cryptographic Module. - -## SOC 2 Type II - -![soc2.png](/soc2.png "#width=180px") - -Spectro Cloud is certified against SOC2 Type II, compliance with the AICPA’s (American Institute of Certified Public Accountants) TSC (Trust Services Criteria). -* Spectro Cloud SOC 2 Type II audit report assures our organization’s: - * Security - * Availability - * Processing integrity - * Confidentiality - * Privacy -* SOC 2 audits are an important component in regulatory oversight, vendor management programs, internal governance, and risk management. -* These reports help the users and their auditors to understand the Spectro Cloud controls established to support operations and compliance. -* The annual certification of SOC2 is Independent 3rd Party Auditor. -* Spectro Cloud SOC 2 Type II report is available upon request for any customers or prospects with signed MNDA. - -## FIPS 140-2 - - -![FIPS-Compliance](/docs_compliance_compliance_fips-logo.png "#width=180px") - -Spectro Cloud is certified against FIPS 140-2 with [Certificate number 4349](https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/4349) in compliance with the Cryptographic Module Validation Program (CMVP). - -Our Spectro Cloud Cryptographic Module is a general-purpose cryptographic library. The FIPS-enforced Palette VerteX edition incorporates the module in the Kubernetes Management Platform and the infrastructure components of target clusters to protect the sensitive information of regulated industries. Palette VerteX supports FIPS at the tenant level. For more information about the FIPS-enforced Palette edition, check out [Palette VerteX](/vertex). - -The module is tested against these configurations: - -* Red Hat Enterprise Linux 8 on Dell PowerEdge R440 with Intel Xeon Silver 4214R _with and without_ PAA -* SUSE Linux Enterprise Server 15 on Dell PowerEdge R450 with Intel Xeon Silver 4309Y _with and without_ PAA -* Ubuntu 18.04 on Dell PowerEdge R450 with Intel Xeon Silver 4309Y _with and without_ PAA -* Ubuntu 20.04 on Dell PowerEdge R450 with Intel Xeon Silver 4309Y _with and without_ PAA - - -
- -
diff --git a/content/docs/16-component.md b/content/docs/16-component.md deleted file mode 100644 index 50c4bbb707..0000000000 --- a/content/docs/16-component.md +++ /dev/null @@ -1,174 +0,0 @@ ---- -title: "Compatibility Matrix" -metaTitle: "Palette Components Compatibility Matrix" -metaDescription: "Learn what Palette components are compatible with what versions." -icon: "audits" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -This page lists the version details of various Palette components and their respective Palette releases. Visit the [Downloads](/spectro-downloads) resource to access the download URLs. - -# Palette CLI Versions - -|Palette Release| Recommended CLI Version| -|---------------------------|----| -|Release 4.0.0 |4.0.0 | -|Release 3.4.0 |3.4.0 | -|Release 3.3.0 |3.3.0 | - - -# Palette Edge CLI Versions - -|Palette Release|CLI Version| -|---|-------| -|Release 4.0.0 |v4.0.0 | -|Release 3.4.0 |v3.4.2 | -|Release 3.3.0 |v3.3.0 | -|Release 3.2.0 |v3.2.0 | - - - -# On-Premises Installer Version - -|Palette Release|On-Prem Installer Version| -|--|---| -|3.4|2.8.0| -|3.3|2.6.0| -|3.2|2.4.0| -|3.1|2.4.0| -|3.0|2.1.0| -|2.8|2.1.0| -|2.7|2.1.0| -|2.6|2.1.0| -|2.5|2.0.2| -|2.3|2.0.2| - -# Latest Air Gapped OVA Version - -|Palette Release|Air Gapped Version| -|--|---| -|3.3|2.8.0| -|3.2|2.4.0| -|3.1|2.0.1| -|3.0|2.0.1| -|2.8|2.0.1| -|2.7|2.0.1| -|2.6|2.0.1| -|2.5|2.0.1| -|2.3|2.0.1| - -# Private Cloud Gateways (PCG) Images - -## vSphere PCG Image Version - -|Palette Release|vSphere PCG Version| -|--|---| -|3.4|1.8.0| -|3.3|1.6.0| -|3.2|1.4.0| -|3.1|1.2.0| -|3.0|1.2.0| -|2.8|1.2.0| -|2.7|1.2.0| -|2.6|1.2.0| -|2.5|1.1.9| -|2.3|1.1.9| ------- - -## MAAS PCG Image Version - -|Palette Release|MAAS PCG Version| -|--|---| -|3.4|1.0.12| -|3.3|1.0.12| -|3.2|1.0.12| -|3.1|1.0.11| -|3.0|1.0.11| -|2.8|1.0.11| -|2.7|1.0.11| -|2.6|1.0.11| -|2.5|1.0.9| -|2.3|1.0.9| ---------- - -## OpenStack PCG Image Version - -|Palette Release|OpenStack PCG Version| -|--|---| -|3.4|1.0.12| -|3.3|1.0.12| -|3.2|1.0.12| -|3.1|1.0.11| -|3.0|1.0.11| -|2.8|1.0.11| -|2.7|1.0.11| -|2.6|1.0.11| -|2.5|1.0.9| -|2.3|1.0.9| -------- - -# Kubernetes Versions - -- Kubernetes: Refer to the Kubernetes [pack documentation](/integrations/kubernetes). - -# Operating System Layer Versions - -|Operating System |Versions| -|--|--| -|Ubuntu| 22.04| -|Ubuntu| 20.04| -|Ubuntu| 18.04| -|CentOS| 8.0| -|CentOS| 7.9| -|CentOS| 7.7| -|OpenSuSE|15.4| - - -# Network Layer Versions - -- Calico: Refer to the Calico [pack documentation](/integrations/calico). - -- Cilium: Refer to the Cilium [pack documentation](/integrations/cilium). - - -|Cilium Enterprise|Versions| -|--|--| -|1.10.x|1.10.8| - - -# Storage Layer Version - -- Azure Disk: Refer to the [pack documentation](/integrations/azure-disk). -- GCE Persistent Disk: Refer to the [pack documentation](/integrations/gce). -- NFS Subdir External Provisioner: Refer to the [pack documentation](/integrations/nfs-subdir-external). -- Open Stack Cinder: Refer to the [pack documentation](/integrations/openstack-cinder). -- Portworx: Refer to the [pack documentation](/integrations/portworx). -- Rook Ceph: Refer to the [pack documentation](/integrations/rook-ceph). 
-- vSphere CSI: Refer to the [pack documentation](/integrations/vsphere-csi). -- vSphere Volume: Refer to the [pack documentation](/integrations/vsphere-volume). - -# Resources - -- [Packs List](/integrations#integrations) - - -- [Downloads](/spectro-downloads#palettedynamicartifacts) - - - - - - - - - - diff --git a/content/docs/16-spectro-downloads.md b/content/docs/16-spectro-downloads.md deleted file mode 100644 index 93ecd2522a..0000000000 --- a/content/docs/16-spectro-downloads.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: "Downloads" -metaTitle: "Downloads" -metaDescription: "Overview of Palette downloads and their respective URL and checksums." -icon: "cloud-download-alt" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -The following Palette downloads are available: - - -## Self-Hosted - -You can deploy a self-hosted Palette to your environment. Refer to the [Self-Hosted Installation](/enterprise-version/) documentation for additional guidance on how to install Palette. Palette VerteX installation guide can be found in the [Palette VerteX install](/vertex/install-palette-vertex) document. - -
- - - - -Starting with Palette 4.0.0, the Palette CLI and the Helm Chart are the only supported methods for installing Palette. The Palette OVA installation method is only available for versions 3.4 and earlier. Refer to the [Install Enterprise Cluster](/enterprise-version/deploying-an-enterprise-cluster) or the [Kubernetes Install Helm Chart](/enterprise-version#kubernetesinstallhelmchart) guides for additional guidance on how to install Palette. - - - -
- -## SAAS - Private Cloud Gateway (PCG) - - -Palette supports on-prem environments through the Private Cloud Gateway (PCG) component. PCG provides support for isolated private cloud or data center environments. When installed on-prem, PCG registers itself with Palette, allowing for secure communication between the SaaS portal and the private cloud environment. The gateway also enables end-to-end lifecycle management of Kubernetes clusters in private cloud environments directly from the SaaS portal. - -
- - - -Starting with Palette 4.0, the installation of PCG is done through the Palette CLI. Refer to the Palette CLI [PCG command](/palette-cli/commands/#pcg) document for installation guidance. - - - -
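As a rough illustration only, launching the interactive PCG installation from a workstation with the Palette CLI looks approximately like the sketch below; treat the exact subcommand and flags as an assumption and confirm them against the linked PCG command reference:

```bash
# Starts the interactive PCG installation wizard (verify syntax in the PCG command reference).
palette pcg install
```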
- -### vSphere PCG Image - -|Version|URL| Checksum (SHA256) | -|---|---|---| -|1.8.0|https://software.spectrocloud.com/pcg/installer/v1.8.0/gateway-installer-v1.8.0.ova| `c860682c8e7dc55c6873ff1c5a0f337f91a74215b8cae92e4fa739b6ddc62720` | -|1.6.0|https://software.spectrocloud.com/pcg/installer/v1.6.0/gateway-installer-v1.6.0.ova| `2cf85c974e00524a2051be514484695ae51065af861bf1eb2c69aeb76816b0ff` | -|1.4.0|https://software.spectrocloud.com/pcg/installer/v1.4.0/gateway-installer-v1.4.0.ova| `67973c6ada136f64d9316dc05cda81d419997487c8007b6d58802bec12fb80dd` | ------- - -### MAAS PCG Image - -|Version|URL| Checksum (SHA256) | -|---|---|---| -|1.0.12|https://gcr.io/spectro-images-public/release/spectro-installer:1.0.12| `a229d2f7593d133a40c559aa0fb45feca8b0cd1b2fcebfe2379d76f60bfe038b`| ---------- - -### OpenStack PCG Image - -|Version|URL| Checksum (SHA256) | -|---|---|---| -|1.0.12|https://gcr.io/spectro-images-public/release/spectro-installer:1.0.12| `a229d2f7593d133a40c559aa0fb45feca8b0cd1b2fcebfe2379d76f60bfe038b`| -------- - - -## Palette CLI - -The Palette Command Line Interface (CLI) is a tool that you can use to interact with Palette programmatically. Check out the [Palette CLI](/palette-cli/install-palette-cli) document for installation guidance. - -|Version| Operating System | Checksum (SHA256) | -|---|---|---| -|4.0.0| [Linux-amd64](https://software.spectrocloud.com/palette-cli/v4.0.0/linux/cli/palette)| `44fe237d2dc8bec04e45878542339cbb5f279ed7374b5dfe6118c4cbe94132b4` | -|3.4.0| [Linux-amd64](https://software.spectrocloud.com/palette-cli/v3.4.0/linux/cli/palette)| `9dd1e1c70b0b30c2a35b54d1cb54b230593842a114f8d7cbeebe4e882fa2795e`| -|3.4.0| [OSX-arm64](https://software.spectrocloud.com/palette-cli/v3.4.0/osx/cli/palette)| `88b9e74705a0d66f9b34481002a8d33889c94ef7788a590807b1538e8513c62a`| - - - -## Palette Edge CLI - -|Version| Operating System | Checksum (SHA256) | -|-------|---| --- | -|4.0.2 | [Linux-amd64](https://software.spectrocloud.com/stylus/v4.0.2/cli/linux/palette-edge) | `257d868b490979915619969815fd78aa5c7526faba374115f8d7c9d4987ba05d`| -|3.4.3 | [Linux-amd64](https://software.spectrocloud.com/stylus/v3.4.3/cli/linux/palette-edge) | `b53ccd28ea2a36a6eda43e0e7884c97bebd7d78922374168e1819b768df54f16`| - - diff --git a/content/docs/17-glossary-all.md b/content/docs/17-glossary-all.md deleted file mode 100644 index 4a2e3933b1..0000000000 --- a/content/docs/17-glossary-all.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: "Glossary" -metaTitle: "Palette Glossary" -metaDescription: "Palette Glossary" -icon: "about" -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Glossary - -This page gives a quick reference to various object types and concepts within the Palette platform. -## App Mode -A mode optimized for a simpler and streamlined developer experience that allows you to focus on the building, maintenance, testing, deployment, and monitoring of your applications. App Mode removes the need to worry about the infrastructure management of a Kubernetes cluster and results in a PaaS-like experience, enabling you to focus on deploying [App Profiles](#app-profile), Apps, and [Palette Virtual Clusters](#palette-virtual-cluster). 
- - -## App Profile -App Profiles are templates created with preconfigured services required for Palette Virtual Clusters deployment. The App Profile allow creators to integrate various services or tiers, required to run an application, such as cache, databases, and more into a single deliverable. App Profiles provide a way to drive consistency across virtual clusters. You can create as many profiles as required, with multiple tiers serving different functionalities per use case. - -## Air-Gapped - -Palette on-prem installation supports Air-Gapped, a security measure in which its management platform is installed on VMware environments with no direct or indirect connectivity to any other devices or networks of the outside world. This feature provides airtight security to the platform without the risk of compromise or disaster. In addition, it ensures the total isolation of a given system from other networks, especially those that are not secure. -## Attach Manifests - -For integrations and add-ons orchestrated via Palette [Packs](#pack) or [Charts](#helm-charts), at times it is required to provide additional Kubernetes resources to complete the installation. Resources like additional secrets or Custom Resource Definitions may need to be installed for the integration or add-on to function correctly. Attach Manifests are additional raw manifests attached to a cluster profile layer built using a Palette Pack or a Chart. Multiple Attach Manifests can be added to a layer in a cluster profile. - -## Bring Your Own Operating System (BYOOS) - -A feature in Palette that allows you to bring your own operating system and use it with your Kubernetes clusters. With the BYOOS pack, you can reference your own OS images, configure the necessary drivers, and customize the OS to meet your specific requirements. BYOOS gives you greater flexibility, control, and customization options when it comes to managing your Kubernetes clusters. It is especially useful for enterprises and organizations with strict requirements around security, compliance, or specific hardware configurations. - -## Chart Repositories - -Chart Repositories are web servers, either public or private, that host Helm Charts. By default, Palette includes several popular chart registries such as Bitnami. As an administrator, you can add additional public or private chart repositories to leverage charts from those sources. This feature provides greater flexibility in managing and deploying applications, allowing you to access and use Helm Charts from various sources in your Palette environment -## Cloud Account - -Cloud Accounts are where access credentials are stored for public and private clouds. It is used by the system to provide new cluster infrastructure and cluster resources. Cloud account information is treated as sensitive data and fully encrypted using the tenant's unique encryption key. - -## Cluster Mode -Cluster Mode enables you to create, deploy, and manage Kubernetes clusters and applications. In Cluster Mode, you can deploy Kubernetes clusters to public cloud providers, on-prem data centers, and on the edge. - -## Cluster Profile - -A Cluster Profile is a declarative model of a Kubernetes infrastructure stack. A Kubernetes infrastructure stack is broken into multiple layers, from core layers like base OS, Kubernetes, storage, network, to additional add-on layers such as load balancer, ingress controller, logging, monitoring, security, etc. For each layer, Palette provides multiple out-of-the-box options and versions. 
The cluster profile is essentially a configuration of end-to-end Kubernetes stacks and settings that you create based on your needs, which you can reuse every time you need to deploy a cluster matching that configuration. For example, let us say for AI/ML you need a cluster with a base OS with an NVIDIA driver installed and Kubeflow installed in the cluster, but for a production cluster, you need a different stack with Logging (EFK), Monitoring (Prometheus), Security (Twistlock) pre-installed. - -The diagram below shows an example of a cluster profile: - -![cluster_profile_new](/cluster_profile_new.png) - -Read more about Cluster Profiles [here](/cluster-profiles). -## Edge Appliances - -Palette supports several kinds of appliances for the Edge deployment. These appliances can be registered with the Palette Management Console and used for provisioning a Virtualized or a Native OS (Native Edge Deployment). The following is the list of all the Palette supported Edge appliance types: - - | **Appliance Type** | **Environment** | - | :------------------------------ | :---------------------------------------- | - | Native Edge Deployment | Bare Metal Machines or Virtual Appliances | - | Bare Metal Machine | Virtualized | - | KVM-based virtual machines | Virtualized | - -**Note:** Palette Edge Manager & TUI would be embedded in P6OS. -## Edge Clusters - -Edge Clusters are Kubernetes clusters set up on appliances installed in isolated locations such as hospitals, grocery stores, restaurants, etc., unlike a data center or cloud environment. These appliances can be bare metal machines or virtual machines and are managed by operators at these remote sites. Palette provides the provisioning of Workload Clusters on such edge appliances from its SaaS-based management console. Besides provisioning of the cluster, Palette also provides end-to-end management of these clusters through operations such as scaling, upgrades, reconfiguration, etc. -## Helm Charts - -Helm Charts are Kubernetes YAML manifests that describe a related set of Kubernetes resources into a single package. Just like Palette's native Packs, Palette supports and orchestrates helm charts hosted in any public or private Helm chart registry on to Kubernetes clusters. - -## Host Cluster - -A Kubernetes cluster that is managed by Palette. A host cluster may contain several Palette Virtual Clusters. -## Management Clusters - -Management Cluster is where Palette core components are hosted and are often referred to in on-prem installations of Palette. As part of the Kubernetes workload cluster provisioning, the first control-plane node is launched by Palette in the management cluster or the cloud gateway. Once the first control-plane node goes to running state, all the resources are pivoted from the management cluster or the cloud gateway to the target workload cluster. After that, the target cluster self-manages the cluster and application lifecycle. All Day-2 operations which result in node changes, including OS/Kubernetes upgrades, scaling, and nodes certificate rotation, are triggered by changes to the Cluster API resources in the target workload cluster. -## OIDC - -OpenID Connect [(OIDC)](/user-management/saml-sso/#oidcbasedsso) is an open source, authentication protocol that allows users to verify their identity, based on the authentication performed by an authorization provider. -## Organization - -An organization is the equivalent of a Tenant. Review the [Tenant](#tenant) definition to learn more. 
-## Pack - -Palette provides multiple integrations/technologies in a [cluster profile](#cluster-profile) for various system layers, such as OS, Kubernetes, storage, networking, monitoring, security, load balancers, etc. These integrations are provided in the form of Packs. A pack is a Palette content package that describes an integration in the Kubernetes infrastructure stack ecosystem and contains relevant artifacts required to deploy and manage that integration. Palette provides packs for core layers of the Kubernetes stack; Operating Systems, Kubernetes distributions, Networking and Storage as well as packs for add-on layers such as ELK Stack, Prometheus, Sysdig Falco, etc. - -## Pack Manifests - -Layers in a [cluster profile](#cluster-profile) are typically built using a Palette [Pack](#pack) or a [Charts](#helm-charts). There may be certain scenarios where additional Kubernetes resources need to be provisioned that are not part of any Palette pack or a chart. Pack manifests provide a pass-through mechanism to allow provisioning through raw manifests. Pack Manifest layers can be added to a cluster profile stack built using Spectro Packs and Charts. -## Palette Edge Manager (Local API) - -A cmd line API that supports TUI operations & site diagnostics. For Dark Site or Air Gapped environments Palette Edge Manager can be used to upload cluster configurations. - -## Palette eXtended Kubernetes (PXK) - -Palette eXtended Kubernetes (PXK) is a customized version of the open-source Cloud Native Computing Foundation (CNCF) distribution of Kubernetes. This Kubernetes version can be deployed through Palette to all major infrastructure providers, public cloud providers, and private data center providers. This is the default distribution when deploying a Kubernetes cluster through Palette. You have the option to choose other Kubernetes distributions, such as MicroK8s, Konvoy, and more, should you want to consume a different Kubernetes distribution. - -PXK is different from the upstream open-source Kubernetes version primarily because of the carefully reviewed and applied hardening of the operating system (OS) and Kubernetes. The hardening ranges from removing unused kernel modules to using an OS configuration that follows industry best practices. Our custom Kubernetes configuration addresses common Kubernetes deployment security pitfalls and implements industry best practices. - -A benefit of Palette when used with PXK is the ability to apply different flavors of container storage interface (CSI) plugins and container network interface (CNI) plugins. -Other open-source Kubernetes distributions, such as MicroK8s, RKE2, and K3s, come with a default CSI and CNI. Additional complexity and overhead are required from you to enable different interfaces. PXK supports the ability to select other interface plugins out of the box without any additional overhead or complexity needed from your side. - -There are no changes to the Kubernetes source code and we also follow the same versioning schema as the upstream open-source Kubernetes distribution. - -## Palette eXtended Kubernetes Edge (PXK-E) - -Palette eXtended Kubernetes Edge (PXK-E) is a customized version of the open-source Cloud Native Computing Foundation (CNCF) distribution of Kubernetes. This Kubernetes distribution is customized and optimized for edge computing environments and can be deployed through Palette. PXK-E is the Kubernetes distribution Palette defaults to when deploying Edge clusters. 
- - -PXK-E differs from the upstream open-source Kubernetes version by optimizing for operations in an edge computing environment. PXK-E also differentiates itself by using the open-source project, [Kairos](https://kairos.io/) as the base operating system (OS). PXK-E’s use of Kairos means the OS is immutable, which significantly improves the security posture and reduces potential attack surfaces. - -Another differentiator of PXK-E is the carefully reviewed and applied hardening of the OS and Kubernetes. The hardening ranges from removing unused OS kernel modules to using an OS configuration that follows industry best practices. Our custom Kubernetes configuration addresses common deployment security pitfalls and implements industry best practices. - -With PXK-E, you can manage automatic OS upgrades while retaining immutability and the flexibility to roll out changes safely. The A/B partition architecture of Kairos allows for new OS and dependency versions to be installed in a separate partition and mounted at runtime. You can fall back to use the previous partition if issues are identified in the new partition. - -PXK-E manages the underlying OS and the Kubernetes layer together, which reduces the challenge of upgrading and maintaining two separate components. - -PXK-E allows you to apply different flavors of container storage interfaces (CSI) and container network interfaces (CNI). Other open-source Kubernetes distributions such as MicroK8s, RKE2, and K3s come with a default CSI and CNI. There is additional complexity and overhead when you want to consume different interface plugins with traditional Kubernetes distributions. Using PXK-E, you select the interface plugin you want to apply without additional overhead and complexity. - -There are no changes to the Kubernetes source code used in PXK-E, and it follows the same versioning schema as the upstream open-source Kubernetes distribution. - -## Palette Orchestrator -Palette orchestrator supports deploying the clusters as per the specifications desired and modeled in Palette UI. Furthermore, it supports the cluster version upgrades as per the user requirements. The Palette orchestrator also aids in recycling the certificates of the clusters, node health checks, and recycling unhealthy nodes. -## PaletteOS (P6OS) - -PaletteOS is a real-time operating system provisioned by Palette. It is embedded with a base Operating System such as Ubuntu, K3OS, etc., and one of the Kubernetes distributions such as CNCF (Cloud Native Computing Foundation), K3s (a Lightweight Kubernetes Distribution), or RKE (Rancher Kubernetes Engine). Palette builds several of these based on the most desired versions of the base operating system and Kubernetes distribution. - -**Examples**: (Ubuntu20.0.4+CNCFK8s1.21.3, SLES+K3S). We also encourage our customers to build their own Operating system. -## Palette Upgrade Controller - -A Kubernetes controller to be installed into the workload cluster to facilitate upgrades to new P6OS image. - -## Palette Virtual Cluster -Palette Virtual Clusters enable operations teams to partition a [host cluster](#host-cluster) and deploy lightweight virtual clusters on top, similar to how virtualization creates logically isolated virtual servers on top of physical machines. This is great for giving developers quick access to a sandbox environment for testing their code. 
Virtual clusters provide as strong a level of separation without introducing complicated overhead, such as separating physical resources and managing namespaces with complex RBAC configurations. Palette Virtual Clusters is powered by [vCluster](https://www.vcluster.com/). - -## Permissions - -Permissions are associated with specific actions within the platform such as Create New user in a tenant, Add a Cluster Profile in a project, View Clusters within a cluster, etc. Permissions are granted to the [users](#user) and [teams](#team) through [roles](#role). -## Presets - -Presets are a subset of properties configured for a layer that is pre-configured with defaults to easily enable or turn on a feature. Palette [packs](#pack) and [charts](#helm-charts) provide several settings that can be customized by the user. Although customizable typically in a YAML format, it can be cumbersome to look through a flat list of properties and identify the ones to change for specific functionality. Through presets, Palette groups a bunch of related properties that control a feature and provides them as named presets. During construction of a [cluster profile](#cluster-profile), users may be simply enabled or disable a preset to quickly make the desired changes. - -## Private Cloud Gateway - -A Private Cloud Gateway is a Palette component that enables the communication between Palette's management console and a private cloud/data center. The gateway needs to be installed by the users in their private cloud environments using a private cloud gateway installer appliance. -## Private Cloud Gateway-Edge (PCG-E) - -Deploying Edge Clusters requires a Private Cloud Gateway-Edge (PCG-E) to be installed on the appliances for Palette to discover the appliance and provision workload clusters on them. A PCG-E is Palette's on-premises component to support remote Edge devices. Palette PCG-E, once installed on-premises, registers itself with the Palette's SaaS portal and enables secure communications between the SaaS portal and the Edge Clusters. -## Private Pack Registry - -Palette provides extensibility by providing a way for users to define [packs](#pack) for integrations beyond the ones provided by default in Palette's public pack registry. These user-defined packs need to be hosted in a private registry, which users can bring up in their environment using Palette's pack registry software. -## Project - -Projects provide a way for grouping clusters together for logical separation. Role-based access controls within Palette are applied at the project level. [Users](#user) and [teams](#team) can be assigned one or more [roles](#role) within a project for granular control over [permissions](#permission) within the project scope. -## Public Pack Registry - -Palette maintains a public pack registry containing various [packs](#pack) that can be used in any [cluster profile](#cluster-profile). The pack content in this registry is constantly updated with new integrations. - - -## Repavement - -Repavement is the process of replacing a Kubernetes node with a new one. This is typically done when a node is unhealthy or needs to be upgraded. The process involves draining the node, or in other words, migrating active workloads to another healthy node, and removing it from the cluster. A new node is created and configured with the same settings as the old node and added back to the cluster. The process is fully automated and does not require any manual intervention. - -## Role - -A Role is a collection of [permissions](#permission). 
There are two kinds of roles in Palette: *tenant roles* and *project roles*. *Tenant roles* are a collection of tenant-level permissions such as create a new user, add a new project, etc. *Project roles* consist of permissions for various actions within the scope of a project such as create a cluster profile, create a cluster, etc. -## Site Configuration Text User Interface (TUI) - -TUI is initially used as an interface to site operator to provide site-specific settings such as NW Settings (Static IP, DHCP, WAN, GW, Proxy), Palette endpoint, and Device ID override. It can accept inputs from the unattended.yaml file. -## Spectro Agent -Spectro Agent bridges the information transfer between Palette SaaS and Palette Orchestrator. The Spectro Agent collects information such as metrics, workloads, and heartbeats and constantly updates to the SaaS platform for user access. In addition to this, the Spectro Agent is responsible for initiating and controlling Backup, OS-Patch, and Compliance Scan on the running cluster. - -## System Console (On-prem System Console) -The console is used to scale up the Enterprise cluster and manage it. The System console supports creating and activating a new tenant in a new instance. It Initiates the installation of a Palette Enterprise Cluster. The On-Prem System Console provides various administrative setup tasks. Most of these are optional and can be performed at any time. To quickly start using the platform's functionality, all that is needed is to create the first tenant and activate it.Initial login:admin/admin. -## System Profiles -System Profiles provide a way to bootstrap an edge appliance with an initial set of virtual and containerized applications. Similar to cluster profiles, System Profiles are templates created using one or more layers that are based on packs or helm charts. -## Team -A Team is a group of [users](#user). Users can be part of one or more teams. Teams provide a convenient way to control platform access for a group of users. [Roles](#role) assigned to a team grant associated tenant or [project](#project) [permissions](#permission) to all users that are part of the team. -## Tenant - -Tenant represents a customer or an organization in Palette. Palette is a multi-tenant platform. All tenant resources are isolated from other tenants. Each tenant has a unique encryption key to encrypt any sensitive data such as cloud credentials and tenant user information. [Users](#user) from a tenant do not have access to resources in another tenant. -## User - -Users are members of a [tenant](#tenant) who are assigned [roles](#role) that control their access within the platform. For example, users with the tenant admin role get permissions to perform all actions across all [projects](#project) in the tenant whereas users assigned project roles, only get specific permission within the associated projects. The user's personal information (email, name) is treated as sensitive data and fully encrypted using the tenant's unique encryption key. - -## VMO -Palette [Virtual Machine Orchestrator](/vm-management) provides a unified platform for managing containerized and virtualized applications. Palette VM Orchestrator allows organizations to onboard, deploy, manage, and scale VMs within the same cluster as their containerized applications. - -## Workload -An application running on the Kubernetes cluster is called a Workload. It can be a set of components that work together or a single independent component, run as a set of pods. 
In Kubernetes terms, a Pod is a set of running containers on your cluster. -## Workload Cluster - -Workload / Tenant / Application Clusters are a collection of master and worker nodes that cooperate to execute container application workloads. Kubernetes clusters provisioned by users are referred to as Workload Clusters. These clusters are created within [projects](#project) and they are provisioned and managed in the user's cloud environment. Each cluster is provisioned from a [Cluster Profile](#cluster-profile) with additional configuration overrides and cloud-specific settings. -## Workspace - -The multi-cluster management and governance capabilities are supervised with Palette Workspaces. Workspaces enable the logical grouping of clusters and namespaces to provide application or team-specific governance and visibility into workloads, cost, and usage metrics. For example, the application or team workload may be deployed into namespaces across clusters for achieving High Availability (HA), Disaster Recovery (DR), organization-specific placement policies, etc. Grouping the namespaces and clusters into a workspace provide central management and governance in a multi-cluster distributed environment. \ No newline at end of file diff --git a/content/docs/18-knowledgebase.md b/content/docs/18-knowledgebase.md deleted file mode 100644 index 13ef5f140b..0000000000 --- a/content/docs/18-knowledgebase.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "Knowledge Base" -metaTitle: "knowledgebase" -metaDescription: "Knowledge Base Repository" -icon: "book" -hideToCSidebar: true -hideToC: true -fullWidth: true ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Overview - -The Knowledge base is a collection of how-tos and tutorials. Use the topic to help you find content related to the topic. - -
- - -- [How-To](/knowledgebase/how-to) - - - -- [Tutorials](/knowledgebase/tutorials) - - -
-
diff --git a/content/docs/18-knowledgebase/00-how-to.md b/content/docs/18-knowledgebase/00-how-to.md deleted file mode 100644 index c3d5770cc4..0000000000 --- a/content/docs/18-knowledgebase/00-how-to.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "How to" -metaTitle: "Get started with a quick How-to" -metaDescription: "KnowledgeBase with How to" -icon: "laptop" -hideToC: false -fullWidth: false -hiddenFromNav: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# How To - -Use the following How-tos to learn how to use Palette or enable a feature in your environment. - -# User Management - -- [Palette SSO with Microsoft AD FS](/knowledgebase/how-to/palette-sso-with-adfs) - -
-
diff --git a/content/docs/18-knowledgebase/00-how-to/02-palette-sso-with-adfs.md b/content/docs/18-knowledgebase/00-how-to/02-palette-sso-with-adfs.md deleted file mode 100644 index bbaa119f39..0000000000 --- a/content/docs/18-knowledgebase/00-how-to/02-palette-sso-with-adfs.md +++ /dev/null @@ -1,299 +0,0 @@ ---- -title: 'Palette SSO with Microsoft AD FS' -metaTitle: 'Set up Palette SSO with Microsoft Active Directory Federation Service (AD FS)' -metaDescription: 'Set up Palette SSO with Microsoft Active Directory Federation Service (AD FS)' -icon: "" -hideToC: false -fullWidth: false -hideToCSidebar: false -hiddenFromNav: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - - -# Enable SSO with Microsoft Active Directory Federation Service (AD FS) - -Single sign-on (SSO) is an authentication method that enables users to log in to multiple applications and websites with one set of credentials. SSO works upon a trust relationship established and maintained between the service provider (SP) and an identity provider (IdP) using certificates. Palette supports SSO based on either SAML or OIDC. - -The following steps will guide you to enable Palette SSO with [Microsoft AD FS](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/development/ad-fs-openid-connect-oauth-concepts) based on OIDC. - -
- - - - You cannot use Microsoft AD FS for SAML-based SSO with Palette. Microsoft AD FS does not support the Canonical XML 1.1 standard that Palette employs. You can only use the OIDC-based approach for Microsoft AD FS. - - - - -# Prerequisites -To set up OIDC-based SSO with Microsoft AD FS, you need to use one of the following versions: -* Microsoft AD FS 2022 (comes with Windows Server 2022) -* Microsoft AD FS 2019 (comes with Windows Server 2019) -* Microsoft AD FS 2016 (comes with Windows Server 2016) - -If you need to be able to access your AD FS service from outside your corporate network, you will also need an AD FS Reverse Proxy. An official Microsoft tutorial for setting up an AD FS Reverse Proxy is not available, but you can use this blog post from [Matrixpost](https://blog.matrixpost.net/set-up-active-directory-federation-services-ad-fs-5-0-adfs-reverse-proxy-part-2/) for additional guidance. - - -# Enablement -## Create the AD FS Application Group for Palette - -1. Open the AD FS Management console on your Windows Server and add a new Application Group for Palette: - -
- -![Add AD FS Application Group](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-application-group.png) - -
- -2. Provide a suitable name for the application group and select **Server Application** from the list of templates. Then click **Next**: - -
- -![Name Application Group](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_name-application-group.png) - -
- -3. The next screen displays the **Client Identifier** for this Application Group: - -![Get Client Identifier](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_get-client-identifier.png) - -
- -4. Copy the client identifier value and save it somewhere. You will need to enter this value into the Palette SSO configuration later. - - -5. Open a web browser and navigate to your Palette subscription. Navigate to **Tenant Settings** --> **SSO** and click **OIDC**. Click the button next to **Callback URL** to copy this value to the clipboard: - -![Copy Callback URL](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-callback-url.png) - -
- -6. Switch back to your AD FS Management console and paste the copied value into the **Redirect URI** field, then click **Add** to add it to the list: - -![Paste Redirect URI](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_paste-redirect-uri.png) - -
- -7. Switch back to Palette in the web browser and click the button next to **Logout URL** to copy this value to the clipboard: - -![Copy Logout URL](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-logout-url.png) - -
- -8. Switch back to your AD FS Management console and paste the copied value into the **Redirect URI** field, then click **Add** to add it to the list: - -![Paste Logout URI](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_paste-logout-uri.png) - -
- -9. These two redirect URIs are required for SSO to work with Palette. You can also add additional redirect URIs. The URIs in the table below are useful when you want to use AD FS for OIDC authentication into your Kubernetes clusters. - -| URL | Type of Access | -| --- | --- | -| `http://localhost:8000` | Using kubectl with the kube-login plugin from a workstation | -| `https://console.spectrocloud.com/v1/shelly/oidc/callback` | Using the web-based kubectl console | -| `https:///oauth/callback` | Using OIDC authentication into Kubernetes Dashboard | - -10. When you have completed entering redirect URIs, click **Next**. On the next page of the wizard, select **Generate a shared secret** and click **Copy to clipboard** to copy the secret value and save it somewhere. You will need to enter this value into the Palette SSO configuration later: - -![Copy Shared Secret](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-shared-secret.png) - -
- -11. Click **Next** and on the Summary screen, click **Next** again to complete the wizard. You need to add another application to the application group. Select the newly created application group and click **Properties**: - -![Open Application Group](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_open-oidc-app.png) - -
- -12. In the Properties screen, click **Add application...**. In the wizard that opens, select **Web API** and click **Next**: - -![Add Web API application](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-web-api.png) - -
- -13. In the **Identifier** field, add the following entries: -* The **Client Identifier** value you saved when creating the application group. -* The base URL of your Palette tenant. This is equal to the URL shown by your browser when logged into Palette minus the path. Example `https://johndoe-spectrocloud.console.spectrocloud.com`. - -
- -![Find Base URL](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_base-url.png) - -
- -![Add Web API Identifiers](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-identifiers.png) - -
- -14. Click **Next** when done. On the next screen, select a suitable policy for who can use this SSO and under what circumstances. If you're not sure which policy to choose, select **Permit everyone**, then click **Next**: - -
- -![Select Access Control Policy](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_select-policy.png) - -
- -15. On the next screen, by default only the **openid** scope is ticked. However, to include the user's groups in the OIDC claim, you need to also enable the **allatclaims** scope. If your AD FS server does not yet have an **allatclaims** scope in the list, click **New scope...** and type `allatclaims` in the Name field, then click **OK** to add it. Ensure both scopes are enabled and then click **Next**: - -![Enable Permitted Scopes](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_enable-scopes.png) - - -16. On the Summary screen, click **Next** to finish the wizard. You need to set the **Issuance Transform Rules** for the Web API application. Open the application again by double-clicking on the Web API entry or clicking **Edit**. - -![Re-open Web API Application](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_reopen-webapi-app.png) - -
- -17. Navigate to the **Issuance Transform Rules** tab and click **Add Rule**. - -![Add Issuance Transform Rule 1](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-transform-rule-1.png) - -
- -18. Select the **Send LDAP Attributes as Claims** template and click **Next**: - -![Send LDAP As Claims Rule](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_ldap-as-claims.png) - -
- -19. Name the rule `OpenID - LDAP Attribute Claims`. Select **Active Directory** as the Attribute store and add the following LDAP mappings: -* **E-Mail-Addresses** --> `email` -* **Given Name** --> `given_name` -* **Surname** --> `family_name` - -You can select the items on the left from the list. You will need to type the items on the right manually. Ensure you use all lowercase characters for the values on the right: - -![Set LDAP Claims](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_set-ldap-claims.png) - -
- -20. Click **Finish** to add the rule. Now click on **Add Rule...** again to add the second rule: - -![Add Issuance Transform Rule 2](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-transform-rule-2.png) - -
- -21. Select the **Send Group Membership as Claims** template and click **Next**: - -![Send Groups As Claims Rule](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_groups-as-claims.png) - -
- -22. In the next screen, define the group claim as desired. In the following example, a group in Active Directory is called `SpectroTeam - Admins`. The desired behavior is for anyone who is a member of that group to be issued a `groups` claim with the value `Admins`. In Palette, such users are automatically mapped to a group with the same name, `Admins`. You can assign RBAC permissions to that group in Palette to give it the desired access. - -![Set Group Claim](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_set-group-claim.png) - -
- -23. Click **Finish** to add the rule. Click **OK** to save the changes to the Web API rule and click **OK** again to save the changes to the application group. - -24. Take note of your AD FS identifier; you will need it for Palette in the next step. Typically, this is your AD FS name plus `/adfs`. You can also take the Federation Service identifier and remove `/services/trust` from that URL: - -![Note AD FS Name](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_note-adfs-name.png) - -
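As an optional check, you can confirm the identifier by querying the OIDC discovery document that AD FS publishes under `/adfs/.well-known/openid-configuration`. The hostname below is a hypothetical example, so replace it with your own AD FS name; the `issuer` value in the response is what you will enter as the Issuer URL in Palette. This sketch assumes `curl` and `jq` are available on your workstation.

```shell
# Query the AD FS OIDC discovery document and print the issuer value.
# The hostname is hypothetical; substitute your AD FS name.
curl -s https://adfs.example.com/adfs/.well-known/openid-configuration | jq .issuer
```

If `jq` is not installed, omit the pipe and read the `issuer` field directly from the JSON output.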
- -## Enable OIDC SSO in Palette - -25. Open a web browser and navigate to your [Palette](https://console.spectrocloud.com) subscription. - -Navigate to **Tenant Settings** --> **SSO** and click on **OIDC**. Enter the following information. - -| Parameter | Value | -|-------------------|--------------------------------------------------------------------| -| Issuer URL | Your AD FS issuer URL. Typically this is your AD FS name plus /adfs.| -| Client ID | The client identifier that you saved in step **4**. | -| Client Secret | The shared secret that you generated in step **8**. | -| Default Teams | Leave blank if you don't want users without group claims to be assigned to a default group. If you do, enter the desired default group name. If you use this option, be careful with how much access you give to the group. | -| Scopes | Set this to `openid` and `allatclaims`. | -| Email | Keep `email` as the default. | -| First Name | Keep `given_name` as the default. | -| Last Name | Keep `family_name` as the default. | -| Spectro Team | Keep `groups` as the default. | - -![Enable Palette OIDC SSO](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_configure-palette-oidc.png) - - -26. When all the information has been entered, click **Enable** to enable SSO. You will receive a message stating **OIDC configured successfully**. - - -## Create Teams in Palette - -The remaining step is to create teams in Palette for the group claims that you configured in AD FS, and give them the appropriate permissions. For this example, you will create the `Admins` team and give it **Tenant Admin** permissions. You can repeat this for any other team that you configured with group claims. - -27. Open a web browser and navigate to your Palette subscription. Navigate to **Tenant Settings** --> **Users & Teams** --> **Teams** tab, and click **+ Create Team**. - -![Create Palette Team](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_create-team.png) - -
- -28. Specify `Admins` in the **Team name** field. You don't need to set any members now, as this will happen automatically from the SSO. Click **Confirm** to create the team. - -![Name Palette Team](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_name-team.png) - -
- -29. The list of teams displays again. Select the newly created **Admins** team to review its details. To give this team administrative access to the entire tenant and all the projects in it, assign the **Tenant Admin** role. Select **Tenant Roles** and click **+ Add Tenant Role**: - -![Palette Tenant Roles](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_tenant-roles.png) - -
- -30. Click on **Tenant Admin** to enable the role. Click **Confirm** to add the role. - -![Add Tenant Role](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-tenant-role.png) - -
- -You will receive a message stating **Roles have been updated**. Repeat this procedure for any other teams, ensuring each one receives the appropriate permissions. - -31. Click the **X** next to **Team Details** in the top left corner to exit this screen. - -You have now successfully configured Palette SSO based on OIDC with Microsoft AD FS. - - -# Validate - -1. Log in to Palette through SSO as a user who is a member of the `SpectroTeam - Admins` group in Active Directory to verify that users are automatically added to the `Admins` group in Palette. - -If you're still logged in to Palette with a non-SSO user, log out by selecting **Logout** in the **User Menu** at the top right. - -![User Logout](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_user-logout.png) - -
- - -2. The Palette login screen now displays a **Sign in** button and no longer presents a username and password field. Below the **Sign In** button, there is an **SSO issues? --> Use your password** link. Use this link to bypass SSO and log in with a local Palette account if there is ever an issue with SSO and you need to access Palette without it. - -Click on the **Sign in** button to log in via SSO. - -![User SSO Login](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_palette-login.png) - -
- -3. If this is the first time you are logging in with SSO, you will be redirected to the Microsoft AD FS login page. Depending on your organization's SSO settings, this could be a simple login form or require MFA (Multi-Factor Authentication). - -Make sure you log in as a user that is a member of the `SpectroTeam - Admins` group in Active Directory. Once authenticated, you will automatically be redirected back to Palette and logged into Palette as that user. - -4. You are now automatically added to the `Admins` team in Palette. To verify, navigate to the left **Main Menu**, select **Tenant Settings** --> **Users & Teams** --> **Teams** tab. Click the **Admins** team and view the team members section. - -![Palette Team Members](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_team-members.png) - - -The user you logged in as has automatically been added to this team. - - -# Resources - -- [Microsoft AD FS](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/development/ad-fs-openid-connect-oauth-concepts) - -- [Microsoft AD FS Reverse Proxy](https://blog.matrixpost.net/set-up-active-directory-federation-services-ad-fs-5-0-adfs-reverse-proxy-part-2/) - -- [Palette User Management](/user-management) - -- [Palette SSO](/user-management/saml-sso) diff --git a/content/docs/18-knowledgebase/01-tutorials.md b/content/docs/18-knowledgebase/01-tutorials.md deleted file mode 100644 index 6e3aae9b4f..0000000000 --- a/content/docs/18-knowledgebase/01-tutorials.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: "Tutorials" -metaTitle: "Get started with some tutorials" -metaDescription: "KnowledgeBase with Tutorials" -icon: "laptop" -hideToC: true -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Tutorials - -Learn what's possible with Spectro Cloud Palette. Use the following tutorials to help expand your knowledge and become proficient with Palette. Choose a topic to review the tutorials available. - -# Clusters - -- [Deploy a Cluster with Palette](/knowledgebase/tutorials/deploy-k8s-cluster) - - -# Packs - -- [Create and Deploy a Custom Add-On Pack](/knowledgebase/tutorials/deploy-pack) - - -# Palette Dev Engine (PDE) - -- [Deploy an Application using Palette Dev Engine](/knowledgebase/tutorials/deploy-app) - - - - -
diff --git a/content/docs/18-knowledgebase/01-tutorials/05-deploy-app.md b/content/docs/18-knowledgebase/01-tutorials/05-deploy-app.md deleted file mode 100644 index 6242bd6970..0000000000 --- a/content/docs/18-knowledgebase/01-tutorials/05-deploy-app.md +++ /dev/null @@ -1,1255 +0,0 @@ ---- -title: "Deploy an Application using Palette Dev Engine" -metaTitle: "Deploy an Application using Palette Dev Engine" -metaDescription: "Learn how to deploy applications to a Kubernetes cluster without the traditional overhead accompanied by Kubernetes. Palette’s App Mode reduces the deployment time and complexity when deploying applications to Kubernetes. Learn how to get started with Palette’s App Mode in this tutorial. Get started with the free tier of Palette App Mode" -icon: "" -category: ["tutorial"] -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import InfoBox from 'shared/components/InfoBox'; -import WarningBox from 'shared/components/WarningBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Deploy an Application using Palette Dev Engine - -Palette’s mission is to reduce the challenges you, as a user, face when interacting with Kubernetes. Whether you are a system administrator or an application developer, Kubernetes can introduce overhead that slows down the development process. One of Palette’s core components, *Dev Engine*, focuses on reducing the application development time by enabling builders to deploy applications to Kubernetes with minimal friction. - -This tutorial will teach you how to deploy single and multiple applications to Kubernetes through Palette’s Dev Engine experience. You will learn about *App Mode*, *App Profiles*, and *Palette Virtual Clusters* and understand how they enable you to deploy applications to Kubernetes quickly with minimal overhead. - -# Prerequisites - -To complete this tutorial, you will need the following items. - -- A Spectro Cloud account -- Basic knowledge about containers. - -If you select the Terraform workflow, you will need the following software installed. -- Terraform v1.3.6 or greater -- Git v2.30.0 or greater - - -There are no expenses associated with this tutorial as everything falls under the Palette Free Tier. - - -# Architecture - -The tutorial includes two scenarios, and for each scenario, you will deploy a separate Kubernetes environment. The following diagram illustrates the different layers that will power the tutorial environment. - -![Architecture diagram depicting two virtual clusters](/tutorials/deploy-app/devx_apps_deploy-apps_architecture-diagram.png) - -The top layer is Palette, which is the product platform. Palette can be used in two modes: app mode or cluster mode. Each mode is intended for different use cases and personas, but for this tutorial, you will use app mode. For an in-depth explanation of each mode’s differences, check out the [App Mode and Cluster Mode](/introduction/palette-modes) documentation. - -# Deploy The Environment - -The following steps will guide you through deploying the two scenarios. You will start with the single application scenario to build up your knowledge before deploying the multiple applications scenario. - -From Palette, you will deploy two Palette Virtual Clusters. Palette Virtual Clusters will be referred to as virtual clusters for the rest of the tutorial. Each virtual cluster will be hosted on a host cluster group managed by us, Spectro Cloud, called *beehive*. 
You can deploy up to two virtual clusters in the beehive group for free, and both scenarios in this tutorial will host their virtual cluster there. - -
- - - - -Virtual clusters are standalone Kubernetes environments that sit on top of what you would consider a traditional, or host, Kubernetes cluster. Palette Virtual Clusters run as nested clusters within an existing host cluster and share the host cluster's resources, such as CPU, memory, and storage. They use k3s, a highly available, certified Kubernetes distribution designed for production workloads, and they are powered by vCluster. - - - -You can complete this tutorial through the Palette console, simulating a manual workflow, or you can leverage infrastructure as code and complete it using Terraform. - -
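Although this tutorial interacts with virtual clusters through Palette, a virtual cluster behaves like any other Kubernetes cluster once it is running. As a rough sketch, assuming you download the virtual cluster's kubeconfig file from its details page and save it locally (the file name below is hypothetical), you can explore it with standard kubectl commands:

```shell
# Point kubectl at the downloaded kubeconfig and inspect the virtual cluster.
export KUBECONFIG=./cluster-1.kubeconfig   # hypothetical file name
kubectl get namespaces
kubectl get pods --all-namespaces
```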
- - - - - - - - -## UI Workflow. - - - -Start by logging in to Palette. From the landing page, click on the user **drop-down Menu** and click on **App Mode**. - - - -![Image with an arrow pointing to the user drop-down Menu](/tutorials/deploy-app/devx_apps_deploy-apps_toggle-app-mode.png) - - - - -From the app mode landing page, navigate to the left **Main Menu** and click on **Virtual Clusters**. Next, click on the button **New Virtual Cluster**. - - - -![View of the virtual cluster list](/tutorials/deploy-app/devx_apps_deploy-apps_virtual-cluster-list.png) - - - -In the following screen, you will be prompted for the cluster group, virtual cluster name, and the cluster size in terms of CPU, memory, and storage. Select beehive for the cluster group, name the cluster `cluster-1`, and allocate 4 CPU, 4 GiB memory, and 2 GiB of storage. Click on **Deploy Virtual Cluster** after you have filled out all the required information. - - - -Palette’s Dev Engine allows you to deploy up to two virtual clusters into the Beehive cluster group. Each virtual cluster requires a minimum of 4 CPU, 4 GiB memory, and 2 GiB storage. When using the Beehive cluster, you can allocate a maximum of 12 CPU, 16 Gib memory, and 20 GiB of storage. Check out the [Palette Dev Engine and Quotas](/devx/manage-dev-engine/resource-quota) documentation to learn more about limits. - - - -It will take a few minutes for the virtual cluster to deploy. In the meantime, navigate to the left **Main Menu** and click on **App Profiles**. - - - - -![The App Profile page with arrows guiding](/tutorials/deploy-app/devx_apps_deploy-apps_app-profiles.png) - - - - -App Profiles are templates that contain all the configurations and settings required to deploy applications to virtual clusters. App Profiles provide a way to drive consistency across virtual clusters as you can re-use app profiles and deploy them to different virtual clusters. You can think of app profiles as declarative templates that inform the Kubernetes cluster of the desired application or set of applications. - - - -Click on the **New App Profile** button to start creating your first app profile. Give the app profile the name `hello-universe-ui` and add the tag `scenario-1`. Click on **Next**. The following screen is the service type selection page. You have the option to deploy applications through containers, Helm, or Manifests. You can also consume services such as databases and more. Click on **Container Deployment**. - - - -Name the container `ui`, select a public registry, and provide the image URL `ghcr.io/spectrocloud/hello-universe:1.0.10`. Change the network access to **Public** and add the port `8080`. - - - -![App Profile container creation page with details](/tutorials/deploy-app/devx_apps_deploy-apps_app-profile-creation.png) - - - - -Click on **Review** once you have filled out the provided information. On the next page, click on the **Deploy New App** button. - - - -It’s time to deploy your application to a virtual cluster. Name the application `single-scenario`. For the **App profile** input field, click on the button to the right of the input field to get a list of all your available app profiles. Select the **hello-universe-ui profile** and click on **Confirm**. - - - -Next, click the radio button **Deploy in An Existing Palette Virtual Cluster**. Select **cluster-1** and click on **Create App** to deploy the app profile onto the virtual cluster. - - - -
- - - - - - - -If no clusters are displayed, then **cluster-1** is not yet available. Wait a few more moments and return to the above steps. You can refresh the page, but you must fill out all the required input fields. - - - - - - - -The app profile deployment takes a few moments to finish. You can review the application's deployment progress by navigating to the left **Main Menu** and selecting **Virtual Clusters**. Click on **cluster-1** to view its details page. You can review cluster information, log events, access a remote shell session in the cluster, and more from the cluster details page. - - - -![Cluster details view displaying exposed services](/tutorials/deploy-app/devx_apps_deploy-apps_cluster-details-view.png) - - - -When the application is deployed and ready for use, the **Services** row on the details page will automatically be updated by Palette with the app's public-facing URL. Click on the **:8080** link to view the application. - - - -
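If the page does not load right away, DNS for the load balancer hostname may still be propagating, as the note below explains. You can check resolution from a terminal; the hostname in this sketch is hypothetical, so replace it with the hostname shown in the **Services** row.

```shell
# Check whether the public load balancer hostname resolves yet.
# The hostname is a hypothetical placeholder; use the one from the Services row.
nslookup hello-universe-example.example.com
```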
- - - - - - - - - -It takes between one to three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request. - - - - - - -![Hello Universe landing page displaying global clicks](/tutorials/deploy-app/devx_apps_deploy-apps_hello-universe.png) - - - -Welcome to [Hello Universe](https://github.com/spectrocloud/hello-universe), a demo application to help you learn more about Palette and its features. Feel free to click on the logo to increase the global counter and for a fun image change. - - - -You have deployed your first application to Palette. Your first application is a single container application with no upstream dependencies. In a production environment, you often deploy applications that consume other services and require connectivity with other resources. The next scenario expands on the single application scenario by adding an API server and Postgres database to simulate a common application architecture encountered in a production environment. - - - -## Deploy Multiple Applications - - - -Create another virtual cluster for the multi-application scenario. From the app mode landing page, navigate to the left **Main Menu** and click on **Virtual Clusters**. Next, click on the **New Virtual Cluster** button. - - - -Add the following details. Select beehive for the cluster group, name the cluster **cluster-2**, add the tag **scenario-2**, and allocate 8 CPU, 12 GiB memory, and 12 GiB of storage. Click on **Deploy Virtual Cluster** after you have filled out all the required information. - - - -It will take a few minutes for the new virtual cluster to deploy. In the meantime, go ahead and navigate to the left **Main Menu** and click on **App Profiles**. - - - -### Postgres - - - -Click on the **New App Profile** button to create your second app profile. Give the app profile the name `hello-universe-complete` and add the tag `scenario-2`. Click on **Next**. This application profile will contain three different applications, and you will create a service configuration for each. The three layers or tiers will together make up the entire application deployment. The order in which you create each layer plays an important role, as it dictates the deployment order. For this scenario, you will deploy the database, the API, and the UI. To create the first layer, select the database service Postgres. - - - - -In the next screen, assign the following values to the Postgres database. - - - -- Name: `postgres-db` - -- Username: `pguser` - -- Database Name: `counter` - -- Database Volume Size: `2` - -- Version: `14` - - - -![Postgres service creation page](/tutorials/deploy-app/devx_apps_deploy-apps_postgres-service-create.png) - - - -Take note of the **Output Variables** section. The Postgres service exposes several output variables to help other applications connect with the database. In the next section, you will use these output variables and other output variables that Palette exposes for each service. You can learn more about output variables by reviewing the app profile [output variables](/devx/app-profile/app-profile-macros) documentation. - - - -Next, navigate to the top left side of the wizard screen and click on the **Actions** button **+**. Go ahead and select **Container Deployment**. - - - -### API - - - -The API is available as a container image. 
To deploy the API successfully, you need to provide the API server with information about the database such as hostname, database user, database name, and password. The required information can be retrieved using Palette's global output variables and the output variables the database service exposes. - - - -Provide the container service with the following information: - - - -- Container Name: `api` - -- Registry: Public - -- Image: `ghcr.io/spectrocloud/hello-universe-api:1.0.8` - -- Network Access: Public - -- Ports: `3000` - - - -Assign the following environment variables to the API service: - - - -| Parameter | Value | -|------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `DB_NAME` | `counter` | -| `DB_HOST` | `{{.spectro.app.$appDeploymentName.postgres-db.POSTGRESMSTR_SVC}}` | -| `DB_PASSWORD` | `{{.spectro.app.$appDeploymentName.postgres-db.PASSWORD}}` | -| `DB_INIT` | `true` | -| `DB_USER` | `{{.spectro.app.$appDeploymentName.postgres-db.USERNAME}}` | -| `DB_ENCRYPTION` | `require` | -| `AUTHORIZATION` | `true` | - - - - - -You can learn more about each environment variable's purpose by reviewing the API server's [documentation](https://github.com/spectrocloud/hello-universe-api#environment-variables). One variable that you should understand in greater detail is the `DB_HOST.` The value of this environment variable is constructed using the output variables the Postgres service exposed. The `{{.spectro.app.$appDeploymentName.postgres-db.POSTGRESMSTR_SVC}}` variable contains the Kubernetes DNS value of the Postgres service container. - -
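If you are curious what `DB_HOST` ends up pointing at, you can resolve the service name from inside the virtual cluster once the app is deployed later in this section. The sketch below assumes you are connected to the virtual cluster with kubectl, and the service name shown is a hypothetical example of the rendered value; substitute the actual value injected into the API container.

```shell
# Resolve the Postgres service DNS name from a temporary pod inside the virtual cluster.
# The service name is a hypothetical example of the rendered DB_HOST value.
kubectl run dns-check --rm -it --restart=Never --image=busybox:1.36 -- \
  nslookup postgres-db-svc.postgres-db-ns
```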
- - - -To learn more about connecting different service layers, refer to the [Service Connectivity](/devx/app-profile/services/connectivity) resource. - - - - - -A virtual cluster is a Kubernetes environment, and because it’s a Kubernetes environment, you can use the [Kubernetes DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) record created for each service and pod. You will have another opportunity to practice this concept when you deploy the UI. - - - -When you have filled out all the required information, navigate to the top left side of the wizard screen and click on the **Actions** button **+**. Select the **Container Deployment** to add the final service layer, the UI. - - - -### UI - - - -This time the UI will point to the API server that you manage. The API server has authentication enabled, so to ensure all API requests are accepted you will provide the UI with the anonymous token. - - - -![A diagram of the reverse proxy architecture](/tutorials/deploy-app/devx_apps_deploys-apps_reverse-proxy-diagram.png) - - - -Provide the UI container with the following information. - -- Container Name: `ui` - -- Registry: Public - -- Image: `ghcr.io/spectrocloud/hello-universe:1.0.10` - -- Network Access: Public - -- Ports: `8080` - - - -Assign the following environment variables to the UI service: - - - -| Parameter | Value | -|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `API_URI` | `http://{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC_EXTERNALHOSTNAME}}:3000` | -| `TOKEN` | `931A3B02-8DCC-543F-A1B2-69423D1A0B94` | - - - -If you want to explore the UI service's environment variables in greater detail, you can review the UI [documentation](https://github.com/spectrocloud/hello-universe). The `API_URI` contains the address of the application load balancer that will be deployed for the API service. - -The output variable `{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC_EXTERNALHOSTNAME}}` is used to retrieve the load balancer URL value. - - -Click on the **Review** button at the bottom of the screen to finalize the app profile. Click on **Deploy New App** in the following screen to deploy the new app profile to cluster-2. - - - -Name the app `multiple-app-scenario`, select the app profile **hello-universe-complete**, pick version **1.0.0** and toggle the radio button **Deploy In An Existing Palette Virtual Cluster**. Select **cluster-2** and click on **Create App**. - - -
- - - - - - - -If cluster-2 is not displayed. Wait a few more moments and return to the above steps. You can refresh the page but you must fill out all the required input fields. - - - - - - - -![App deployment cluster-2](/tutorials/deploy-app/devx_app_deploy-apps_cluster-2-deploy-app.png) - - - - -The app profile deployment takes a few moments to finish. You can review the application's deployment progress by navigating to the left **Main Menu** and selecting **Virtual Clusters**. Click on **cluster-2** to view its details page. - -Once the app is successfully deployed, the cluster details page will expose the public-facing URLs of the services. - - - -![Cluster 2's details page](/tutorials/deploy-app/devx_apps_deploy-apps_cluster-2-details-page.png) - - - -Click on the UI’s service URL for port **8080** to access the Hello Universe application in a three-tier configuration. - - - - - -![View of the self-hosted version of Hello Universe](/tutorials/deploy-app/devx_apps_deploy-app_self-hosted-hello-universe.png) - - - - -The global counter is no longer available. Instead, you have a counter that starts at zero. Each time you click on the center image, the counter is incremented and stored in the Postgres database along with metadata. Also, remember that the reverse proxy injects the Bearer token value in each request sent to the API. - - - -## Cleanup - - - -To remove all resources created in this tutorial, begin by navigating to the left **Main Menu** and select **Apps**. For each application, click on the **three-dots Menu** to expand the options menu and click on the **Delete** button. Repeat this process for each application. - - - -![Apps view with an arrow pointing towards the delete button](/tutorials/deploy-app/devx_apps_deploy-apps_delete-apps-view.png) - - - -Next, in the left **Main Menu**, click on the **Cluster** link to access the clusters page. - -Click on **cluster-1** to access its details page. Click on **Settings** from the details page to expand the settings menu. Click on **Delete** to delete the cluster. You will be asked to enter the cluster name to confirm the delete action. Type the cluster name to proceed with the delete step. Repeat this process for cluster-2. - - - -![Delete a cluster view with arrow](/tutorials/deploy-app/devx_apps_deploy-apps_delete-cluster-view.png) - - - - - - -If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for **Force Delete**. To trigger a force delete, navigate to the respective cluster’s details page and click on **Settings**. Click on the **Force Delete Cluster** to delete the cluster. Palette will automatically remove clusters stuck in the cluster deletion phase for over 24 hours. - - - - -
- - - - -## Terraform Workflow - -The [Spectro Cloud Terraform](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) provider enables you to create and manage Palette resources in a codified manner by leveraging Infrastructure as Code (IaC). There are many reasons why you would want to utilize IaC. A few reasons worth highlighting are: the ability to automate infrastructure, improve collaboration related to infrastructure changes, self-document infrastructure through codification, and track all infrastructure in a single source of truth. If you need to become more familiar with Terraform, check out the [Why Terraform](https://developer.hashicorp.com/terraform/intro) explanation from HashiCorp. - -
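Before you begin, you may want to confirm that your local tooling meets the versions listed in the prerequisites:

```shell
# Verify the Terraform and Git versions against the tutorial prerequisites.
terraform -version
git --version
```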
- - - -As you go through the Terraform workflow, be aware that high-level Palette concepts will not be discussed in depth, to keep the focus on the Terraform concepts that apply to Palette. To better understand the mentioned Palette concepts, review the UI workflow, where they are explained in greater detail. - - - -Open a terminal window to begin the tutorial and clone the tutorial code from GitHub. - -```shell -git clone git@github.com:spectrocloud/tutorials.git -``` - -Change directory to the tutorial folder. - -```shell -cd tutorials/ -``` - -Check out the following git tag. - -```shell -git checkout v1.0.6 -``` - -Change directory to the tutorial code. - -```shell -cd terraform/hello-universe-tf/ -``` - -Before you can get started with the Terraform code, you need a Spectro Cloud API key. - -### API Key - -To create an API key, log in to Palette, click on the **User Menu**, and select **My API Keys**. - -![Image that points to the user drop-down Menu and points to the API key link](/tutorials/deploy-app/devx_apps_deploy-app_create-api-key.png) - -Next, click on **Add New API Key**. Fill out the required input fields, **API Key Name** and **Expiration Date**. Click on **Confirm** to create the API key. Copy the key value to your clipboard, as you will use it shortly. - - -### Initialize Terraform - -The tutorial folder contains several Terraform files that you should review and explore. Each file is named after the respective type of Palette resource it supports. Use the following list to gain a high-level overview of the files. -
- -- **provider.tf** - the provider configuration and version of the provider. -- **inputs.tf** - contains all the Terraform variables and the default values used in the tutorial. -- **outputs.tf** - contains the output variables that are used to expose information. -- **data.tf** - all the data resources that are used to dynamically retrieve data from Palette. -- **virtual-clusters.tf** - the code for the virtual clusters that will be deployed in Palette. -- **application-profiles.tf** - contains the configurations that make up all the app profiles. -- **application.tf** - the configuration that creates a Spectro Cloud app and deploys the app into a virtual cluster. - -The [Spectro Cloud Terraform](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) provider requires credentials to interact with the Palette API. Export the API key as an environment variable so that the Spectro Cloud provider can authenticate with the Palette API. - -```shell -export SPECTROCLOUD_APIKEY=YourAPIKeyHere -``` - -Next, initialize the Terraform provider by issuing the following command. - -```shell -terraform init -``` - -``` -Initializing the backend... - -Initializing provider plugins... - -Terraform has been successfully initialized! - -You may now begin working with Terraform. Try running "terraform plan" to see -any changes that are required for your infrastructure. All Terraform commands -should now work. - -If you ever set or change modules or backend configuration for Terraform, -rerun this command to reinitialize your working directory. If you forget, other -commands will detect it and remind you to do so if necessary. -``` - -The `init` command downloads all the required plugins and providers specified in **provider.tf** file. In the provider configuration, the scope or context of Palette is set. The provider is configured for the `Default` project, but you can change this value to point to any other projects you may have in Palette. - -
- -```tf -terraform { - required_providers { - spectrocloud = { - version = ">= 0.11.1" - source = "spectrocloud/spectrocloud" - } - } -} - -provider "spectrocloud" { - project_name = "Default" -} -``` - -To deploy the first scenario, a single application container, you must first create a configuration for the virtual cluster. Look at the virtual cluster resources in **virtual-clusters.tf**, and check out the "cluster-1" resource. The resource specifies the cluster name, the cluster group id, the resource limits, and the tags that will apply to the cluster. - -
- -```tf -resource "spectrocloud_virtual_cluster" "cluster-1" { - name = var.scenario-one-cluster-name - cluster_group_uid = data.spectrocloud_cluster_group.beehive.id - - resources { - max_cpu = 4 - max_mem_in_mb = 4096 - min_cpu = 0 - min_mem_in_mb = 0 - max_storage_in_gb = "2" - min_storage_in_gb = "0" - } - - tags = concat(var.tags, ["scenario-1"]) - - timeouts { - create = "15m" - delete = "15m" - } -} - -``` - -The cluster group id is retrieved from the data resource `spectrocloud_cluster_group.beehive`. The data resource will query the Palette API and retrieve information about the specified cluster group, which is the *beehive* cluster group made available for all Palette users. This resource will create a new virtual cluster that is hosted in the *beehive* cluster group. - -
- -```tf -data "spectrocloud_cluster_group" "beehive" { - name = var.cluster-group-name - context = "system" -} -``` - -Next, take a look at the **application-profiles.tf** file. The resource `spectrocloud_application_profile.hello-universe-ui` is the resource responsible for creating the app profile for the first scenario. There are several points of interest in this resource that you should be familiar with. Focus on these five key points: - -
- -1. The pack object represents a single tier or layer in the app profile. Inside the pack object, you define all the attributes that make up the specific layer of the app profile. - - -2. The type of app layer. This application is hosted on a container image. Therefore a container pack is specified. Instead of hard coding the value, the data resource `data.spectrocloud_pack_simple.container_pack` is specified. - - -3. A pack requires a registry id. To create the app profile, Terraform needs to know what registry is hosting the pack. For containers, you can use the `Public Repo` hosting most of the Palette packs. This time the data resource `data.spectrocloud_registry.public_registry` is specified to avoid hardcoding values. - - -4. The attribute `source_app_tier` is used to specify the unique id of the pack. All packs are assigned a unique id, including different versions of a pack. To ensure the correct pack is selected, the data resource `data.spectrocloud_pack_simple.container_pack` is used. - - -5. The `values` attribute is used to specify the properties of the specific service. In this case, the properties of the container such as the image name, ports, and service type, are specified. These properties can be provided as an extended string using the [Terraform Heredoc strings](https://developer.hashicorp.com/terraform/language/expressions/strings#heredoc-strings), or you can specify these values as a stringified JSON object. - - - - -```tf -resource "spectrocloud_application_profile" "hello-universe-ui" { - name = "hello-universe-ui" - description = "Hello Universe as a single UI instance" - version = "1.0.0" - pack { - name = "ui" - type = data.spectrocloud_pack_simple.container_pack.type - registry_uid = data.spectrocloud_registry.public_registry.id - source_app_tier = data.spectrocloud_pack_simple.container_pack.id - values = <<-EOT - pack: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - releaseNameOverride: "{{.spectro.system.appdeployment.tiername}}" - postReadinessHooks: - outputParameters: - - name: CONTAINER_NAMESPACE - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: metadata.namespace - - name: CONTAINER_SVC - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: metadata.annotations["spectrocloud.com/service-fqdn"] - - name: CONTAINER_SVC_EXTERNALHOSTNAME - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: status.load balancer.ingress[0].hostname - conditional: true - - name: CONTAINER_SVC_EXTERNALIP - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: status.load balancer.ingress[0].ip - conditional: true - - name: CONTAINER_SVC_PORT - type: 
lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: spec.ports[0].port - containerService: - serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" - registryUrl: "" - image: ${var.single-container-image} - access: public - ports: - - "8080" - serviceType: load balancer - EOT - } - tags = concat(var.tags, ["scenario-1"]) -} -``` - - - - -A tip for gathering the required values to provide the `values` attribute is to visit the Palette console and create the app profile through the UI. During the app profile creation process, click on the API button to display the API payload. Review the payload's `values` attribute to find all of the properties of the service. You can copy the entire string and pass it to the resource `spectrocloud_application_profile` as an input for the `values` attribute. - - -![UI's ability to display the API object](/tutorials/deploy-app/devx_apps_deploy-apps_ui-api-display.png) - - -The last Terraform resource to review before deploying the application is located in the **application.tf** file. The resource `spectrocloud_application.hello-universe-ui` is what creates the *app*. In Palette, an app combines a virtual cluster and an app profile. When you deploy an app profile into a virtual cluster, you create an app. This resource points to the app profile `spectrocloud_application_profile.hello-universe-ui` and the cluster resource `spectrocloud_virtual_cluster.cluster-1`. The two resources are required to create an app. - -
- - - - - -```tf -resource "spectrocloud_application" "scenario-1" { - name = "single-scenario" - application_profile_uid = spectrocloud_application_profile.hello-universe-ui.id - - config { - cluster_name = spectrocloud_virtual_cluster.cluster-1.name - cluster_uid = spectrocloud_virtual_cluster.cluster-1.id - } - tags = concat(var.tags, ["scenario-1"]) -} -``` - - - -You can preview the resources Terraform will create by issuing the following command. - -```shell -terraform plan -``` - -``` -// Output condensed for readability -Plan: 3 to add, 0 to change, 0 to destroy. - -``` - -The output displays the resources Terraform will create in an actual implementation. If you review the output, you will find the three resources previously discussed in great detail. - -Go ahead and deploy the application by using the `terraform apply` command. - -```shell -terraform apply -auto-approve -``` - -``` -// Output condensed for readability -Apply complete! Resources: 3 added, 0 changed, 0 destroyed. -``` - -Log in to [Palette](https://console.spectrocloud.com), navigate to the left **Main Menu**, and select **Apps**. Click on the **scenario-1** row, which takes you to the application’s overview page. Once you are on the scenario-1 overview page, click on the exposed URL for the service. A hyperlink for port 8080 is available. - - -![scenario-1 overview page with an arrow pointing to the URL](/tutorials/deploy-app/devx_app_deploy-apps_scenario-1-overview.png) - -
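If you prefer to stay in the terminal, you can also list the resources Terraform now tracks in its state. `terraform state list` is a standard Terraform command; the exact resource addresses it prints come from the tutorial's configuration files.

```shell
# List the resources recorded in the Terraform state after the apply.
terraform state list
```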
- - - - -It takes between one to three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request. - - - -Welcome to Hello Universe, a demo application to help you learn more about Palette and its features. Feel free to click on the logo to increase the global counter and for a fun image change. - - -![Hello Universe landing page displaying global clicks](/tutorials/deploy-app/devx_apps_deploy-apps_hello-universe.png) - - -You have deployed your first app profile to Palette. Your first application is a single container application with no upstream dependencies. In a production environment, you often deploy applications that consume other services and require connectivity with other resources. The following scenario expands on the single application scenario by adding an API server and Postgres database to simulate a common application architecture encountered in a production environment. - - -## Deploy Multiple Applications - - -The second scenario contains two additional microservices, an API, and a Postgres database. This time, instead of using a the global API for storing clicks, you will instead deploy your own API server and Postgres database. The following diagram illustrates the network connectivity path and behavior discussed. - - -![A diagram of the three-tier architecture where the load balancer forwards all requests to the UI container OR the API container](/tutorials/deploy-app/devx_apps_deploys-apps_reverse-proxy-diagram.png) - -To deploy the second scenario, you will again deploy the same three resource types previously discussed but another instance of them. - -- `spectrocloud_virtual_cluster` - `cluster-2` - this resource will create the second virtual cluster. - - -- `spectrocloud_application_profile` - `hello-universe-complete` - the application profile that will contain the three different services, database, API, and UI. - - -- `spectrocloud_application` - `scenario-2` - the application that will be deployed into cluster-2 that uses the `spectrocloud_application_profile.hello-universe-complete` app profile. - - -You can review all the resources for the second scenario in the respective Terraform files. You can find the second scenario code after the comment block in all of the files that have resources specific to the second scenario. - -```tf -########################################## -# Scenario 2: Multiple Applications -########################################## -``` - - -From a Terraform perspective, there are no significant differences in the authoring experience. The main difference in the second scenario lies in the application profile resource `spectrocloud_application_profile.hello-universe-complete`. The other difference is that the virtual cluster you will deploy in the second scenario, cluster-2, is much larger than cluster-1. - -You can add multiple services to an app profile, but you must add a `pack {}` block for each service in the `spectrocloud_application_profile` resource. Take a close look at the `spectrocloud_application_profile.hello-universe-complete` resource below. - -
- -``` -resource "spectrocloud_application_profile" "hello-universe-complete" { - count = var.enable-second-scenario == true ? 1 : 0 - name = "hello-universe-complete" - description = "Hello Universe as a three-tier application" - version = "1.0.0" - pack { - name = "postgres-db" - type = data.spectrocloud_pack_simple.postgres_service.type - source_app_tier = data.spectrocloud_pack_simple.postgres_service.id - properties = { - "dbUserName" = var.database-user - "databaseName" = var.database-name - "databaseVolumeSize" = "8" - "version" = var.database-version - } - } - pack { - name = "api" - type = data.spectrocloud_pack_simple.container_pack.type - registry_uid = data.spectrocloud_registry.public_registry.id - source_app_tier = data.spectrocloud_pack_simple.container_pack.id - values = <<-EOT -pack: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - releaseNameOverride: "{{.spectro.system.appdeployment.tiername}}" -postReadinessHooks: - outputParameters: - - name: CONTAINER_NAMESPACE - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: metadata.namespace - - name: CONTAINER_SVC - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: metadata.annotations["spectrocloud.com/service-fqdn"] - - name: CONTAINER_SVC_EXTERNALHOSTNAME - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: status.load balancer.ingress[0].hostname - conditional: true - - name: CONTAINER_SVC_EXTERNALIP - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: status.load balancer.ingress[0].ip - conditional: true - - name: CONTAINER_SVC_PORT - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: spec.ports[0].port -containerService: - serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" - registryUrl: "" - image: ${var.multiple_container_images["api"]} - access: public - ports: - - "3000" - serviceType: load balancer - env: - - name: DB_HOST - value: "{{.spectro.app.$appDeploymentName.postgres-db.POSTGRESMSTR_SVC}}" - - name: DB_USER - value: "{{.spectro.app.$appDeploymentName.postgres-db.USERNAME}}" - - name: DB_PASSWORD - value: "{{.spectro.app.$appDeploymentName.postgres-db.PASSWORD}}" - - name: DB_NAME - value: counter - - name: DB_INIT - value: "true" - - name: DB_ENCRYPTION - value: "${var.database-ssl-mode}" - - name: AUTHORIZATION - value: "true" - EOT - } - pack { - name = "ui" - type = data.spectrocloud_pack_simple.container_pack.type - registry_uid = 
data.spectrocloud_registry.public_registry.id - source_app_tier = data.spectrocloud_pack_simple.container_pack.id - values = <<-EOT - pack: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - releaseNameOverride: "{{.spectro.system.appdeployment.tiername}}" - postReadinessHooks: - outputParameters: - - name: CONTAINER_NAMESPACE - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: metadata.namespace - - name: CONTAINER_SVC - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: metadata.annotations["spectrocloud.com/service-fqdn"] - - name: CONTAINER_SVC_EXTERNALHOSTNAME - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: status.load balancer.ingress[0].hostname - conditional: true - - name: CONTAINER_SVC_EXTERNALIP - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: status.load balancer.ingress[0].ip - conditional: true - - name: CONTAINER_SVC_PORT - type: lookupSecret - spec: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" - ownerReference: - apiVersion: v1 - kind: Service - name: "{{.spectro.system.appdeployment.tiername}}-svc" - keyToCheck: spec.ports[0].port - containerService: - serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" - registryUrl: "" - image: ${var.multiple_container_images["ui"]} - access: public - ports: - - "8080" - env: - - name: "API_URI" - value: "http://{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC_EXTERNALHOSTNAME}}:3000" - - name: "TOKEN" - value: "${var.token}" - serviceType: load balancer - EOT - } - tags = concat(var.tags, ["scenario-2"]) -} -``` - -Each service has its own `pack {}` and a set of unique properties and values. - -The database service block uses a different data resource, `data.spectrocloud_pack_simple.postgres_service`, to find the Postgres service. If you review the data resource, you will find a different type, `operator-instance`. The Postgres service uses a Postgres [operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) to manage the database inside the virtual cluster. - -
- -``` -data "spectrocloud_pack_simple" "postgres_service" { - name = "postgresql-operator" - type = "operator-instance" - version = "1.8.2" - registry_uid = data.spectrocloud_registry.public_registry.id -} -``` - -Inside the `pack {}` block, the database service uses the `properties` attribute instead of the `values` attribute. The `properties` values provided are the same properties you must fill out when creating the database service through the UI workflow. - -
- -``` - pack { - name = "postgres-db" - type = data.spectrocloud_pack_simple.postgres_service.type - source_app_tier = data.spectrocloud_pack_simple.postgres_service.id - properties = { - "dbUserName" = var.database-user - "databaseName" = var.database-name - "databaseVolumeSize" = "8" - "version" = var.database-version - } - } -``` - -If you go further down the app profile stack, you will find the `pack {}` object for the API. A good part of the content provided to the `values` attribute will be removed in the following code snippet to improve readability. Take a closer look at the `env` block inside the `containerService` section. The API server requires a set of environment variables to start properly, such as the database hostname, user, password, and more. The Postgres service lower in the app profile stack exposes output variables you can use to provide information to other services higher up in the app profile stack. - -The `env` section uses the output variables exposed by the Postgres service. Other environment variables specified will be populated during Terraform runtime because they reference Terraform variables. Palette will populate the environment variables referencing a Palette output variable at runtime inside the virtual cluster. - -
- -``` -pack { - name = "api" - type = data.spectrocloud_pack_simple.container_pack.type - registry_uid = data.spectrocloud_registry.public_registry.id - source_app_tier = data.spectrocloud_pack_simple.container_pack.id - values = <<-EOT -pack: - namespace: "{{.spectro.system.appdeployment.tiername}}-ns" - releaseNameOverride: "{{.spectro.system.appdeployment.tiername}}" -postReadinessHooks: - outputParameters: - #.... - #... -containerService: - serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" - registryUrl: "" - image: ${var.multiple_container_images["api"]} - access: public - ports: - - "3000" - serviceType: load balancer - env: - - name: DB_HOST - value: "{{.spectro.app.$appDeploymentName.postgres-db.POSTGRESMSTR_SVC}}" - - name: DB_USER - value: "{{.spectro.app.$appDeploymentName.postgres-db.USERNAME}}" - - name: DB_PASSWORD - value: "{{.spectro.app.$appDeploymentName.postgres-db.PASSWORD}}" - - name: DB_NAME - value: counter - - name: DB_INIT - value: "true" - - name: DB_ENCRYPTION - value: "${var.database-ssl-mode}" - - name: AUTHORIZATION - value: "true" - EOT - } -``` - -The last `pack {}` block in the app profile resource `spectrocloud_application_profile.hello-universe-complete` is for the UI. Like the API service, environment variables are used to initialize the UI and the reverse proxy. The UI service requires the URL of the API service and the URL of the public-facing load balancer. Palette output variables are used to populate these two environment variables. A Terraform variable will populate the authentication token required for all API requests. - -
- -``` -pack { - name = "ui" - type = data.spectrocloud_pack_simple.container_pack.type - registry_uid = data.spectrocloud_registry.public_registry.id - source_app_tier = data.spectrocloud_pack_simple.container_pack.id - values = <<-EOT - # .... - # .... - containerService: - serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" - registryUrl: "" - image: ${var.multiple_container_images["ui"]} - access: public - ports: - - "8080" - env: - - name: "API_URI" - value: "http://{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC_EXTERNALHOSTNAME}}:3000" - - name: "TOKEN" - value: "${var.token}" - serviceType: load balancer - EOT - } - -``` - - - - - -All container services expose their service address, Kubernetes hostname, and the exposed service ports as output variables. -You will use output variables frequently when creating app profiles in the future. You can learn more about connecting services by referring to the [Service Connectivity](/devx/app-profile/services/connectivity) documentation. - - - - -Open the **inputs.tf** file and set the `variable enable-second-scenario"` default value to `true`. - -
- -```terraform -variable "enable-second-scenario" { - type = bool - description = "Whether to enable the second scenario" - default = true -} -``` - -Next, issue the command `terraform apply` to deploy the second scenario. Notice how the `-var` flag is included with the token value in the command. - -
- -```shell -terraform apply -var="token=931A3B02-8DCC-543F-A1B2-69423D1A0B94" -auto-approve -``` - -``` -// Output condensed for readability -Apply complete! Resources: 3 added, 0 changed, 0 destroyed. -``` - -Log in to [Palette](https://console.spectrocloud.com), navigate to the left **Main Menu**, and click on **Apps**. Select the **scenario-2** row. When you are on the scenario-2 overview page, click on the exposed URL for the service. Hyperlinks for port 8080 and port 3000 are available. - -![A view of the scenario-2 overview page](/tutorials/deploy-app/devx_apps_deploy_scenario-2-overview.png) - -Click on the UI’s service URL for port **8080** to access the Hello Universe application in a three-tier configuration. - -
- - - - -It takes one to three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request. - - - -![View of the self-hosted hello universe app](/tutorials/deploy-app/devx_apps_deploy-app_self-hosted-hello-universe.png) - -The global counter is no longer available. Instead, you have a counter that starts at zero. Each time you click on the center image, the counter is incremented and stored in the Postgres database along with metadata. - - -## Cleanup - -To remove all resources created in this tutorial, issue the `terraform destroy` command. - -
- -```shell -terraform destroy -var="token=931A3B02-8DCC-543F-A1B2-69423D1A0B94" -auto-approve -``` - -``` -Destroy complete! Resources: 6 destroyed. -``` - -
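If you exported the Palette API key into your shell earlier in this workflow and no longer need it, you can also remove it from the current session:

```shell
# Remove the Palette API key from the current shell environment.
unset SPECTROCLOUD_APIKEY
```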
- - - - -If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for **Force Delete**. To trigger a force delete, navigate to the respective cluster’s details page and click on **Settings**. Click on the **Force Delete Cluster** to delete the cluster. Palette will automatically remove clusters stuck in the cluster deletion phase for over 24 hours. - - - -
- -
- -
- - -# Wrap-Up - -In this tutorial, you learned about Palette’s Dev Engine and App Mode. You deployed two virtual clusters, each containing a different architecture and configuration of the Hello Universe application. Palette’s Dev Engine enables developers to quickly deploy applications into a Kubernetes environment without requiring Kubernetes knowledge. In a matter of minutes, you deployed a new Kubernetes cluster and all its applications without having to write Kubernetes configuration files. - -To learn more about Palette Dev Engine and its capabilities, check out the reference resources below. -
- -- [Palette Modes](/introduction/palette-modes) -- [Spectro Cloud Terraform Provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) -- [App Profiles](/devx/app-profile) -- [App Services](/devx/app-profile/services) -- [Palette Virtual Clusters](/devx/palette-virtual-clusters) -- [Hello Universe GitHub repository](https://github.com/spectrocloud/hello-universe) diff --git a/content/docs/18-knowledgebase/01-tutorials/4.5-deploy-pack.md b/content/docs/18-knowledgebase/01-tutorials/4.5-deploy-pack.md deleted file mode 100644 index 3995244b7e..0000000000 --- a/content/docs/18-knowledgebase/01-tutorials/4.5-deploy-pack.md +++ /dev/null @@ -1,938 +0,0 @@ ---- -title: 'Deploy an Add-On Pack' -metaTitle: 'Deploy an Add-On Pack' -metaDescription: 'How to create and deploy an add-on pack using the manifest files or Helm charts in Spectro Cloud.' -icon: '' -hideToC: true -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - - -# Create and Deploy a Custom Add-On Pack -Custom add-on packs allow you to deploy Kubernetes applications in clusters and reuse them in multiple deployments. This ensures uniformity across your clusters. The primary use cases for creating custom packs are: - -- Aggregated configuration and application dependencies simplify deployment and consumption. - -- Open-source contributors can add new Kubernetes applications to a custom add-on pack for the community. - -- Enterprises can add proprietary Kubernetes applications to a custom add-on pack. - -In this tutorial, you will create a custom add-on pack to package a sample Kubernetes application, [Hello Universe](https://github.com/spectrocloud/hello-universe#hello-universe), and deploy that application to a cluster. You will learn to create the pack in two ways, using manifest files and Helm charts. - -After defining the custom pack, you will set up a registry server, publish the pack to that registry, and configure the registry server in Palette. Lastly, you will create a cluster profile that contains your custom pack and apply the profile to a cluster using either Palette or Terraform. - - -# Prerequisites -To complete the tutorial, you will need the following items: -
- -1. A Spectro Cloud account. Visit the [sign-up](https://www.spectrocloud.com/free-tier) page to create an account. - - -2. Tenant admin access to Palette for the purpose of adding a new registry server. - - -3. A cloud account, such as AWS, Azure, or GCP, added to your Palette project settings. - - -4. An SSH key created in the region where you will deploy the cluster. - - -5. [Docker Desktop](https://docs.docker.com/get-docker/) installed on your local machine to start the tutorials container. - - -6. Basic knowledge of Docker containers and Kubernetes manifest file attributes. - - - - -# Set Up the Tutorial Environment -You will work in a Docker container pre-configured with the necessary tools for this tutorial. However, you can practice this tutorial in any `linux/amd64` or `x86_64` environment by installing the [necessary tools](https://github.com/spectrocloud/tutorials/blob/main/docs/docker.md#docker) and cloning the [GitHub repository](https://github.com/spectrocloud/tutorials/) that contains the tutorial files. Here are the steps to start the tutorials container. -
- -Start the Docker Desktop on your local machine and ensure the daemon is available by issuing a command to list the currently active containers. - -
- -```bash -docker ps -``` - -Download the `ghcr.io/spectrocloud/tutorials:1.0.6` image to your local machine. The Docker image includes the necessary tools. -
- -```bash -docker pull ghcr.io/spectrocloud/tutorials:1.0.6 -``` - -Next, start the container, and open a bash session into it. -

```bash
docker run --name tutorialContainer --publish 7000:5000 --interactive --tty ghcr.io/spectrocloud/tutorials:1.0.6 bash
```

If port 7000 on your local machine is unavailable, you can use any other port of your choice.

<br />
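Before continuing, you can optionally confirm the container is up from another terminal on your local machine. The filter value assumes you kept the container name `tutorialContainer` used in the command above.

```bash
# Verify the tutorials container is running.
docker ps --filter "name=tutorialContainer"
```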
- - - -Wait to exit the container until the tutorial is complete. Otherwise, you may lose your progress. - - - - - -## Tools and Starter Code -After opening a bash session in the active container, verify that the tools necessary for this tutorial are installed. -
- -Check the Spectro CLI version. -
- -```bash -spectro version -``` - -Check the Spectro registry server version. -
- -```bash -registry --version -``` - -Check the Terraform version. -
- -```bash -terraform --version -``` - -In addition to these tools, the tutorials container has other tools, such as `ngrok`, `git`, and `nano`. - -Examine the directories that pertain to the current tutorial in the **root** directory. -
- -```bash -. -├── packs -│   └── hello-universe-pack # Contains the pack files -└── terraform - └── pack-tf # Contains the .tf files for creating Spectro Cloud resources -``` -The **packs** directory contains the pack files. The **terraform** directory contains the Terraform files used to create Spectro Cloud resources, which you will use later in this tutorial. - - -# Build a Pack - -Building a custom pack requires defining specific files. -As outlined in the [Adding Add-on Packs](/registries-and-packs/adding-add-on-packs) guide, you can define a custom pack in two ways: using manifest files or Helm charts. The file structure varies for manifest-based packs and Helm chart-based packs. Below is the reference file structure for each: -
- - - - - -
- -```bash -. -├── pack.json # Mandatory -├── values.yaml # Mandatory -├── manifests # Mandatory - ├── manifest-1.yaml - ├── manifest-2.yaml -│ └── manifest-3.yaml -├── logo.png # Mandatory -└── README.md # Optional -``` - -
- - - -
- -```bash -. -├── pack.json # Mandatory -├── values.yaml # Mandatory. Pack-level values.yaml file. -├── charts # Mandatory -│   ├── chart-1 # Can have nested charts -│   │   ├── Chart.yaml -│   │   ├── templates -│   │   │   ├── template-1.yaml -│   │   │   └── template-2.yaml -│   │   └── values.yaml # Chart-level values.yaml file. -│   ├── chart-1.tgz -│   ├── chart-2 -│   │   ├── Chart.yaml -│   │   ├── templates -│   │   │   ├── template-1.yaml -│   │   │   └── template-2.yaml -│   │   └── values.yaml # Chart-level values.yaml file. -│   └── chart-2.tgz -├── logo.png # Mandatory -└── README.md # Optional -``` - -
- -
- -
- -To simplify this tutorial, we provide you with the manifest file for the *Hello Universe* application in the **packs/hello-universe-pack** folder. Change the directory to the **packs/hello-universe-pack** folder. -
- -```bash -cd /packs/hello-universe-pack -``` -Ensure you have the following files in the current directory. -
- -```bash -. -├── pack.json # Mandatory -├── values.yaml # Mandatory -├── manifests # Mandatory -│ └── hello-universe.yaml -├── logo.png # Mandatory -└── README.md # Optional -``` -
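Optionally, you can verify that the mandatory pack files are present before reviewing them. The quick check below assumes you are still in the **/packs/hello-universe-pack** directory.

```bash
# Confirm the mandatory pack files exist.
for file in pack.json values.yaml logo.png manifests/hello-universe.yaml; do
  test -e "$file" && echo "found: $file" || echo "MISSING: $file"
done
```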
- -## Pack File Structure - -Go ahead and review each of the following five files in the pack. -
- -* **pack.json** - This file contains the pack metadata such as `addonType`, `cloudTypes`, and the `kubeManifests` array that contains the list of manifest files: `layer`, `name`, and `version`. Refer to the [JSON Schema](/registries-and-packs/add-custom-packs#jsonschema) for a list of attributes and respective data types. The schema validation will happen when you push a pack to the registry. - -
- - ```json - { - "addonType":"app services", - "cloudTypes": [ "all" ], - "displayName": "Hello Universe", - "kubeManifests": [ - "manifests/hello-universe.yaml" - ], - "layer": "addon", - "name": "hellouniverse", - "version": "1.0.0" - } - ``` - -
- - - -* **values.yaml** - This file contains configurable parameters you can define while adding the current pack to a cluster profile. In the **values.yaml** file for this tutorial, the `pack/namespace` attribute specifies the namespace on the target cluster to deploy the pack. If the **values.yaml** specifies a namespace value, then Palette first checks to see if the namespace has been created. If so, Palette uses the existing namespace. If the namespace has not been created, Palette creates a new one using the value specified in the YAML file. - - If the **values.yaml** does not specify a namespace value, Palette deploys the application to the default namespace. - - The `manifests` section exposes the configurable parameters for each manifest file listed in the **manifests** directory. For example, in the sample code snippet below, the `hello-universe` attribute exposes the `registry`, `repository`, and `tag` parameters. -
- - ```yaml - pack: - namespace: "hello-universe" - manifests: - hello-universe: - registry: ghcr.io - repository: spectrocloud/hello-universe - tag: 1.0.12 - ``` - -
- - You can optionally define *presets*, which are predefined values to use in the **values.yaml**. You define presets in a separate **presets.yaml** file. The presets become available when you create the cluster profile. Presets facilitate configuring the profile and avoid errors that can happen by manually editing the **values.yaml** file. Refer [Pack Presets](/registries-and-packs/pack-constraints#packpresets) for details and examples of how to define presets. - - The example below shows the parameters you can configure in the **values.yaml** for the `hello-universe` manifest when you create the cluster profile. - -
- - ![Screenshot of the configurable parameters in the values.yaml file.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-values-yaml.png ) - -
- -* **manifests** - This directory contains the manifest files for your Kubernetes application. This tutorial has only one file, **hello-universe.yaml**. Note that the **values.yaml** file has a corresponding `manifests/hello-universe` element with the same name as the YAML file. -
- -* **logo.png** - This file contains a logo that displays when you create a cluster profile. -
- - -* **README.md** - This file may contain the pack description, purpose, authors, and other relevant information. The README in the current example introduces the application used in the pack. -
- - -After finalizing all files in the pack directory, the next step is to set up a registry server and publish the pack to that registry, where you can access it directly from Palette. - -
- -# Set Up the Registry Server - -The tutorials environment already has the Spectro registry service and other necessary tools available. The following sections will guide you to start the registry server, expose the service to the external world using [Ngrok](https://ngrok.com/) reverse proxy, and log in to the registry server to push your custom add-on pack to it. - -## Start and Expose the Registry Server -Start the registry server by issuing the following command from the bash session you opened into the tutorials container. -
- -```bash -registry serve /etc/spectro/config.yml > /var/log/registry.log 2>&1 & -``` - -The registry server will start in HTTP mode (not HTTPS). Refer to the [Add a Custom Registry](/registries-and-packs/adding-a-custom-registry) guide to learn more about deploying an HTTPS registry server. - - -Next, expose the registry server to the public so that you can configure it later in Palette. Use Ngrok reverse proxy to expose the registry server listening on port 5000 via an HTTP tunnel using the following command. -
- -```bash -ngrok http 5000 --log-level debug -``` - -The command above will reserve the current bash session and display the status of each HTTP request made to the Ngrok server later in this tutorial. The screenshot below shows the registry server successfully exposed via Ngrok. - -
- -![Screenshot of registry server exposed via ngrok](/tutorials/deploy-pack/registries-and-packs_deploy-pack_ngrok-start.png ) - -
- -Verify the registry server is accessible from outside the tutorials container by visiting the `/health` endpoint. Access the *https://Your-URL-Here/health* in your host browser. Replace the base URL with the Ngrok URL output you received. You should receive a `{"status":"UP"}` response. - -
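If you prefer the command line, you can query the same endpoint with `curl` from your host machine. The URL below is only an example; replace it with the Ngrok URL assigned to you.

```bash
# Query the registry health endpoint through the Ngrok tunnel.
# Expected response: {"status":"UP"}
curl https://f59e-49-36-220-143.ngrok-free.app/health
```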
- -## Log in to the Registry Server -Once the registry server's `/health` endpoint shows `UP` status, the next step is to log in and then push the pack to it. The pack you will push is in the tutorials container. Open another bash session into the tutorials container from your local terminal. -
- -```bash -docker exec -it tutorialContainer bash -``` - -Log in to the registry server using Ngrok's public URL assigned to you. Issue the command below, but replace the URL with your Ngrok URL. The command below uses these credentials to log in to the registry server: `{username: admin, password: admin}`. -
- -```bash -spectro registry login --insecure --default --username admin --password admin \ -f59e-49-36-220-143.ngrok-free.app -``` - - - -Do not use https:// or http:// keyword in the Ngrok URL. Using either of these keywords will result in an authorization issue. - - - - -You will receive a `Login Succeeded` response upon successful login. -
- -```bash -# Output condensed for readability -WARNING! Your password will be stored unencrypted in /root/.spectro/config.json. -Login Succeeded -``` -
- - -## Push the Pack to the Registry Server -When you are logged in, push the pack to the registry server using the following command. -
- -```bash -spectro pack push /packs/hello-universe-pack/ -``` - -You can verify that the pack is in the registry by using the `ls` command. This command lists all packs in the registry. -
- -```bash -spectro pack ls -``` - -Verify the pack you pushed is listed, as shown in the screenshot below. - -
- -![Screenshot of spectro pack ls](/tutorials/deploy-pack/registries-and-packs_deploy-pack_pack-push.png) - -
- -If you need help with the Spectro CLI commands, such as deleting a pack, refer to the [Spectro CLI commands](/registries-and-packs/spectro-cli-reference#commands) guide. -
- -## Configure the Registry Server in Palette -After you push the pack to the registry server, log in to Palette and configure the registry service so that you can access it when you create your cluster profile. - - -Log in to [Palette](https://console.spectrocloud.com), and switch to the Tenant admin view. -
- -![Screenshot of Palette tenant settings.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_tenant-admin.png) - -
- - -Navigate to **Tenant Settings** > **Registries** > **Pack Registries** section. Click on **Add New Pack Registry** and enter the pack registry name, endpoint URL, and user credentials. For a consistent experience in this tutorial, we suggest using the name **private-pack-registry**, as shown in the screenshot below. Ensure you replace the URL with your Ngrok URL. Click on **Validate** to ensure the URL and credentials are correct, then click on **Confirm** to add the registry server. - -
- -![Screenshot of registry server edit option in Palette tenant settings.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-edit.png) - -
- - -Palette syncs the registry server periodically. However, you can sync it manually the first time you add a server by clicking the **three-dot Menu** next to the registry server name and selecting **Sync**. - -
- -![Screenshot of registry server sync in Palette](/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-sync.png) - -
- - -# Create a Cluster Profile and Deploy a Cluster - -This tutorial guides you to create a cluster profile for AWS. However, you can choose any other cloud service provider, provided you configure the following two items: -
- -* **Cloud account**: A cloud account added to your Palette project settings. - - The AWS cloud account name in this tutorial example is **spectro-cloud**. You can choose another name if desired. The screenshot below shows how to add and verify the AWS cloud account with your project. Navigate to **Project Settings** > **Cloud Accounts** > **AWS** > **Add AWS Account** in Palette. Check out the [Register and Manage AWS Accounts](/clusters/public-cloud/aws/add-aws-accounts) guide for additional help. - -
- - ![Screenshot of Cloud Accounts in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_palette-cloud-account.png) - -
- - -* **SSH key**: An SSH key created in the region where you will deploy the cluster. - - This tutorial example will deploy the cluster in the **us-east-2** region, and the SSH key name used in this example is **aws_key_sk_us_east_2**. You must choose the desired region and the available SSH key name from your AWS account. - -
- -Create a cluster profile and deploy it to a cluster using either Palette or Terraform code. -
- - - - - - -## Create a Cluster Profile -Switch to the **Default** project scope for creating a cluster profile. -
- -![Screenshot of the Palette Default scope.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_default-scope.png) - -
- -Select the **Profile** section in the left **Main Menu** to create a cluster profile that will combine the core infrastructure and add-on layers. Click on the **Add Cluster Profile** button, and provide the details in the wizard that follows. The wizard displays the following sections. -
- -### Basic Information -Use the following values in the **Basic Information** section. - -|**Field**|**Value**| -|---|---| -|Name|pack-tutorial-profile| -|Version|`1.0.0`| -|Description|Cluster profile as part of the pack tutorial.| -|Type|Full| -|Tags|`spectro-cloud-education, app:hello-universe, terraform_managed:true`| - -Click on **Next** to continue. -
- -### Cloud Type -In the **Cloud Type** section, choose AWS as the infrastructure provider for this tutorial, and click on **Next** at the bottom to move on to the next section. -
- - - -If you choose a different cloud service provider, the core infrastructure layers options, as outlined in the **Profile Layers** section below, will differ from this tutorial. - - - -
- -### Profile Layers -In the **Profile Layers** section, add the following core infrastructure layers if you have chosen the AWS cloud service provider. To deploy your resource to Azure or Google Cloud, use the core infrastructure layers outlined in [Cloud Service Provider Configurations](https://github.com/spectrocloud/tutorials/tree/main/terraform/pack-tf/README.md#cloud-service-provider-configurations). - -|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| -|---|---|---|---| -|OS|Public Repo|Ubuntu|`LTS__20.4.x`| -|Kubernetes|Public Repo|Kubernetes|`1.24.x`| -|Network|Public Repo|Calico|`3.25.x`| -|Storage|Public Repo|Amazon EBS CSI|`1.16.x`| - -As you add each layer, click on the **Next layer** button. After you add the **Storage** layer, click on the **Confirm** button to complete the core infrastructure stack. Palette displays the newly created infrastructure profile as a layered diagram. You can select any layer to make further edits or change the version if desired. - -Now you are ready to add the add-on layers. Click the **Add New Pack** button. - -Add the Spectro Proxy pack to enable a reverse proxy to connect to the cluster's API. Adding this pack is *optional*, but it will help connect your local machine to the cluster's API for debugging. -Refer to the [Spectro Proxy](/integrations/frp/) guide for more details. - -|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| -|---|---|---|---| -|Authentication | Public Repo| Spectro Proxy | `1.3.x`| - -Click on the **Confirm & Create** button to finish adding the Spectro Proxy pack. Also, add the following certificate Subject Alternative Name (SAN) value to the Kubernetes pack under the `apiServer` parameter section to configure the Spectro Proxy pack. -
- -```yaml -certSANs: - - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" -``` -
- -![Screenshot of the certificate Subject Alternative Name.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-certsan.png) - -
- -Next, add the following **Hello Universe** pack. This is the custom add-on pack you defined and pushed to the **private-pack-registry** earlier in this tutorial. - -|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| -|---|---|---|---| -|App Services | private-pack-registry | Hello Universe | `1.0.x` | - - -Click on the **Confirm & Create** button to finish adding the Hello Universe pack. - - -If there are no errors or compatibility issues, Palette displays the newly created full cluster profile. Verify the layers you added, and click **Next**. - - -
- -![Screenshot of the Profile Layers success.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-layer.png) - -

- - -### Review -Review once more and click **Finish Configuration** to create the cluster profile. -

## Create a Cluster
From the **Profile** page, click on the newly created cluster profile to view its details page. Palette displays all the layers and allows you to edit any of them.

Click the **Deploy** button to deploy a new cluster. The cluster deployment wizard will display the following sections.

<br />
- -### Basic Information -Use the following values in the first section, **Basic Information**. - -|**Field**|**Value**| -|---|---| -|Cluster name| pack-tutorial-cluster | -|Description| Cluster as part of the pack tutorial.| -|Tags|`spectro-cloud-education, app:hello-universe, terraform_managed:true`| -|Cloud Account|spectro-cloud| - -Note that the AWS cloud account name in this tutorial example is **spectro-cloud**. If you used a different cloud account name, choose the name configured in your Palette's project settings. - -Click **Next** to continue. - -
- -### Parameters -The **Parameters** section allows you to change the profile configurations. For example, clicking on the **Hello Universe 1.0.x** layer allows you to configure the `registry`, `repository`, and `tag` parameters defined in the **values.yaml** file. -
- -![Screenshot of the Cluster layers.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-layers.png) - -
- -Keep the default values and click **Next**. - -
- -### Cluster config -In the **Cluster config** section, ensure the **Static Placement** field is unchecked. If checked, the **Static Placement** will deploy the cluster in an existing VPC, and you will need the [Amazon Resource Names](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html) (ARNs) for the existing subnets, roles, and other resources. For this tutorial, we will use dynamic placement, where Palette creates a new VPC and all other resources needed for the cluster. - -For the **Region** field, select the region of your choice. The tutorial example will deploy the cluster in the **us-east-2** region. For the **SSH Key Pair Name** field, choose the SSH key pair name from the selected region. You must have an SSH key created already in the AWS region where you will deploy the cluster. - -Click **Next** to continue. - -
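If you are unsure which SSH key pairs exist in your target region, you can list them with the AWS CLI, assuming it is installed and configured for the same AWS account. The region below matches the tutorial example; adjust it if you chose a different one.

```bash
# List the EC2 key pair names available in the us-east-2 region.
aws ec2 describe-key-pairs --region us-east-2 --query "KeyPairs[].KeyName"
```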
- -### Nodes config -In the **Nodes config** section, provide the details for the master and the worker pools. For this tutorial, you can use the following minimal configuration: - -|**Field** | **Value for the master-pool**| **Value for the worker-pool**| -|---| --- | ---| -|Node pool name| master-pool | worker-pool | -|Number of nodes in the pool| `1` | `1` | -|Allow worker capability| Checked | Not applicable | -|Enable Autoscaler | Not applicable | No | -|Rolling update | Not applicable | Expand First.
Launch a new node first, then shut down the old one. |

Keep the **Cloud Configuration** the same for the master and worker pools.

|**Field** | **Value**|
|---| --- |
|Instance Type | General purpose `m4.xlarge` <br />
A minimum allocation of four CPU cores is required for the master node. |
|Availability zones | Choose any *one* availability zone. <br />
This tutorial example will deploy to the `us-east-2a` availability zone. | -|Disk size | 60 GiB | - -Click **Next** to continue. -
- -### Settings -The **Settings** section displays options for OS patching, scheduled scans, scheduled backups, and cluster role binding. Use the default values, and click on the **Validate** button. - -
- -### Review -Review all configurations in this section. The **Review** page displays the cluster name, tags, cloud account name, node pools, layers, and an estimated hourly cost. If everything looks good, click on the **Finish Configuration** button to finish deploying the cluster. Deployment may take up to *20 minutes* to finish. - -While deployment is in progress, Palette displays the cluster status as **Provisioning**. While you wait for the cluster to finish deploying, you can explore the various tabs on the cluster details page, such as **Overview**, **Workloads**, and **Events**. - -
- -
- - - -The [Spectro Cloud Terraform provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) allows you to create and manage Palette resources using Infrastructure as Code (IaC). This offers such advantages as automating infrastructure, facilitating collaboration, documenting infrastructure, and keeping all infrastructure in a single source of truth. - -## Starter Code -Navigate back to your tutorials container bash session to locate the starter Terraform files. If you have closed the terminal session, you can reopen another bash session in the tutorials container using the following command. -
- -```bash -docker exec -it tutorialContainer bash -``` - -Switch to the **/terraform/pack-tf** directory, which contains the Terraform code for this tutorial. -
- -```bash -cd /terraform/pack-tf -``` - -## Set Up the Spectro Cloud API Key - -To get started with Terraform code, you need a Spectro Cloud API key to authenticate and interact with the Palette API endpoint. To add a new API key, log in to Palette, click on the user **User Menu** at the top right, and select **My API Keys**, as shown in the screenshot below. - -
- -![Screenshot of generating an API key in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_generate-api-key.png ) - -
- -Below are the steps to add and export an API key: - - -1. Fill in the required fields, such as the API key name and expiration date, and confirm your changes. - - - -2. Copy the key value to your clipboard, and switch back to the tutorials container environment. - - - -3. Export the API key as an environment variable in the tutorials container bash session so the Terraform code can authenticate with Palette API. -
- - ```bash - export SPECTROCLOUD_APIKEY= - ``` -
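You can quickly confirm the variable is set in your current shell before running Terraform. This is only a local sanity check; it does not validate the key against the Palette API.

```bash
# Confirm the environment variable is set without printing the key value.
[ -n "$SPECTROCLOUD_APIKEY" ] && echo "SPECTROCLOUD_APIKEY is set" || echo "SPECTROCLOUD_APIKEY is empty"
```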
- -## Review Terraform Files -Ensure you have the following files in the current working directory. -
- -```bash -. -├── profile.tf # Resource -├── cluster.tf # Resource -├── data.tf # Spectro Cloud data resources -├── inputs.tf # Input variables -├── terraform.tfvars # Variable definitions file -├── outputs.tf # Output variables -└── provider.tf # Spectro Cloud Terraform provider -``` - -Note that the Terraform code will deploy the resources to **AWS**. - -We recommend you explore all Terraform files. Below is a high-level overview of each file: -
- -- **profile.tf** - contains the configuration for the `spectrocloud_cluster_profile` resource. Review the core infrastructure layers that make up the `spectrocloud_cluster_profile` resource. - - - -- **cluster.tf** - contains the configuration for the `spectrocloud_cluster_aws` resource. The cluster resource depends upon the `spectrocloud_cluster_profile` resource. - - - -- **data.tf** - contains the configuration for the resources to retrieve data from Palette dynamically. The table below lists the pack details required for each pack layer in order to deploy the `spectrocloud_cluster_profile` resource. - - |**Pack Type**|**Registry**|**Pack Name**|**Tag**| **Version** | - |---|---|---|---| - |OS|Public Repo|`ubuntu-aws`|`LTS__20.4.x`| `20.04`| - |Kubernetes|Public Repo|`kubernetes`|`1.24.x`| `1.24.10` | - |Network|Public Repo|`cni-calico`|`3.25.x`|`3.25.0`| - |Storage|Public Repo|`csi-aws-ebs`|`1.16.x`|`1.16.0`| - - Note that using this Terraform code will deploy the resources to AWS. To deploy your resource to Azure or Google Cloud, use the layer details outlined in [Cloud Service Provider Configurations] (https://github.com/spectrocloud/tutorials/tree/main/terraform/pack-tf/README.md#cloud-service-provider-configurations). - - - -- **inputs.tf** - contains the variables used in the tutorial such as the names of cluster profile, cluster, cloud account, SSH key name, AWS region, pack name, and registry server. - - Some variables have a default value, but you *must* provide the values for `cluster_cloud_account_aws_name`, `aws_region_name`, `ssh_key_name`, and `private_pack_registry` variables. You will find a `#ToDo` tag next to each variable to update. Provide the values for these variables in a separate file, **terraform.tfvars**. Use default values for the remaining variables. - - - -- **terraform.tfvars** - contains the variable definitions. The list of variables is outlined in the code block below. You *must* specify the values for all variables that are marked `"REPLACE ME"`. Read the inline comments below to understand each variable. - - - For example, the value for `cluster_cloud_account_aws_name` will be the name of the cloud account added to your Palette project settings. In this tutorial example, the cloud account name is **spectro-cloud**. - - - For `aws_region_name`, you can choose any [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) for your deployment. This tutorial example uses **us-east-2** region. - - - The value for `ssh_key_name` will be the name of the SSH key available in the region where you will deploy the cluster. The SSH key name used in this example is **aws_key_sk_us_east_2**. - - - Lastly, provide your registry server name for the `private_pack_registry` variable. You can provide the **private-pack-registry** as the value if you have followed the same naming convention as this tutorial. -
- - ```bash - cluster_cloud_account_aws_name = "REPLACE ME" # Name of the cloud account added to your Palette project settings - aws_region_name = "REPLACE ME" # Use "us-east-2" or any other AWS region - ssh_key_name = "REPLACE ME" # Name of the SSH key available in the region where you will deploy the cluster - private_pack_registry = "REPLACE ME" # Your registry server name. This tutorial uses "private-pack-registry". - ``` - - - -- **outputs.tf** - contains the output variables to expose information. - - - -- **provider.tf** - contains the provider configuration and version. - -
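Before initializing Terraform, you can make sure no placeholder values remain. The command below simply searches for the literal `REPLACE ME` marker used in **terraform.tfvars**; no output means every placeholder has been filled in.

```bash
# Search for unfilled placeholders in the variable definitions file.
grep "REPLACE ME" terraform.tfvars
```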
- -## Deploy Terraform -After you update the **terraform.tfvars** file and carefully review the other files, initialize the Terraform provider. -
- -```bash -terraform init -``` - -The `init` command downloads plugins and providers from the **provider.tf** file. Next, preview the resources Terraform will create. -
- -```bash -terraform plan -``` - -The output displays the resources Terraform will create in an actual implementation. -
- -```bash -# Output condensed for readability -Plan: 2 to add, 0 to change, 0 to destroy. -``` - -Finish creating all the resources. -
- -```bash -terraform apply -auto-approve -``` - -It can take up to 20 minutes to provision the cluster. When cluster provisioning completes, the following message displays. -
- -```bash -# Output condensed for readability -Apply complete! Resources: 2 added, 0 changed, 0 destroyed. -``` - -You can observe the cluster deployment progress in Palette by navigating back to Palette. -
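While the cluster finishes provisioning, you can also inspect what Terraform created from the CLI. Both commands below are standard Terraform commands and assume you are still in the **/terraform/pack-tf** directory.

```bash
# List the resources Terraform created and display any declared output values.
terraform state list
terraform output
```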
- - -## Check the In-Progress Deployment -Log into the [Palette](https://console.spectrocloud.com/), and navigate to the **Profile** section in the left **Main Menu**. If the Terraform deployment is successful, the newly created cluster profile is displayed as shown in the screenshot below. - -
- -![Screenshot of the successful Profile in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_verify-profile.png) - -
- -
- -
- -
- -# Validate -In Palette, navigate to the left **Main Menu** and select **Clusters**. Next, select your cluster to display the cluster Overview page and monitor cluster provisioning progress. - -
- -![Screenshot of the cluster health.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-health.png) - -
- -When cluster status displays **Running** and **Healthy**, you can access the application from the exposed service URL with the port number displayed. For the Hello Universe application, port 8080 is exposed. Click on the URL to access the application. -
- - - -We recommend waiting to click on the service URL, as it takes one to three minutes for DNS to properly resolve the public load balancer URL. This prevents the browser from caching an unresolved DNS request. - - - -
- -![Screenshot of the successful accessing the application using the load balancer URL.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_success.png) - -
- -You can also look at real-time metrics, such as CPU and memory consumption, in the cluster's **Overview** tab in Palette. - -
- -![Screenshot of the cluster metrics.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-metrics.png) - -
- -Using your custom pack in the cluster, you have successfully deployed the Hello Universe application to the cluster. - -
- -# Cleanup -Delete the cluster, cluster profile, and registry server, and remove the registry service configuration from Palette's settings. - -The following steps will guide you in cleaning up your environment. Follow the steps for Palette if you used Palette to deploy the cluster. Use Terraform commands to delete the cluster if you used Terraform for deployment. - -
- - - - - -
- -## Delete the Cluster and Profile using Palette -Navigate to the **Cluster** section in Palette's left **Main Menu**, and view the details page of the **pack-tutorial-cluster**. To delete the cluster, click on the **Settings** button to expand the **drop-down Menu**, and select the **Delete Cluster** option. Palette prompts you to enter the cluster name and confirm the delete action. Type the cluster name to proceed with the delete step. - -
- -![Screenshot of deleting the cluster in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_delete-cluster.png) - -
- -The cluster status displays **Deleting**. Deletion takes up to 10 minutes. -
- - - -If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for force deletion. Navigate to the cluster's details page and click on **Settings**. Select **Force Delete Cluster**. Palette automatically removes clusters that are stuck in the cluster deletion phase for over 24 hours. - - -
- -After you delete the cluster, go ahead and delete the profile. From the left **Main Menu**, click **Profiles** and select the profile to delete. Choose the **Delete** option in the **three-dot Menu**. - -
- -![Screenshot of deleting the profile in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_delete-profile.png) - -
- -Wait for the resources to clean up and ensure they are successfully deleted. - -
- -
- - - -
- -## Delete the Cluster and Profile using Terraform -If you've used Terraform to deploy the cluster, switch back to the tutorials container, and issue the following command from within the **/terraform/pack-tf** directory: -
- -```bash -terraform destroy -auto-approve -``` - -Wait for the resources to clean up. Deleting the Terraform resources may take up to 10 minutes. -
- -```bash -# Output condensed for readability -Destroy complete! Resources: 2 destroyed. -``` - -
- -
- -
- -
- -## Delete the Registry Server -After deleting the cluster and cluster profile, navigate to **Tenant Settings** > **Registries** > **Pack Registries** to delete the registry service configuration from Palette. -
- -![Screenshot of registry server delete in Palette](/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-delete.png) - -
- -Stop the registry server by closing the tutorials container bash session that serves the Ngrok reverse proxy server. At this point, you can close all the bash sessions. To remove the container and the image from the local machine, issue the following commands: -

```bash
docker container rm --force tutorialContainer
docker image rm --force ghcr.io/spectrocloud/tutorials:1.0.6
```

<br />
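You can optionally verify that the container and image were removed. Both commands should return empty lists if the cleanup succeeded.

```bash
# Confirm the tutorials container and image are gone.
docker ps --all --filter "name=tutorialContainer"
docker images ghcr.io/spectrocloud/tutorials
```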
- - -# Wrap-Up - -In this tutorial, you learned how to create a custom pack using manifest files. You packaged up an application in a custom pack that you pushed to a private registry server and added to Palette. - -Next, you created a cluster profile that included all the core infrastructure layers, such as the OS, Kubernetes distribution, and more. You also added your custom pack to the cluster profile so your application could be deployed to a Kubernetes cluster. - -Packs are the building blocks of cluster profiles, allowing you to customize your Kubernetes clusters. Palette enables you to use different packs to create multiple cluster profiles, each for specific purposes. As a result, you can ensure all Kubernetes deployments contain all the required dependencies and applications without developing complicated deployment scripts. All you need to do is maintain the cluster profiles. - -To learn more about packs in Palette, we encourage you to check out the reference resources below. -
- -- [Custom OS Pack](/registries-and-packs/add-custom-packs#addinganospack) - - -- [Add-on Packs](/registries-and-packs/adding-add-on-packs) - - -- [Pack Constraints](/registries-and-packs/pack-constraints) - -

\ No newline at end of file diff --git a/content/docs/18-knowledgebase/01-tutorials/80-deploy-k8s-cluster.md b/content/docs/18-knowledgebase/01-tutorials/80-deploy-k8s-cluster.md deleted file mode 100644 index d9770ca8e8..0000000000 --- a/content/docs/18-knowledgebase/01-tutorials/80-deploy-k8s-cluster.md +++ /dev/null @@ -1,1230 +0,0 @@ ---- -title: "Deploy a Cluster" -metaTitle: "Deploy a Cluster" -metaDescription: "Learn how to deploy an application to a Kubernetes cluster with Palette. Experience a streamlined approach to creating and managing multiple Kubernetes clusters, on different public cloud providers, through Palette's optimized process." -icon: "" -category: ["tutorial"] -hideToC: false -fullWidth: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import YouTube from 'shared/components/Video'; - -# Deploy a Cluster - -Palette helps you create and manage Kubernetes clusters in various cloud environments with minimal overhead. - -Palette offers profile-based management for Kubernetes, enabling consistency, repeatability, and operational efficiency across multiple clusters. A [cluster profile](/cluster-profiles) allows you to define the cluster infrastructure stack you prefer in a declarative and reusable manner. It allows you to define *customizable* infrastructure stacks using desired Operating System (OS), Kubernetes, Container Network Interfaces (CNI), Container Storage Interfaces (CSI), and additional add-on application layers. - -After defining a cluster profile, you can provide the cloud environment details, the control plane, and worker node configurations to deploy a host cluster. - -This tutorial will teach you how to deploy a host cluster with Palette by using the following public cloud providers - Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP). You can deploy a cluster using either Palette or Terraform. You will learn about *Cluster Mode* and *Cluster Profiles* and how these components enable you to deploy applications to Kubernetes quickly with minimal effort but with high customization. - -# Architecture - -As you navigate through the tutorial, use this diagram as a reference point when trying to understand how a cluster profile is used when deploying a host cluster. Palette uses the cluster profile as a blueprint when deploying the cluster. Clusters deployed by Palette have the same node pools you may be familiar with, control plane nodes, often called *master nodes* and the *worker nodes* where you will deploy applications. The result is a host cluster managed by Palette. - -![A view of Palette managing the Kubernetes lifecycle](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_application.png) - -
- -# Deploy the Cluster and the Application - -Select the tab representing the workflow you want to learn more about. - -
- - - - - -You can create and manage clusters directly from the Palette dashboard. Use the following steps to learn how to deploy a host cluster to multiple cloud providers. - -## Prerequisites - -To complete this tutorial, you will need the following items - -- A public cloud account from one of the following providers. - - [AWS](https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account) - - [Azure](https://learn.microsoft.com/en-us/training/modules/create-an-azure-account) - - [GCP](https://cloud.google.com/docs/get-started) - -
- -- Register the cloud account in Palette. Use the following resource for additional guidance. - - [Register and Manage AWS Accounts](/clusters/public-cloud/aws/add-aws-accounts) - - [Register and Manage Azure Cloud Accounts](/clusters/public-cloud/azure/azure-cloud) - - [Register and Manage GCP Accounts](/clusters/public-cloud/gcp#creatingagcpcloudaccount) - -
- -- An SSH Key Pair. Use the [Create and Upload an SSH Key](/clusters/cluster-management/ssh-keys) guide to learn how to create an SSH key and upload it to Palette. - - - AWS users must create an AWS Key pair before starting the tutorial. If you need additional guidance, check out the [Create EC2 SSH Key Pair](https://docs.aws.amazon.com/ground-station/latest/ug/create-ec2-ssh-key-pair.html) tutorial. - -## Deploy the Environment - -The following steps will guide you through deploying the cluster infrastructure. You will start by creating a cluster profile and deploying the host cluster using your cluster profile. - -
- - - - -### Create Cluster Profile (AWS) - -[Cluster profiles](https://docs.spectrocloud.com/cluster-profiles) are templates created with the following core layers. - - - Operating System (OS). - - Kubernetes distribution. - - Network Container Interface (CNI). - - Storage Container Interface (CSI). - -A cluster profile contains these core layers and additional add-on layers, such as security, monitoring, logging, etc. - -Cluster profiles enable you to create infrastructure stacks that can be customized in terms of the number of layers, type of components, and version and offer a reproducible way to create clusters. - -Log in to Palette and navigate to the left **Main Menu**. Select **Profiles** to view the cluster profile page. -You can view the list of available cluster profiles. To create a cluster profile, click the **Add Cluster Profile** button at the top right. - -![View of the cluster view page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_profile_list_view.png) - -Follow the wizard to create a new profile. - -In the **Basic Information** section, assign the name **aws-profile**, a brief profile description, select the type as **Full**, and assign the tag **env:aws**. You can leave the version empty if you want to. Just be aware that the version defaults to **1.0.0**. Click on **Next**. - -**Cloud Type** allows you to choose the infrastructure provider with which this cluster profile is associated. Select **AWS** and click on **Next**. - -**Profile Layers**, this is the main configuration step where you specify the packs that compose the profile. There are four required infrastructure packs and several optional add-on packs you can choose from. -Every pack requires you to select the **Pack Type**, **Registry**, and **Pack Name**. - -For this tutorial, use the following packs: - -| Pack Name | Version | Layer | -|--------------------|-----------|--------------------| -| ubuntu-aws LTS | 20.4.x | Operating System | -| Kubernetes | 1.24.x | Kubernetes | -| cni-calico | 3.24.x | Network | -| csi-aws-ebs | 1.16.x | Storage | - - -As you fill out the information for a layer, click on **Next** to proceed to the following layer. - -Click on **Confirm** after you have completed filling out all the core layers. - -![A view of the cluster profile stack](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_parameters.png) - -The review section gives an overview of the cluster profile configuration you selected. Click on **Finish Configuration** to create the cluster profile. - - -Cluster profiles are mutable, meaning you can modify them when you desire. You can add, remove, or edit the existing layers during modification. - -
- - -## Create a New Cluster - -Navigate to the left **Main Menu** and select **Cluster**. From the clusters page, click on the **Add New Cluster** button. - -![palette clusters overview page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png) - -Palette will prompt you to either deploy a new cluster or import an existing one. Click on **Deploy New Cluster** to access the cluster deployment wizard. Select **AWS** and click the **Start AWS Configuration** button. Use the following steps to create a host cluster in AWS. - -
- - -### Basic information - -In the **Basic information** section, insert the general information about the cluster, such as the Cluster name, Description, Tags, and Cloud account. Click on **Next**. - -![palette clusters basic information](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_basic_info.png) - -
- - -### Cluster Profile - -On the right side, there is a list of available cluster profiles you can choose to deploy to AWS. Select the cluster profile you created earlier and click on **Next**. - -
- - -### Parameters - -The **Parameters** section displays all the core and add-on layers in the cluster profile. - -![palette clusters parameters](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_creation_parameters.png) - -Each layer has a pack manifest file with the deploy configurations. The pack manifest file is in a YAML format. Each pack contains a set of default values. You can change the manifest values if you don't want to use the default values of the cluster profile. Click on **Next** to proceed. - -

### Cluster Configuration

The **Cluster config** section allows you to select the **Region** in which to deploy the host cluster and other options, such as FIPS, and to specify the **SSH Key Pair** to assign to the cluster. All clusters require you to select an SSH key. After you have selected the **Region** and your **SSH Key Pair Name**, click on **Next**.

### Nodes Configuration

The **Nodes config** section allows you to configure the nodes that make up the control plane (master nodes) and data plane (worker nodes) of the host cluster.

Before you proceed to the next section, take the time to review the following parameters.

-- **Number of nodes in the pool** - Used to set the right amount of nodes that make up the pool of either the master or worker nodes. Set the count to one for the master pool and two for the worker pool. - - -- **Allow worker capability** - This option allows the master node also to accept workloads. This is useful when spot instances are used as worker nodes. You can check this box if you want to. - - -- **Instance Type** - Select the compute type for the node pool. Each instance type displays the amount of CPU, RAM, and hourly cost of the instance. Select `m4.2xlarge`. - - -- **Availability zones** - Used to specify the availability zones the node pool can place nodes. Pick one availability zone. - - -- **Disk size** - Set the disk size to **60 GiB**. - - -- **Instance Option** - Choose between [on-demand instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-on-demand-instances.html) and [spot instance](https://aws.amazon.com/ec2/spot/) as worker nodes. Select **On Demand**. - -![palette clusters basic information](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png) - -Select **Next** to proceed with the cluster deployment. - -
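If you want to confirm that the selected instance type is offered in the availability zone you picked, you can check with the AWS CLI, assuming it is installed and configured. The region and instance type below are examples that match the tutorial values; adjust them to your own selections.

```bash
# Check which availability zones in us-east-2 offer the m4.2xlarge instance type.
aws ec2 describe-instance-type-offerings \
  --location-type availability-zone \
  --filters Name=instance-type,Values=m4.2xlarge \
  --region us-east-2 \
  --query "InstanceTypeOfferings[].Location"
```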
- - -### Settings - -In the **Settings** section, you can configure advanced options such as when to patch the OS, enable security scans, manage backups, add role-based access control (RBAC) bindings, and more. - -For this tutorial, you can use the default settings. Click on **Validate** to continue. - -
- - -### Review - -The **Review** section is an opportunity for you to review all the cluster configurations prior to deploying the cluster. Review all the settings and click on **Finish Configuration** to deploy the cluster. - -![aws creation of a new cluster overview page](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_profile_cluster_profile_review.png) - - -
- -Navigate to the left **Main Menu** and select **Clusters**. - -![Update the cluster](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_aws_create_cluster.png) - -Click on your cluster to review details such as deployment status, event logs, cluster profile, monitoring data, and other information about the cluster. - -
- -![A view of the cluster details page](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_details.png) - - -
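Once the cluster reports a healthy status, you can optionally verify access from your terminal. Download the cluster's kubeconfig file from the cluster details page, if that option is available in your view, and point `kubectl` at it. The file path below is a placeholder; use the location and name of the file you downloaded.

```bash
# Point kubectl at the downloaded kubeconfig and list the cluster nodes.
export KUBECONFIG="$HOME/Downloads/my-cluster.kubeconfig"
kubectl get nodes
```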
- - -### Create Cluster Profile (Azure) - -[Cluster profiles](https://docs.spectrocloud.com/cluster-profiles) are templates created with the following core layers. - - Operating System (OS). - - Kubernetes distribution and version. - - Network Container Interface (CNI). - - Storage Container Interface (CSI). - -A cluster profile contains these core and additional add-on layers, such as security, monitoring, logging, etc. - -Cluster profiles enable you to create infrastructure stacks that can be customized in terms of the number of layers, type of components, and version and offer a reproducible way to create clusters. - -Log in to Palette and navigate to the left **Main Menu**. Select **Profiles** to view the cluster profile page. -You can view the list of available cluster profiles. To create a cluster profile, click on the **Add Cluster Profile** button at the top right side. - -![View of the cluster view page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_profile_list_view.png) - -Follow the wizard to create a new profile. - -In the **Basic Information** section, assign the name **azure-profile**, a brief profile description, select the type as **Full**, and assign the tag **env:azure***. You can leave the version empty if you want to. Just be aware that the version defaults to **1.0.0**. Click on **Next**. - -**Cloud Type** allows you to choose the infrastructure provider with which this cluster profile is associated. Select **Azure** and click on **Next**. - -**Profile Layers** is the main configuration step where you specify the packs that compose the profile. You can choose from four required infrastructure packs and several optional add-on packs. -Every pack requires you to select the **Pack Type**, **Registry**, and **Pack Name**. - -For this tutorial, use the following packs: - -| Pack Name | Version | Layer | -|--------------------|--------------------------------------------------|--------------------| -| ubuntu-azure LTS | 20.4.x | Operating System | -| Kubernetes | 1.24.x | Kubernetes | -| cni-calico-azure | 3.24.x | Network | -| Azure Disk | 1.25.x | Storage | - - -As you fill out the information for a layer, click on **Next** to proceed to the following layer. - -Click on **Confirm** after you have completed filling out all the core layers. - -![azure cluster profile overview page](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_profile_stack.png) - -The review section gives an overview of the cluster profile configuration you selected. Click on **Finish Configuration** to finish creating the cluster profile. - - -You can update cluster profiles after the creation process. You can modify cluster profiles by adding, removing, or editing layers at any moment. - -
- - -## Create a New Cluster - -Navigate to the left **Main Menu** and select **Cluster**. Click the **Add New Cluster** button from the clusters page. - -![palette clusters overview page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png) - -Click on **Deploy New Cluster** to access the cluster deployment wizard. Select **Azure** and click the **Start Azure Configuration** button. Use the following steps to create a host cluster in Azure. - -
- - -### Basic information - -In the **Basic information** section, insert the general information about the cluster, such as the Cluster name, Description, Tags, and Cloud account. Click on **Next**. - -![palette clusters basic information](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_clusters_basic_info.png) - -
- - -### Cluster Profile - -A list of available cluster profiles you can deploy to Azure is on the right side. Select the cluster profile you created earlier and click on **Next**. - -### Profile Layers - -The **Profile Layers** section displays all the layers and add-on components in the cluster profile. - -![palette clusters basic information](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_parameters.png) - -Each layer has a pack manifest file with the deploy configurations. The pack manifest file is in a YAML format. Each pack contains a set of default values. You can change the manifest values if you don't want to use the default values of the cluster profile. Click on **Next** to proceed. - -

### Cluster Configuration

The **Cluster config** section allows you to select the **Subscription**, **Region**, **Resource Group**, **Storage account**, and **SSH Key** to apply to the host cluster. All clusters require you to assign an SSH key. Refer to the [SSH Keys](/clusters/cluster-management/ssh-keys) guide for uploading an SSH key.

<br />
- -After selecting a **Subscription**, **Region**, **Resource Group**, **Storage account** and **SSH Key**, click on **Next**. - - -### Nodes Configuration - -The **Nodes config** section allows configuring the nodes composing the control plane (master nodes) and data plane (worker nodes) of the Kubernetes cluster. - -You can find the list and the parameters' explanation on the [Node Pool](https://docs.spectrocloud.com/clusters/cluster-management/node-pool) documentation page. - -Among the multiple configurations you can set, be sure to consider the following: -- **Number of nodes in the pool** - Used to set the right amount of nodes that make up the pool of either the master or worker nodes. Set the count to one for the master pool and two for the worker pool. - -- **Allow worker capability** - This option allows the master node also to accept workloads. This is useful when spot instances are used as worker nodes. You can check this box if you want to. - - -- **Instance Type** - Select the compute type for the node pool. Each instance type displays the amount of CPU, RAM, and hourly cost of the instance. Select **Standard_A8_v2**. - - -- **Managed disk** - Used to select the storage class. Select **Standard LRS** and set the disk size to **60**. - - -- **Availability zones** - Used to specify the availability zones the node pool can place nodes. Pick one availability zone. - -![palette clusters nodes configuration](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png) - -
- - -### Settings - -In the **Settings** section, you can configure advanced options such as when to patch the OS, enable security scans, manage backups, add role-based access control (RBAC) bindings, and more. - -For this tutorial, you can use the default settings. Click on **Validate** to continue. - -
- - -### Review - -The Review section allows you to review all the cluster configurations before deploying the cluster. Review all the settings and click on **Finish Configuration** to deploy the cluster. - -![azure creation of a new cluster overview page](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_profile_review.png) - - -
- -Navigate to the left **Main Menu** and select **Clusters**. - -![Update the cluster](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_azure_create_cluster.png) - -Click on your cluster to review details such as deployment status, event logs, cluster profile, monitoring data, and other information about the cluster. - -
- -![View of the cluster details page](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_azure_create_cluster_details.png) - -
- -
- - -### Create Cluster Profile (GCP) -[Cluster profiles](https://docs.spectrocloud.com/cluster-profiles) are templates created with the following core layers. - - - Operating System (OS). - - Kubernetes distribution and version. - - Container Network Interface (CNI). - - Container Storage Interface (CSI). - -A cluster profile contains these core and additional add-on layers, such as security, monitoring, logging, etc. - -Cluster profiles enable you to create infrastructure stacks that can be customized in terms of the number of layers, type of components, and versions, and they offer a reproducible way to create clusters. - -Log in to Palette and navigate to the left **Main Menu**. Select **Profiles** to view the cluster profile page. -You can view the list of available cluster profiles. To create a cluster profile, click the **Add Cluster Profile** button at the top right. - -![View of the cluster view page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_profile_list_view.png) - -Follow the wizard to create a new profile. - -In the **Basic Information** section, assign the name **gcp-profile**, a profile description, select the type as **Full**, and assign the tag **env:gcp**. You can leave the version empty if you want to. Just be aware that the version defaults to 1.0.0. Click on **Next**. - -Cloud Type allows you to choose the infrastructure provider with which this cluster profile is associated. Select **Google Cloud** and click on **Next**. - -**Profile Layers** is the main configuration step, where you specify the packs that compose the profile. You can choose from four required infrastructure packs and several optional add-on packs. Every pack requires you to select the Pack Type, Registry, and Pack Name. - - -For this tutorial, use the following packs: - -| Pack Name | Version | Layer | -|--------------------|--------------------------|--------------------| -| ubuntu-gcp LTS | 20.4.x | Operating System | -| Kubernetes | 1.24.x | Kubernetes | -| cni-calico | 3.24.x | Network | -| csi-gcp-driver | 1.7.x | Storage | - - -As you fill out the information for a layer, click on **Next** to proceed to the following layer. - -Click on **Confirm** after you have finished filling out all the core layers. - -![gcp cluster profile view](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_gcp_cluster_profile_stack_view.png) - -The review section gives an overview of the cluster profile configuration you selected. Click on **Finish Configuration** to create the cluster profile. - -You can update cluster profiles after the creation process. You can modify cluster profiles by adding, removing, or editing layers at any moment. - -
- -Navigate to the left **Main Menu** and select **Clusters**. Click the **Add New Cluster** button from the clusters page. - -![palette clusters overview page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png) - -Click on **Deploy New Cluster** to access the cluster deployment wizard. Select **Google Cloud** and click the **Start Google Cloud Configuration** button. Use the following steps to create a host cluster in Google Cloud. - -
- - -### Basic information - -In the **Basic information** section, insert the general information about the cluster, such as the **Cluster name**, **Description**, **Tags**, and **Cloud account**. Click on **Next**. - -![palette clusters basic information](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_basic_info.png) - -
- - -### Cluster Profile - -On the right side is a list of available cluster profiles you can choose to deploy to GCP. Select the cluster profile you created earlier and click on **Next**. - -![palette clusters basic information](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_cluster_gcp_profile.png) - -
- - -### Parameters - -The **Parameters** section displays all the layers and add-on components in the cluster profile. - -![palette clusters basic information](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_clusters_parameters.png) - -Each layer has a pack manifest file with the deploy configurations. The pack manifest file is in a YAML format. Each pack contains a set of default values. You can change the manifest values if you don't want to use the default values of the cluster profile. Click on **Next** to proceed. - -
- - -### Cluster Configuration - -The **Cluster config** section allows you to select the **Project**, **Region**, and **SSH Key** to apply to the host cluster. All clusters require you to assign an SSH key. Refer to the [SSH Keys](/clusters/cluster-management/ssh-keys) guide for uploading an SSH key. - - -
- -After selecting a **Project**, **Region**, and **SSH Key**, click on **Next**. -### Nodes Configuration - -The **Nodes config** section allows you to configure the nodes that make up the control plane (master nodes) and data plane (worker nodes) of the host cluster. - -Before you proceed to the next section, take the time to review the following parameters. - -The complete list of parameters and their explanations can be found on the [Node Pool](/clusters/cluster-management/node-pool) documentation page. - -Among the multiple configurations you can set, be sure to consider the following: -- **Number of nodes in the pool** - Sets the number of nodes that make up either the master or the worker pool. Set the count to one for the master pool and two for the worker pool. - -- **Allow worker capability** - This option allows the master nodes to also accept workloads. This is useful when spot instances are used as worker nodes. You can check this box if you want to. - - -- **Instance Type** - Select the compute type for the node pool. Each instance type displays the amount of CPU, RAM, and hourly cost of the instance. Select **n1-standard-4**. - -- **Disk size** - Set the disk size to **60**. - - -- **Availability zones** - Used to specify the availability zones in which the node pool can place nodes. Pick one availability zone. - -![palette clusters nodes configuration](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png) - -
- -Select **Next** to proceed with the cluster deployment. - - -### Settings - -In the **Settings** section, you can configure advanced options such as when to patch the OS, enable security scans, manage backups, add role-based access control (RBAC) bindings, and more. - -For this tutorial, you can use the default settings. Click on **Validate** to continue. - -### Review - -The **Review** section allows you to review all the cluster configurations before deploying the cluster. Review all the settings and click on **Finish Configuration** to deploy the cluster. - -![gcp creation of a new cluster overview page](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_profile_review.png) - -
- -Navigate to the left **Main Menu** and select **Clusters**. - -
- -![Update the cluster](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png) - -Click on your cluster to review details such as deployment status, event logs, cluster profile, monitoring data, and other information about the cluster. - -
- -![View of the cluster details page](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_profile_details.png) - -
-
- -The cluster deployment may take several minutes depending on the cloud provider, node count, node sizes used, and the cluster profile. You can learn more about the deployment progress by reviewing the event log. Click on the **Events** tab to check the event log. - -![Update the cluster](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-event_log.png) - -
- -While you wait for the cluster deployment process to complete, feel free to check out a video where we discuss the growing pains of using Kubernetes and how Palette can help you address these pain points. - -
- - - - ---- - -## Update Cluster Profile - -Once the cluster is deployed and healthy, you are ready for the next steps. In the following steps, you will learn how to update cluster profiles after deploying a host cluster. In this scenario, you will add a new layer to the cluster profile that contains the application. - -
- -### Add a Manifest - -Navigate to the left **Main Menu** and select **Profiles**. Select the cluster profile you created earlier and applied to the host cluster. - -Click on **Add Manifest** at the top of the page and fill out the following input fields.

- -- **Layer name** - The name of the layer. Assign the name **application**. - - -- **Manifests** - Add your manifest by giving it a name and clicking the **New Manifest** button. Assign a name to the internal manifest and click on the blue button. An empty editor will appear on the right side of the screen. - -![Image of the blue button](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_manifest_blue_btn.png) - -
- -In the manifest editor, insert the following content. - -
- -```yaml -apiVersion: v1 -kind: Service -metadata: - name: hello-universe-service -spec: - type: LoadBalancer - ports: - - protocol: TCP - port: 8080 - targetPort: 8080 - selector: - app: hello-universe ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: hello-universe-deployment -spec: - replicas: 2 - selector: - matchLabels: - app: hello-universe - template: - metadata: - labels: - app: hello-universe - spec: - containers: - - name: hello-universe - image: ghcr.io/spectrocloud/hello-universe:1.0.12 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 8080 -``` - -The code snippet you added will deploy the [*hello-universe*](https://github.com/spectrocloud/hello-universe) application. You may have noticed that the code snippet you added is a Kubernetes configuration. Manifest files are a method you can use to achieve more granular customization of your Kubernetes cluster. You can add any valid Kubernetes configuration to a manifest file. - -![manifest](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_manifest.png) - -The manifest defined a replica set for the application to simulate a distributed environment with a web application deployed to Kubernetes. The application is assigned a load balancer. Using a load balancer, you can expose a single access point and distribute the workload to both containers. - -Click on **Confirm & Create** to save your changes. - -
- - -### Deploy - -Navigate to the left **Main Menu** and select **Clusters**. Click on the host cluster you deployed to open its details page. - - -In the top right-hand corner is a green **Updates Available** button. Click on the button to review the available updates. Compare the new changes against the previous cluster profile definition. The only difference is the addition of a manifest that will deploy the Hello Universe application. - - -![Available updates details](/tutorials/deploy-clusters/deploy_app/clusters_public-cloud_deploy-k8s-cluster_update_details_compare.png) - -Click on **Confirm updates** to apply the updates to the host cluster. Depending on the scope of the change, this may take a few moments. - -
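Besides watching the progress in the Palette UI, you can optionally verify the rollout with kubectl. The commands below are a minimal sketch; they assume you have downloaded the cluster's kubeconfig file from the cluster details page and that the manifest was deployed to the **default** namespace.

```shell
# Point kubectl at the kubeconfig file downloaded from Palette (example path).
export KUBECONFIG=~/Downloads/my-cluster.kubeconfig

# Confirm the two hello-universe replicas defined in the manifest are running.
kubectl get deployment hello-universe-deployment

# Confirm the LoadBalancer service has been assigned an external address.
kubectl get service hello-universe-service
```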
- - -## Verify the Application - -Navigate to the cluster's details page and verify you are in the **Overview** tab. Once the application is deployed and ready for network traffic, in the **Services** row, Palette will expose the service URL. Click on the URL for port **:8080** to access the Hello Universe application. - -![Deployed application](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_app.png) - -
- - - - -It takes one to three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request. - - - - -
- -Welcome to Hello Universe, a demo application to help you learn more about Palette and its features. Feel free to click on the logo to increase the counter and for a fun image change. - -You have deployed your first application to a cluster managed by Palette. Your first application is a single container application with no upstream dependencies. - - -## Cleanup - -Use the following steps to remove all the resources you created for the tutorial. - -To remove the cluster, navigate to the left **Main Menu** and click on **Clusters** to access the clusters page. Select the cluster you want to delete to access its details page. - -Click on **Settings**. In the top-right-hand corner of the page, expand the **settings Menu** and select **Delete Cluster** to delete the cluster. - -![Destroy-cluster](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_delete-cluster-button.png) - -You will be asked to type in the cluster name to confirm the delete action. Type in the cluster name to proceed with the delete step. The deletion process will take several minutes to complete. - -
- - - -If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for Force Delete. To trigger a force delete, navigate to the respective cluster’s details page and click on **Settings**. Click on **Force Delete Cluster** to delete the cluster. Palette will automatically remove clusters stuck in the cluster deletion phase for over 24 hours. - - - - -
- -Once the cluster is deleted, navigate to the left **Main Menu** and click on **Profiles**. Find the cluster profile you created and click on the **three-dot Menu** to display the **Delete** button. Select **Delete** and confirm the selection to remove the cluster profile. - - -
- - - -## Terraform - -The [Spectro Cloud Terraform](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) provider enables you to create and manage Palette resources in a codified manner by leveraging Infrastructure as Code (IaC). There are many reasons why you would want to utilize IaC. A few reasons worth highlighting are: the ability to automate infrastructure, improve collaboration related to infrastructure changes, self-document infrastructure through codification, and track all infrastructure in a single source of truth. - -If you want to become more familiar with Terraform, we recommend you check out the [Terraform](https://developer.hashicorp.com/terraform/intro) learning resources from HashiCorp. - -
- -## Prerequisites - -To complete this tutorial, you will need the following items: - -- Basic knowledge of containers. -- [Docker Desktop](https://www.docker.com/products/docker-desktop/) or another container management tool. -- Terraform v1.4.0 or greater. A quick version check is shown after this list. -- Create a Cloud account from one of the following providers. - - [AWS](https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account) - - [Azure](https://learn.microsoft.com/en-us/training/modules/create-an-azure-account) - - [GCP](https://cloud.google.com/docs/get-started) -- Register the [cloud account with Palette](https://console.spectrocloud.com/auth/signup). Use the following resources for additional guidance. - - [Register and Manage AWS Accounts](/clusters/public-cloud/aws/add-aws-accounts) - - [Register and Manage Azure Cloud Accounts](/clusters/public-cloud/azure/azure-cloud) - - [Register and Manage GCP Accounts](/clusters/public-cloud/gcp#creatingagcpcloudaccount) - -
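If you are not sure which Terraform version you have installed, a quick check is shown below. This is a generic Terraform command, not something specific to this tutorial, and it assumes the `terraform` binary is already on your PATH.

```shell
# Print the installed Terraform version and confirm it is v1.4.0 or greater.
terraform -version
```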
- -## Setup Local Environment - -You can clone the tutorials repository locally or follow along by downloading a Docker image that contains the tutorial code and all dependencies. - - - - -Open a terminal window to begin the tutorial and download the tutorial code from GitHub. - -
- -```shell -git clone git@github.com:spectrocloud/tutorials.git -``` - -Change directory to the tutorial folder. - -
- -```shell -cd tutorials/ -``` - -Check out the following git tag. - -
- -```shell -git checkout v1.0.6 -``` - -Change the directory to the tutorial code. - -
- -```shell -cd terraform/iaas-cluster-deployment-tf/ -``` - -
- - - - -Ensure Docker Desktop is available on your local machine. Use the following command and ensure you receive an output displaying the version number. - -
- -```bash -docker version -``` - -Download the tutorial image to your local machine. -
- -```bash -docker pull ghcr.io/spectrocloud/tutorials:1.0.6 -``` - -Next, start the container, and open a bash session into it. -
- -```shell -docker run --name tutorialContainer --interactive --tty ghcr.io/spectrocloud/tutorials:1.0.6 bash -``` - -Navigate to the tutorial code. - -
- -```shell -cd /terraform/iaas-cluster-deployment-tf -``` - - -
- - -
- ---- - -## Create an API Key - -Before you can get started with the Terraform code, you need a Spectro Cloud API key. - -To create an API key, log in to [Palette](https://console.spectrocloud.com), click on the **User Menu**, and select **My API Keys**. - -![Image that points to the user drop-down Menu and points to the API key link](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_create_api_key.png) - -Next, click on **Add New API Key**. Fill out the required input field, **API Key Name**, and the **Expiration Date**. Click on **Confirm** to create the API key. Copy the key value to your clipboard, as you will use it shortly. - -
- -In your terminal session, issue the following command to export the API key as an environment variable. - -
- -```shell -export SPECTROCLOUD_APIKEY=YourAPIKeyHere -``` - -The [Spectro Cloud Terraform](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) provider requires credentials to interact with the Palette API. -The provider will use this environment variable to authenticate with the Spectro Cloud API endpoint. - - -## Resources Review - -To help you get started with Terraform, the tutorial code is structured to support deploying a cluster to either Azure, GCP, or AWS. Before you deploy a host cluster to your target provider, take a few moments to review the following files in the folder structure. - -
- -- **providers.tf** - this file contains the Terraform providers that are used to support the deployment of the cluster. - - -- **inputs.tf** - a file containing all the Terraform variables for the deployment logic. - - -- **data.tf** - contains all the query resources that perform read actions. - - -- **cluster_profiles.tf** - this file contains the cluster profile definitions. Each cloud provider has its own cluster profile definition. - - -- **cluster.tf** - this file has all the required cluster configurations to deploy a host cluster to one of the cloud providers. - - -- **terraform.tfvars** - use this file to customize the deployment and target a specific cloud provider. This is the primary file you will make modifications to. - - -- **outputs.tf** - contains content that will be output in the terminal session upon a successful Terraform `apply` action. - -In the following section, you will be able to review the core terraform resources more closely. - -
- -### Provider - -The **provider.tf** file contains the Terraform providers and their respective versions. The tutorial uses two providers - the Spectro Cloud Terraform provider and the TLS Terraform provider. Take note of how the project name is specified in the `provider "spectrocloud" {}` block. You can change the target project by changing the value specified to the `project_name` parameter. - -
- - -```terraform -terraform { - required_providers { - spectrocloud = { - version = ">= 0.13.1" - source = "spectrocloud/spectrocloud" - } - tls = { - source = "hashicorp/tls" - version = "4.0.4" - } - } -} - -provider "spectrocloud" { - project_name = "Default" -} -``` - -The next file you should become familiar with is the **cluster-profiles.tf** file. - -### Cluster Profile - -The Spectro Cloud Terraform provider has several resources available for use. When creating a cluster profile, use the `spectrocloud_cluster_profile` resource. -This resource can be used to customize all layers of a cluster profile. You can specify all the different packs and versions to use and add a manifest or Helm chart. - - -In the **cluster-profiles.tf** file, the cluster profile resource is declared three times. Each instance of the resource is for a specific cloud provider. Using the AWS cluster profile as an example, notice how the cluster profile uses `pack {}` blocks to specify each layer of the cluster profile. The order in which you arrange the `pack {}` blocks plays an important role, as each layer maps to the core infrastructure in a cluster profile. The first layer must be the OS, followed by Kubernetes, the container network interface, the container storage interface, etc. Ensure you define the bottom layer of the cluster profile first. - -
- -```terraform -resource "spectrocloud_cluster_profile" "aws-profile" { - name = "tf-aws-profile" - description = "A basic cluster profile for AWS" - tags = concat(var.tags, ["env:aws"]) - cloud = "aws" - type = "cluster" - - pack { - name = data.spectrocloud_pack.aws_ubuntu.name - tag = data.spectrocloud_pack.aws_ubuntu.version - uid = data.spectrocloud_pack.aws_ubuntu.id - values = data.spectrocloud_pack.aws_ubuntu.values - } - - pack { - name = data.spectrocloud_pack.aws_k8s.name - tag = data.spectrocloud_pack.aws_k8s.version - uid = data.spectrocloud_pack.aws_k8s.id - values = data.spectrocloud_pack.aws_k8s.values - } - - pack { - name = data.spectrocloud_pack.aws_cni.name - tag = data.spectrocloud_pack.aws_cni.version - uid = data.spectrocloud_pack.aws_cni.id - values = data.spectrocloud_pack.aws_cni.values - } - - pack { - name = data.spectrocloud_pack.aws_csi.name - tag = data.spectrocloud_pack.aws_csi.version - uid = data.spectrocloud_pack.aws_csi.id - values = data.spectrocloud_pack.aws_csi.values - } - - pack { - name = "hello-universe" - type = "manifest" - tag = "1.0.0" - values = "" - manifest { - name = "hello-universe" - content = file("manifests/hello-universe.yaml") - } - } -} -``` - -The last `pack {}` block contains a manifest file that contains all the Kubernetes configurations for the [Hello Universe](https://github.com/spectrocloud/hello-universe) application. Including the application in the cluster profile ensures the application is installed during the cluster deployment process. If you are wondering what all the data resources are for, head on to the next section, where data resources are reviewed. - - -### Data Resources - -You may have noticed that each `pack {}` block contains references to a data resource. - -
- - -```terraform - pack { - name = data.spectrocloud_pack.aws_csi.name - tag = data.spectrocloud_pack.aws_csi.version - uid = data.spectrocloud_pack.aws_csi.id - values = data.spectrocloud_pack.aws_csi.values - } -``` - -[Data resources](https://developer.hashicorp.com/terraform/language/data-sources) are used to perform read actions in Terraform. The Spectro Cloud Terraform provider exposes several data resources to help you make your Terraform code more dynamic. The data resource used in the cluster profile is `spectrocloud_pack`. This resource enables you to query Palette for information about a specific pack. You can get information about the pack using the data resource, such as its unique ID, registry ID, available versions, and YAML values. - -Below is the data resource used to query Palette for information about the Kubernetes pack for version `1.24.10`. - -
- -```terraform -data "spectrocloud_pack" "aws_k8s" { - name = "kubernetes" - version = "1.24.10" -} -``` - -Using the data resource, you avoid manually typing in the parameter values required by the cluster profile's `pack {}` block. - -### Cluster - -The file **clusters.tf** contains the definitions for deploying a host cluster to one of the cloud providers. To create a host cluster, you must use one of the cluster resources specific to the cloud provider you want to target. - -In this tutorial, the following Terraform cluster resources are used. - -
- -| Terraform Resource | Platform | Documentation | -|---|---|---| -| `spectrocloud_cluster_aws` | AWS | [Link](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_aws) | -| `spectrocloud_cluster_azure` | Azure | [Link](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_azure)| -| `spectrocloud_cluster_gcp` | GCP | [Link](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_gcp)| - - -Using the `spectrocloud_cluster_azure` resource from the tutorial as an example, notice how the resource accepts a set of parameters. When deploying a cluster, you can change the same parameters through the Palette User Interface (UI). You can learn more about each parameter by reviewing the resource's documentation page hosted in the Terraform registry. - -
- -```terraform -resource "spectrocloud_cluster_azure" "cluster" { - name = "azure-cluster" - tags = concat(var.tags, ["env:azure"]) - cloud_account_id = data.spectrocloud_cloudaccount_azure.account[0].id - - cloud_config { - subscription_id = var.azure_subscription_id - resource_group = var.azure_resource_group - region = var.azure-region - ssh_key = tls_private_key.tutorial_ssh_key[0].public_key_openssh - } - - cluster_profile { - id = spectrocloud_cluster_profile.azure-profile[0].id - } - - machine_pool { - control_plane = true - control_plane_as_worker = true - name = "master-pool" - count = var.azure_master_nodes.count - instance_type = var.azure_master_nodes.instance_type - azs = var.azure_master_nodes.azs - is_system_node_pool = var.azure_master_nodes.is_system_node_pool - disk { - size_gb = var.azure_master_nodes.disk_size_gb - type = "Standard_LRS" - } - } - - machine_pool { - name = "worker-basic" - count = var.azure_worker_nodes.count - instance_type = var.azure_worker_nodes.instance_type - azs = var.azure_worker_nodes.azs - is_system_node_pool = var.azure_worker_nodes.is_system_node_pool - } - - timeouts { - create = "30m" - delete = "15m" - } -} -``` -## Deploy Cluster - -You must first make changes to the **terraform.tfvars** file. Open the **terraform.tfvars** file in any editor of your choice, and focus on the cloud provider you want to deploy a host cluster. - -In this Terraform template, to help simplify things, we have added a toggle variable that you can use to select the deployment environment. Each cloud provider has its section containing all the variables you must populate. If a variable under your chosen cloud provider has the value `REPLACE_ME`, it must be replaced. - -As an example, review the AWS section. To deploy to AWS, you would change `deploy-aws = false` to `deploy-aws = true`. Additionally, you would replace all the variables with a value `REPLACE_ME`. You can also change the nodes in either the master pool or worker pool by updating the values. - -```terraform -########################### -# AWS Deployment Settings -############################ -deploy-aws = false # Set to true to deploy to AWS - -aws-cloud-account-name = "REPLACE_ME" -aws-region = "REPLACE_ME" -aws-key-pair-name = "REPLACE_ME" - -aws_master_nodes = { - count = "1" - control_plane = true - instance_type = "m4.2xlarge" - disk_size_gb = "60" - availability_zones = ["REPLACE_ME"] # If you want to deploy to multiple AZs, add them here -} - -aws_worker_nodes = { - count = "1" - control_plane = false - instance_type = "m4.2xlarge" - disk_size_gb = "60" - availability_zones = ["REPLACE_ME"] # If you want to deploy to multiple AZs, add them here -} -``` - -After you have made all the required changes, issue the following command to initialize Terraform. - -
- -```shell -terraform init -``` - -Next, issue the plan command to preview the changes. - -
- -```shell -terraform plan -``` - - -Output: -```shell -Plan: 2 to add, 0 to change, 0 to destroy. -``` - -If you change the desired cloud provider's toggle variable to `true`, you will receive an output stating two new resources will be created. The two resources are your cluster profile and the host cluster. - -To deploy all the resources, use the apply command. - -
- -```shell -terraform apply -auto-approve -``` - - -### Verify the Profile - - -To check out the cluster profile creation in Palette, log in to [Palette](https://console.spectrocloud.com), and from the left **Main Menu** click on **Profiles** to access the profile page. Scan the list and look for a cluster profile with the following name pattern `tf-[cloud provider]-profile`. Click on the cluster profile to review its details, such as layers, packs, and versions. - -![A view of the cluster profile](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_cluster_profile_view.png) - - -
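You can also inspect what Terraform created from the same terminal session before switching to the UI. These are standard Terraform commands and assume the `terraform apply` above completed successfully in the tutorial folder.

```shell
# List every resource recorded in the Terraform state.
terraform state list

# Print the values defined in outputs.tf for the deployed resources.
terraform output
```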
- - -### Verify the Cluster - - -You can also check the cluster creation process by navigating to the left **Main Menu** and selecting **Clusters**. - -![Update the cluster](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_aws_create_cluster.png) - -
- -Select your cluster to review its details page, which contains the status, cluster profile, event logs, and more. - -
- -The cluster deployment may take several minutes depending on the cloud provider, node count, node sizes used, and the cluster profile. You can learn more about the deployment progress by reviewing the event log. Click on the **Events** tab to check the event log. - -![Update the cluster](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-event_log.png) - -
- -While you wait for the cluster deployment process to complete, feel free to check out a video where we discuss the growing pains of using Kubernetes and how Palette can help you address these pain points. - -
- - - - -
- -## Validate - -Once the cluster is deployed and ready, you can access the deployed application Hello Universe. -From the cluster's **Overview** page, click on the URL for port **:8080** next to the **hello-universe-service** in the **Services** row. This URL will take you to the application landing page. - -
- - - - -It takes one to three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request. - - - - -![Deployed application](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_app.png) - -
- -Welcome to Hello Universe, a demo application to help you learn more about Palette and its features. Feel free to click on the logo to increase the counter and for a fun image change. - -You have deployed your first application to a cluster managed by Palette through Terraform. Your first application is a single container application with no upstream dependencies. - - -## Cleanup - -Use the following steps to clean up the resources you created for the tutorial. The Terraform `destroy` command removes all the resources you created through Terraform. - -
- -```shell -terraform destroy --auto-approve -``` - -Output: -```shell -Destroy complete! Resources: 2 destroyed. -``` - -
- - - -If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for Force Delete. To trigger a force delete, navigate to the respective cluster’s details page and click on **Settings**. Click on **Force Delete Cluster** to delete the cluster. Palette will automatically remove clusters stuck in the cluster deletion phase for over 24 hours. - - - - -If you are using the tutorial container and want to exit the container, type `exit` in your terminal session and press the **Enter** key. Next, issue the following command to stop the container. - -
- -```shell -docker stop tutorialContainer -``` - - -
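Optionally, you can also remove the stopped container and the tutorial image to reclaim disk space. The commands below assume the container and image names used earlier in this tutorial.

```shell
# Remove the stopped tutorial container.
docker rm tutorialContainer

# Remove the tutorial image from your local machine.
docker rmi ghcr.io/spectrocloud/tutorials:1.0.6
```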
-
- -# Wrap-up - -In this tutorial, you created a cluster profile, which is a template containing the core layers required to deploy a host cluster. You then deployed a host cluster onto your preferred cloud service provider. Once the cluster was deployed, you updated the cluster profile by adding the application Hello Universe to the profile definition and applied the updates to the host cluster. - -Palette ensures consistency across cluster deployments through cluster profiles. Palette also enables you to quickly deploy applications to a Kubernetes environment with little or no prior Kubernetes knowledge. In a matter of minutes, you were able to provision a new Kubernetes cluster and deploy an application. - -We encourage you to check out the [Deploy an Application using Palette Dev Engine](/devx/apps/deploy-app) tutorial to learn more about Palette. Palette Dev Engine can help you deploy applications more quickly through the usage of [virtual clusters](/glossary-all#palettevirtualcluster). Feel free to check out the reference links below to learn more about Palette. - -
- - -- [Palette Modes](/introduction/palette-modes) - - -- [Cluster Profiles](/cluster-profiles) - - -- [Palette Clusters](/clusters) - - -- [Hello Universe GitHub repository](https://github.com/spectrocloud/hello-universe) - -
diff --git a/content/docs/19-kubernetes-knowlege-hub.md b/content/docs/19-kubernetes-knowlege-hub.md deleted file mode 100644 index 33590408cb..0000000000 --- a/content/docs/19-kubernetes-knowlege-hub.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "Kubernetes Knowledge Hub" -metaTitle: "Kubernetes Knowledge Hub" -metaDescription: "Kubernetes Knowledge Hub Repository" -icon: "bookmark" -hideToCSidebar: true -hideToC: true -fullWidth: true ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Welcome to the Spectro Cloud Knowledge Portal - -Welcome to the Spectro Cloud Kubernetes Knowledge Hub. You will find core Kubernetes tutorials, how-tos, frequently asked questions, and community curated resources. - -If you have a topic in mind you would like to see, use the Feedback app on the lower-right-hand corner. -
- -- [How-To](/kubernetes-knowlege-hub/how-to) - - -- [Tutorials](/kubernetes-knowlege-hub/tutorials) - - -
diff --git a/content/docs/19-kubernetes-knowlege-hub/00-how-to.md b/content/docs/19-kubernetes-knowlege-hub/00-how-to.md deleted file mode 100644 index 341dee33d7..0000000000 --- a/content/docs/19-kubernetes-knowlege-hub/00-how-to.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: "How to" -metaTitle: "Get started with a quick Kubernetes How-to" -metaDescription: "Kubernetes School with How to" -icon: "book" -hideToC: false -fullWidth: false -hiddenFromNav: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# How To - -Learn about core Kubernetes concepts and how you can apply them on Spectro Cloud Palette. - -# Core Kubernetes -- [How To Retrieve Images from a Private Registry in Kubernetes](/kubernetes-knowlege-hub/how-to/how-to-retrieve-images-from-private-registry) - - -
diff --git a/content/docs/19-kubernetes-knowlege-hub/00-how-to/02-how-to-retrieve-images-from-private-registry.md b/content/docs/19-kubernetes-knowlege-hub/00-how-to/02-how-to-retrieve-images-from-private-registry.md deleted file mode 100644 index 3090d84b55..0000000000 --- a/content/docs/19-kubernetes-knowlege-hub/00-how-to/02-how-to-retrieve-images-from-private-registry.md +++ /dev/null @@ -1,207 +0,0 @@ ---- -title: "Retrieve Images from a Private Registry" -metaTitle: "Retrieve Images from a Private Registry" -metaDescription: "Create a Kubernetes Secret to retrieve images from a private registry." -icon: "" -hideToC: false -fullWidth: false -hiddenFromNav: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# How To Retrieve Images from a Private Registry in Kubernetes - -Kubernetes is an open-source container orchestration platform that enables efficient management, deployment, and scaling of containerized applications. - -By default, Docker and Kubernetes allow a limited number of unauthenticated pulls from a Docker registry, such as Docker Hub. When you exceed this limit, you will not be able to pull any more images until the limit resets. - -The limit is based on the IP address of the machine that is making the pulls, so it applies to all containers running on that machine. - -To avoid this issue, we recommend that you authenticate with the Docker registry before pulling images, especially if you are pulling from a private registry. This ensures you have access to the images you need and can pull them without restrictions or limitations. - -To log into a Docker registry from Kubernetes, you must create a secret that contains your registry credentials. You can use this secret in a Kubernetes deployment configuration to pull images from the registry. - -In this how-to guide, you will log into a private docker registry to pull existing images of an application that you will deploy in Kubernetes. - -# Prerequisites - -- The kubectl [command-line tool](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/). Kubectl allows you to connect to, configure and work with your clusters through the command line. -- Access to a private registry. [DockerHub](https://hub.docker.com/) offers a single private registry on the free tier. If you do not have a personal registry account, you can use DockerHub. -- Access to a running Kubernetes cluster. To learn how to create clusters in different environments using Palette, review guides listed under [Clusters](/clusters) or visit the [Palette Onboarding Workflow](/getting-started/onboarding-workflow#paletteonboardingworkflow) guide. To learn how to create a Kubernetes cluster from scratch, check out the [Create a Cluster](https://kubernetes.io/docs/tutorials/kubernetes-basics/create-cluster/) Kubernetes resource. - -The following example explains how you can create a secret and use it in a Kubernetes deployment. - -## Create a Credentials JSON File - -First, create a file called **registry-creds.json** that contains your registry credentials in the following format. - -
- -```json -{ - "auths": { - "example.registry.com": { - "username": "username", - "password": "password" - } - } -} -``` - -Keeping passwords in plain text is unsafe. Kubernetes automatically base64-encodes the credentials used to create a secret, but base64 encoding is not encryption, and the passwords can still be decoded. - -## Create a Kubernetes Secret - -Use the `kubectl` command-line tool to generate a secret from the **registry-creds.json** file. - -
- -```bash -kubectl create secret generic myregistrykey --from-file=registry-creds.json -``` - -You can use the command below to view the secret created in detail. - -
- -```bash -kubectl get secret/myregistrykey --output json -``` - -The command output displays the content of the **registry-creds.json** file as a base64-encoded string. - -
- -```json -{ - "apiVersion": "v1", - "data": { - "registry-creds.json": "ewogICJhdXRocyI6IHsKICAgICJleGFtcGxlLnJlZ2lzdHJ5LmNvbSI6IHsKICAgICAgInVzZXJuYW1lIjogInRlc3RfdXNlcm5hbWUiLAogICAgICAicGFzc3dvcmQiOiAidGVzdF9wYXNzd29yZCIKICAgIH0KICB9Cn0K" - }, - "kind": "Secret", - "metadata": { - "creationTimestamp": "2023-03-22T08:44:26Z", - "name": "myregistrykey", - "namespace": "default", - "resourceVersion": "1552285", - "uid": "ccfb047b-67c8-446b-a69a-6eb762c3100f" - }, - "type": "Opaque" -} -``` - -Invoke the following command to decode the secret you created to verify that secrets are not secure. - -
- -```bash -kubectl get secret myregistrykey --output jsonpath='{.data.registry-creds\.json}' | base64 --decode -``` - -The output of issuing the command above is the content of the JSON file you used to create the secret. - -
- -```json -{ - "auths": { - "example.registry.com": { - "username": "username", - "password": "password" - } - } -} -``` - -## Add Secret to Deployment Config - -In your Kubernetes deployment configuration, specify the name of the secret you just created for the imagePullSecrets parameter. - -
- -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: my-deployment -spec: - replicas: 3 - selector: - matchLabels: - app: my-app - template: - metadata: - labels: - app: my-app - spec: - containers: - - name: my-container - image: registry.example.com/my-image - imagePullSecrets: - - name: myregistrykey -``` - -## Apply the Deployment Configuration - -
- -```bash -kubectl apply --filename deployment.yaml -``` - -With this configuration in place, Kubernetes will use the registry credentials in the `myregistrykey` secret to log into the registry and pull the specified image when deploying the application. - -## Other Docker Registry Authentication Methods - -An alternative way to log into a Docker registry from Kubernetes is by using the command line. - -Authenticate to the private registry. Here’s an example of how to do this. - -
- -```bash -kubectl create secret docker-registry <secret-name> \ - --docker-server=<registry-server> \ - --docker-username=<username> \ - --docker-password=<password> \ - --docker-email=<email> -``` - -In the snippet above, **`<secret-name>`** refers to a unique name for the secret, and **`<registry-server>`** is the URL of the private registry. Replace **`<username>`** with the username for authentication and **`<password>`** with the password for authentication. Also, replace **`<email>`** with the email associated with the authentication credentials. - -Add the secret created in the previous step to the default service account with the following code. - -
- -```bash -kubectl patch serviceaccount default \ - --patch '{"imagePullSecrets": [{"name": "<secret-name>"}]}' -``` - -Replace **`<secret-name>`** with the secret created in the previous step. - -Once you are authenticated and have added the secret to your default service account, you can use the `kubectl` command to pull images from the registry and deploy them to your Kubernetes cluster as follows. - -
- -```bash -kubectl run <app-name> \ - --image=<registry-server>/<image-name>:<tag> \ - --port=<container-port> -``` - -The command above runs the specified image from the private registry on your Kubernetes cluster. - -# Next Steps - -Accessing images from a private registry in Kubernetes can be challenging due to the need to authenticate with the registry. - -To solve this challenge, you have learned how to create a Kubernetes secret with your Docker registry credentials and use it in a Kubernetes deployment configuration. This allows you to pull images from your private registry without restrictions or limitations. - -To learn more about Kubernetes and how to use it to deploy your application, check out [Palette's Dev Engine](/devx/apps/deploy-app) and how it can reduce the challenges often encountered with deploying apps to Kubernetes. You can also read about [how to deploy a stateless frontend application](/kubernetes-knowlege-hub/tutorials/deploy-stateless-frontend-app) on Kubernetes or join our [slack channel](https://join.slack.com/t/spectrocloudcommunity/shared_invite/zt-1mw0cgosi-hZJDF_1QU77vF~qNJoPNUQ). Learn from other Kubernetes users and get to know fellow community members. diff --git a/content/docs/19-kubernetes-knowlege-hub/01-tutorials.md b/content/docs/19-kubernetes-knowlege-hub/01-tutorials.md deleted file mode 100644 index 2d1264c83e..0000000000 --- a/content/docs/19-kubernetes-knowlege-hub/01-tutorials.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "Tutorials" -metaTitle: "Get started with a quick Kubernetes How-to" -metaDescription: "Kubernetes School with How to" -icon: "book" -hideToC: false -fullWidth: false -hiddenFromNav: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; -import PointsOfInterest from 'shared/components/common/PointOfInterest'; -import Tooltip from "shared/components/ui/Tooltip"; - -# Tutorials - -Learn about core Kubernetes concepts and how you can apply them on Spectro Cloud Palette. - -
- - -# Core Kubernetes - -- [Deploy a Stateless Frontend Application on Kubernetes](/kubernetes-knowlege-hub/tutorials/deploy-stateless-frontend-app) - diff --git a/content/docs/19-kubernetes-knowlege-hub/01-tutorials/06-deploy-stateless-frontend-app.md b/content/docs/19-kubernetes-knowlege-hub/01-tutorials/06-deploy-stateless-frontend-app.md deleted file mode 100644 index 6ac380f52a..0000000000 --- a/content/docs/19-kubernetes-knowlege-hub/01-tutorials/06-deploy-stateless-frontend-app.md +++ /dev/null @@ -1,237 +0,0 @@ ---- -title: 'Deploy a Stateless Frontend Application With Kubernetes' -metaTitle: 'Deploy a Stateless Frontend Application With Kubernetes' -metaDescription: 'One of the key benefits of using Kubernetes is that it provides a consistent and reliable way to deploy applications across different environments, including on-premises data centers and cloud infrastructure. Learn how to deploy a stateless frontend application in Kubernetes.' -icon: "" -hideToC: false -fullWidth: false -hideToCSidebar: false -hiddenFromNav: false ---- - -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; -import InfoBox from 'shared/components/InfoBox'; - -# Deploy a Stateless Frontend App with Kubernetes - -Kubernetes is a container orchestration platform that is widely used for deploying and managing containerized applications. - -One of the key benefits of using Kubernetes is that it provides a consistent and reliable way to deploy applications across different environments, including on-prem data centers and cloud infrastructure. - -Deploying a stateless frontend application with Kubernetes can be a straightforward process, although it requires an understanding of the key concepts and best practices of Kubernetes. - -In this tutorial, you will containerize a date suggester app built in React and deploy it with Kubernetes. This application is bootstrapped with [Create React App](https://create-react-app.dev/). - -# Requirements - -- An installation of [Node.js and NPM](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) on your machine. Node is a Javascript runtime environment and will enable React to run on your machine. - - -- A clone of the application from the [date suggestions app](https://github.com/Princesso/date-buddy.git) on GitHub. Cloning the application will enable you to follow this tutorial step by step. - - -- A Docker account and a [Docker installation](https://docs.docker.com/engine/install/ubuntu/) on your machine. - - -- An active Kubernetes cluster. Check out the [Deploy a Cluster with Palette](/clusters/public-cloud/deploy-k8s-cluster) tutorial to get started. - - -- An installation of the [kubectl command-line tool](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/) on your machine and connected to your cluster. -- A LoadBalancer. You can [create a LoadBalancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) with a public cloud provider, or use the [minikube tunnel](https://minikube.sigs.k8s.io/docs/commands/tunnel/) to trick a local cluster into exposing a resource. - -## About the Application - -The date suggester app is written in React. It takes a single date input on when a user will like to go on a date and displays a date idea for the selected date. - -The app data comes from a JSON file that lives on the frontend app. - -## Clone the Application. - -Use the command shown below to clone the application from GitHub. - -
- -```bash -git clone https://github.com/spectrocloud/date-buddy -``` - -If you prefer to use a different stateless frontend app, you can do so. You may, however, get different results than in this tutorial. This tutorial only serves as a guide. - -## Create a Dockerfile on the App’s Root Directory. - -Before continuing this step, ensure Docker is installed on your machine. In the app's root directory, create a file named **Dockerfile**. - -
- -```bash -touch Dockerfile -``` - -In a text editor, add the lines below to the Dockerfile. - -
- -```bash -FROM node:12 - -WORKDIR /date-suggestions - -COPY package*.json ./ - -RUN npm install - -COPY . . - -EXPOSE 3000 - -CMD ["npm", "start"] -``` - -Also, create a **.dockerignore** file and add the following lines to it. - -```bash -/node_modules -/.pnp -.pnp.js -/coverage -``` - -## Build a Docker Image of the Application. - -This step packages the application into a portable image. To build the app’s image, run the Docker `build` command as shown. - -
- -```bash -docker build --tag date-suggestions . -``` - -## Create a Kubernetes Deployment. - -Before continuing with this step, ensure that you have access to a Kubernetes cluster, as explained in the [requirements](#requirements) section. - -In the application's root directory, create a Kubernetes Deployment file using the `kubectl` command below. - -
- -```bash -kubectl create deploy date-suggestions --image=date-suggestions --replicas=2 --port=3000 --dry-run=client --output yaml -``` - -The command output is a YAML representation of the deployment, similar to the lines below. - -
- -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: date-suggestions -spec: - selector: - matchLabels: - app: date-suggestions - replicas: 2 - template: - metadata: - labels: - app: date-suggestions - spec: - containers: - - name: date-suggestions - image: date-suggestions - ports: - - containerPort: 3000 -``` - - -You can use the output YAML to create a deployment file. Use the redirect operator `>` to turn the command output into a **deployment.yaml** file. - -
- -```bash -kubectl create deploy date-suggestions --image=date-suggestions --replicas=2 --port=3000 --dry-run=client --output yaml > deployment.yaml -``` - -Alternatively, you can use the `touch` command to create the **deployment.yaml** file and then copy the YAML output from the previous command into it. - -
- -```bash -touch deployment.yaml -``` - -## Create a Kubernetes Service. - -Create and populate a Kubernetes Service file in the app's root directory. By default, your application will only be accessible within the cluster. You'll need to create a Kubernetes service resource to expose the application to resources outside the Kubernetes cluster. A service resource creates an abstraction over a set of pods that provides discovery and routing between them. - -To create a service, use the `kubectl expose` command as shown below. - -
- -```bash -kubectl expose deployment date-suggestions --type=LoadBalancer --port=80 --target-port=3000 --name=date-suggestions-service --dry-run=client --output yaml -``` - -The output of running the command will be similar to the YAML below. - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: date-suggestions-service -spec: - type: LoadBalancer - selector: - app: date-suggestions - ports: - - protocol: TCP - port: 80 - targetPort: 3000 -``` - -If everything looks good, modify the command to redirect the output YAML to the file **service.yaml**. - -```bash -kubectl expose deployment date-suggestions --type=LoadBalancer --port=80 --target-port=3000 --name=date-suggestions-service --dry-run=client --output yaml > service.yaml -``` - -You can also create a YAML file with the `touch` command and add the output of the `kubectl expose` command to it. - -
- -```bash -touch service.yaml -``` - -Then copy the YAML output from the `kubectl expose` command above into the service file. - -## Deploy the Application. -Use the kubectl command-line tool connected to the cluster you created earlier, and deploy the application by applying the deployment and service files to Kubernetes. - -
- -```bash -kubectl apply --filename deployment.yaml --filename service.yaml -``` - -## Confirm That the Deployment Was Successful. - -Once the deployment and service files have been applied, you can retrieve your app's address by issuing the following command. - -
- -```bash -kubectl get service date-suggestions-service --output=jsonpath='{.status.loadBalancer.ingress[0].ip}' -``` - -This will display the URL of your app that you can use to can access it in a web browser. - -# Next Steps - -Deploying a stateless frontend application with Kubernetes can be a straightforward process if you understand the fundamental concepts of Kubernetes. - -In this tutorial, you containerized a stateless React-based app and deployed it with Kubernetes by creating a Dockerfile, building a Docker image, creating a Kubernetes deployment, and creating a Kubernetes service. - -To learn more about Kubernetes, you can join our [slack channel](https://join.slack.com/t/spectrocloudcommunity/shared_invite/zt-1mw0cgosi-hZJDF_1QU77vF~qNJoPNUQ). Learn from other Kubernetes users and get to know fellow community members. \ No newline at end of file diff --git a/content/docs/20-legal-licenses.md b/content/docs/20-legal-licenses.md deleted file mode 100644 index 47ca78f8b8..0000000000 --- a/content/docs/20-legal-licenses.md +++ /dev/null @@ -1,500 +0,0 @@ ---- -title: "Legal & Acknowledgments" -metaTitle: "Legal & Acknowledgments" -metaDescription: "Review the legal and open source components used in Palette." -icon: "gavel" -hideToCSidebar: true -hideToC: true -fullWidth: true ---- - -# Overview - - - -The following table lists the open-source licenses tied to the libraries and modules currently in use by Palette. If you have any questions or concerns, contact us at support@spectrocloud.com - -| Library | License| -|------- | -------| -|api | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)| -| apimachinery | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| appengine | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| assert | [MIT](https://opensource.org/license/mit/) -| atomic | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| aws-sdk-go | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| azure-sdk-for-go | [MIT](https://opensource.org/license/mit/) -| backoff | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| beego | [MIT](https://opensource.org/license/mit/) -| cast | [MIT](https://opensource.org/license/mit/) -| cert-manager | [MIT](https://opensource.org/license/mit/) -| client-go | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| client_golang | [MIT](https://opensource.org/license/mit/) -| client_model | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cloud.google.com/go/container | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cluster-api | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cluster-api-bootstrap-provider-microk8s | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cluster-api-control-plane-provider-microk8s | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cluster-api-provider-aws | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cluster-api-provider-azure | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cluster-api-provider-coxedge | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cluster-api-provider-gcp | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cluster-api-provider-maas | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cluster-api-provider-openstack | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cluster-api-provider-vcluster | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cluster-api-provider-vsphere | 
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cluster-bootstrap | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| common | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| component-base | [MIT](https://opensource.org/license/mit/) -| compress | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| concurrent | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| containerized-data-importer | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| controller-runtime | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| corefile-migration | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| crypto | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| easyjson | [MIT](https://opensource.org/license/mit/) -| emperror.dev/errors | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| errors | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| errwrap | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| exp | [MIT](https://opensource.org/license/mit/) -| flect | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) -| fsnotify | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) -| ginkgo | [MPL-2.0](https://www.mozilla.org/en-US/MPL/2.0) -| github.com/andybalholm/brotli | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| github.com/apparentlymart/go-cidr | [MIT](https://opensource.org/license/mit/) -| github.com/avast/retry-go | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| github.com/Azure/go-autorest/autorest/to | [MIT](https://opensource.org/license/mit/) -| github.com/Azure/go-autorest/autorest/validation | [MIT](https://opensource.org/license/mit/) -| github.com/blang/semver/v4 | [MIT](https://opensource.org/license/mit/) -| github.com/coredns/caddy | [MIT](https://opensource.org/license/mit/) -| github.com/docker/distribution | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/dsnet/compress | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/emicklei/go-restful/v3 | [MIT](https://opensource.org/license/mit/) -| github.com/evanphx/json-patch/v5 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/go-errors/errors | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/golang-jwt/jwt/v4 | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| github.com/google/gnostic | [MIT](https://opensource.org/license/mit/) -| github.com/go-openapi/analysis | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| github.com/go-openapi/errors | [MIT](https://opensource.org/license/mit/) -| github.com/go-openapi/loads | [MIT](https://opensource.org/license/mit/) -| github.com/go-openapi/runtime | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/go-openapi/strfmt | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/go-openapi/validate | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/gophercloud/gophercloud | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/grpc-ecosystem/grpc-gateway/v2 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/jasonlvhit/gocron | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/juliangruber/go-intersect | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/klauspost/pgzip | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/lib/pq | 
[BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| github.com/Masterminds/goutils | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) -| github.com/Masterminds/sprig/v3 | [MIT](https://opensource.org/license/mit/) -| github.com/mholt/archiver/v3 | [MIT](https://opensource.org/license/mit/) -| github.com/minio/highwayhash | [MIT](https://opensource.org/license/mit/) -| github.com/[MIT](https://opensource.org/license/mit/)chellh/copystructure | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/[MIT](https://opensource.org/license/mit/)chellh/hashstructure | [MIT](https://opensource.org/license/mit/) -| github.com/[MIT](https://opensource.org/license/mit/)chellh/reflectwalk | [MIT](https://opensource.org/license/mit/) -| github.com/nats-io/jwt/v2 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/nats-io/nats.go | [MIT](https://opensource.org/license/mit/) -| github.com/nats-io/nkeys | [MIT](https://opensource.org/license/mit/) -| github.com/nats-io/nuid | [MIT](https://opensource.org/license/mit/) -| github.com/nwaples/rardecode | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/opencontainers/go-digest | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/openshift/custom-resource-status | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/PaesslerAG/gval | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/pborman/uuid | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) -| github.com/pierrec/lz4/v4 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/shopspring/decimal | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/tidwall/pretty | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| github.com/ulikunitz/xz | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| github.com/xi2/xz | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| go | [MIT](https://opensource.org/license/mit/) -| goautoneg | [MIT](https://opensource.org/license/mit/) -| go-autorest | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| go-client | public-domain -| go-cmp | [MIT](https://opensource.org/license/mit/) -| go-difflib | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| gofuzz | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| go-genproto | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| go-humanize | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| go-internal | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| go-jmespath | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| golang-lru | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| golang_protobuf_extensions | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| gomega | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| gomodules.xyz/jsonpatch/v2 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| go.mongodb.org/mongo-driver | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| go-multierror | [MIT](https://opensource.org/license/mit/) -| google-cloud-go | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| go.opentelemetry.io/otel/exporters/jaeger | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| go.opentelemetry.io/otel/internal/metric | [MPL-2.0](https://www.mozilla.org/en-US/MPL/2.0) -| go-spew | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| 
gotest.tools | [MIT](https://opensource.org/license/mit/) -| govalidator | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| go-wildcard | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| go-yaml | [MPL-2.0](https://www.mozilla.org/en-US/MPL/2.0) -| groupcache | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| grpc-go | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| harbor | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| httpsnoop | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| inf.v0 | [ISC](https://opensource.org/license/isc-license-txt) -| intern | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| ip-address-manager | [MIT](https://opensource.org/license/mit/) -| json | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| jsonpath | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| jsonpointer | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| jsonreference | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| k8s.io/apiextensions-apiserver | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| klog | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| kube-openapi | [MIT](https://opensource.org/license/mit/) -| kubevirt.io/controller-lifecycle-operator-sdk/api | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| logr | [MIT](https://opensource.org/license/mit/) -| logrus | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| mapstructure | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| martian | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| mergo | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| metrics | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| multierr | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| mux | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| mysql | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| net | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| oauth2 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| opentelemetry-go | [MIT](https://opensource.org/license/mit/) -| opentelemetry-go-contrib | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| opentelemetry-proto-go | [MIT](https://opensource.org/license/mit/) -| opentracing-go | [MIT](https://opensource.org/license/mit/) -| perks | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| pflag | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| procfs | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| protobuf | [MIT](https://opensource.org/license/mit/) -| reflect2 | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| semver | [MPL-2.0](https://www.mozilla.org/en-US/MPL/2.0) -| shutdown | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| sigs.k8s.io/gateway-api | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| snappy | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| spec | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| stdr | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| structured-merge-diff | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| swag | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| sys | [MIT](https://opensource.org/license/mit/) -| system-upgrade-controller | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| tail | 
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| term | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| testify | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| text | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| time | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| tomb.v1 | [MIT](https://opensource.org/license/mit/) -| ulid | [MIT](https://opensource.org/license/mit/) -| utils | [MIT](https://opensource.org/license/mit/) -| uuid | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| wrangler | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| xstrings | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| xxhash | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| yaml | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| yaml.v2 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| yaml.v3 | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| github.com/vmware/govmomi | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| sigs.k8s.io/controller-runtime | [MIT](https://opensource.org/license/mit/) -| ajv | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| antd | [MIT](https://opensource.org/license/mit/) -| @ant-design/compatible | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| @ant-design/icons | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| axios | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| clipboard | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| color | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| connected-react-router | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| @fortawesome/fontawesome-svg-core | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @fortawesome/free-solid-svg-icons | [MIT](https://opensource.org/license/mit/) -| @fortawesome/react-fontawesome | [MIT](https://opensource.org/license/mit/) -| history | [MIT](https://opensource.org/license/mit/) -| i18next | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| i18next-browser-languagedetector | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| i18next-xhr-backend | [MIT](https://opensource.org/license/mit/) -| immer | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| lodash | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| lscache | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| moment | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| monaco-editor | [MIT](https://opensource.org/license/mit/) -| monaco-themes | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) -| monaco-yaml | [MIT](https://opensource.org/license/mit/) -| @nivo/core | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @nivo/line | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @nivo/pie | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| node-fetch | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| normalizr | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| prettier | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| prop-types | [MIT](https://opensource.org/license/mit/) -| query-string | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| react | [MIT](https://opensource.org/license/mit/) -| react-dom | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| react-i18next | 
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| react-js-cron | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| react-redux | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| react-router | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| react-router-dom | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| @react-spring/core | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @react-spring/three | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @react-spring/web | [MIT](https://opensource.org/license/mit/) -| react-teleporter | [MIT](https://opensource.org/license/mit/) -| @react-three/fiber | [MIT](https://opensource.org/license/mit/) -| react-transition-group | [MIT](https://opensource.org/license/mit/) -| redux | [MIT](https://opensource.org/license/mit/) -| redux-debounce-thunk | [MIT](https://opensource.org/license/mit/) -| redux-devtools-extension | [MIT](https://opensource.org/license/mit/) -| redux-thunk | [MIT](https://opensource.org/license/mit/) -| reselect | [MIT](https://opensource.org/license/mit/) -| styled-components | [MIT](https://opensource.org/license/mit/) -| three | [MIT](https://opensource.org/license/mit/) -| @typescript-eslint/eslint-plugin | [MIT](https://opensource.org/license/mit/) -| @typescript-eslint/parser | [MIT](https://opensource.org/license/mit/) -| validator | [MIT](https://opensource.org/license/mit/) -| cryptography | [MIT](https://opensource.org/license/mit/) -| github.com/flynn/go-shlex | [MIT](https://opensource.org/license/mit/) -| k8s.io/klog | [MIT](https://opensource.org/license/mit/) -| aec | [MIT](https://opensource.org/license/mit/) -| bugsnag-go | [MIT](https://opensource.org/license/mit/) -| cli | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| containerd | [MIT](https://opensource.org/license/mit/) -| docker-credential-helpers | [MIT](https://opensource.org/license/mit/) -| ghw | [MIT](https://opensource.org/license/mit/) -| github.com/docker/docker | [MIT](https://opensource.org/license/mit/) -| github.com/docker/go-metrics | [MIT](https://opensource.org/license/mit/) -| github.com/gdamore/encoding | [MIT](https://opensource.org/license/mit/) -| github.com/gomodule/redigo | [MIT](https://opensource.org/license/mit/) -| github.com/go-ole/go-ole | [MIT](https://opensource.org/license/mit/) -| github.com/jessevdk/go-flags | [MIT](https://opensource.org/license/mit/) -| github.com/kardianos/osext | [MIT](https://opensource.org/license/mit/) -| github.com/lucasb-eyer/go-colorful | [MIT](https://opensource.org/license/mit/) -| github.com/otiai10/copy | [MIT](https://opensource.org/license/mit/) -| github.com/power-devops/perfstat | [MIT](https://opensource.org/license/mit/) -| github.com/rivo/uniseg | [MIT](https://opensource.org/license/mit/) -| github.com/shirou/gopsutil | [MIT](https://opensource.org/license/mit/) -| github.com/shirou/gopsutil/v3 | [MIT](https://opensource.org/license/mit/) -| github.com/StackExchange/wmi | [MIT](https://opensource.org/license/mit/) -| github.com/tklauser/go-sysconf | [MIT](https://opensource.org/license/mit/) -| github.com/tklauser/numcpus | [MIT](https://opensource.org/license/mit/) -| github.com/yusufpapurcu/wmi | [MIT](https://opensource.org/license/mit/) -| go-ansiterm | [MIT](https://opensource.org/license/mit/) -| go-connections | [MIT](https://opensource.org/license/mit/) -| go-homedir | [MIT](https://opensource.org/license/mit/) -| gophercloud | [MIT](https://opensource.org/license/mit/) -| 
gopsutil | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| go-runewidth | [MIT](https://opensource.org/license/mit/) -| go-units | [ISC](https://opensource.org/license/isc-license-txt) -| go.uuid | [MIT](https://opensource.org/license/mit/) -| go-windows-terminal-sequences | [MIT](https://opensource.org/license/mit/) -| howett.net/plist | [MIT](https://opensource.org/license/mit/) -| image-spec | [MIT](https://opensource.org/license/mit/) -| json-patch | [MIT](https://opensource.org/license/mit/) -| k8s.io/metrics | [MIT](https://opensource.org/license/mit/) -| k8sutil | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) -| libtrust | [MIT](https://opensource.org/license/mit/) -| libvirt-go-module | [MIT](https://opensource.org/license/mit/) -| libvirt-go-xml | [ISC](https://opensource.org/license/isc-license-txt) -| locker | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| oras.land/oras-go | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| panicwrap | [MIT](https://opensource.org/license/mit/) -| pcidb | Python-2.0 -| purell | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) -| retry-go | [MIT](https://opensource.org/license/mit/) -| stack | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| sync | [MIT](https://opensource.org/license/mit/) -| tcell | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| tview | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| urlesc | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/google/go-github | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| go-i18n | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| validator.v2 | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| websocket | [MIT](https://opensource.org/license/mit/) -| check.v1 | [MIT](https://opensource.org/license/mit/) -| emperror | [MIT](https://opensource.org/license/mit/) -| gax-go | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| github.com/Azure/go-autorest/autorest/azure/cli | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/dimchansky/utfbom | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/googleapis/enterprise-certificate-proxy | [MIT](https://opensource.org/license/mit/) -| gnostic | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| go-jose.v2 | [MIT](https://opensource.org/license/mit/) -| google-api-go-client | [MIT](https://opensource.org/license/mit/) -| libvirt.org/go/libvirt | [MIT](https://opensource.org/license/mit/) -| logur | [MIT](https://opensource.org/license/mit/) -| maas-client-go | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| opencensus-go | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| pretty | [MIT](https://opensource.org/license/mit/) -| tencentcloud-sdk-go | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| xerrors | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| clockwork | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| connectproxy | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| crypt2go | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| ftp | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/Azure/azure-sdk-for-go/sdk/azidentity | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/beevik/etree | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/bxcodec/faker/v3 | 
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/coreos/go-oidc/v3 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/dgraph-io/ristretto | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/gorhill/cronexpr | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/hashicorp/go-version | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/huandu/xstrings | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/kelseyhightower/envconfig | [MIT](https://opensource.org/license/mit/) -| github.com/kylelemons/godebug | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/Masterminds/semver | [MIT](https://opensource.org/license/mit/) -| github.com/mattermost/xml-roundtrip-validator | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/montanaflynn/stats | [MIT](https://opensource.org/license/mit/) -| github.com/pkg/browser | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/robfig/cron | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/stripe/stripe-go/v71 | [MIT](https://opensource.org/license/mit/) -| github.com/youmark/pkcs8 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| glog | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| goconvey | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| gofpdi | [MIT](https://opensource.org/license/mit/) -| gopdf | [MIT](https://opensource.org/license/mit/) -| gopkg.in/alexcesaro/quotedprintable.v3 | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) -| gopkg.in/mail.v2 | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) -| gosaml2 | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) -| goxmldsig | [MPL-2.0](https://www.mozilla.org/en-US/MPL/2.0) -| mail | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| microsoft-authentication-library-for-go | [MIT](https://opensource.org/license/mit/) -| mongo-tools | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| mongo-tools-common | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| pbkdf2 | [MIT](https://opensource.org/license/mit/) -| rateli[MIT](https://opensource.org/license/mit/)er | [MIT](https://opensource.org/license/mit/) -| scram | [MIT](https://opensource.org/license/mit/) -| stringprep | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| axios-retry | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| base-64 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @datadog/browser-logs | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| fast-deep-equal | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @fullstory/browser | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| i18next-http-backend | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| leaflet | [MIT](https://opensource.org/license/mit/) -| leaflet.markercluster | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| less | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| netmask | [MIT](https://opensource.org/license/mit/) -| @nivo/bar | [MIT](https://opensource.org/license/mit/) -| react-calendar | [MIT](https://opensource.org/license/mit/) -| react-clipboard.js | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| react-dev-utils | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| react-helmet | 
[MIT](https://opensource.org/license/mit/) -| @stripe/react-stripe-js | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @stripe/stripe-js | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| typescript | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @types/node | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @types/react | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @types/react-dom | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @types/react-redux | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @types/react-router-dom | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @types/styled-components | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| unique-names-generator | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| url | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @visx/axis | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| @visx/event | [MIT](https://opensource.org/license/mit/) -| @visx/gradient | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| @visx/grid | [MIT](https://opensource.org/license/mit/) -| @visx/group | [MIT](https://opensource.org/license/mit/) -| @visx/hierarchy | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| @visx/mock-data | [MIT](https://opensource.org/license/mit/) -| @visx/responsive | [MIT](https://opensource.org/license/mit/) -| @visx/scale | [MIT](https://opensource.org/license/mit/) -| @visx/shape | [MIT](https://opensource.org/license/mit/) -| @visx/tooltip | [MIT](https://opensource.org/license/mit/) -| afero | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) -| apiextensions-apiserver | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/AzureAD/microsoft-authentication-library-for-go | [MIT](https://opensource.org/license/mit/) -| github.com/Azure/azure-pipeline-go | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| github.com/Azure/azure-sdk-for-go/sdk/azcore | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| github.com/Azure/azure-sdk-for-go/sdk/internal | [MIT](https://opensource.org/license/mit/) -| github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute | [MIT](https://opensource.org/license/mit/) -| github.com/Azure/azure-storage-blob-go | [MIT](https://opensource.org/license/mit/) -| github.com/Azure/go-autorest/autorest/azure/auth | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| github.com/golang-jwt/jwt | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| github.com/go-logr/zapr | [MIT](https://opensource.org/license/mit/) -| github.com/mattn/go-ieproxy | [MIT](https://opensource.org/license/mit/) -| goformation | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| mod | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| vcluster | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| zap | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cobra | public-domain -| gojsonschema | [MIT](https://opensource.org/license/mit/) -| handlers | [MIT](https://opensource.org/license/mit/) -| logrus-logstash-hook | [MIT](https://opensource.org/license/mit/) -| apiserver | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| btree | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cli-runtime | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| console | 
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| cursor | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| diskv | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| distribution | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| emission | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| etcd | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| filepath-securejoin | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| fuzzysearch | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/aybabtme/rgbterm | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/chai2010/gettext-go | [MIT](https://opensource.org/license/mit/) -| github.com/cheggaaa/pb | [MIT](https://opensource.org/license/mit/) -| github.com/containerd/containerd | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/exponent-io/jsonpath | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/gobwas/glob | [MIT](https://opensource.org/license/mit/) -| github.com/go-gorp/gorp/v3 | [MIT](https://opensource.org/license/mit/) -| github.com/gosuri/uitable | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/jmoiron/sqlx | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/joho/godotenv | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/lann/builder | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/lann/ps | [MPL-2.0](https://www.mozilla.org/en-US/MPL/2.0) -| github.com/liggitt/tabwriter | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/MakeNowJust/heredoc | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/Masterminds/squirrel | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/mattn/go-sqlite3 | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| github.com/[MIT](https://opensource.org/license/mit/)chellh/colorstring | [MIT](https://opensource.org/license/mit/) -| github.com/[MIT](https://opensource.org/license/mit/)chellh/go-wordwrap | [ISC](https://opensource.org/license/isc-license-txt) -| github.com/monochromegane/go-gitignore | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/rancher/wrangler | [MIT](https://opensource.org/license/mit/) -| github.com/rubenv/sql-migrate | [MIT](https://opensource.org/license/mit/) -| github.com/russross/blackfriday | [MIT](https://opensource.org/license/mit/) -| github.com/skip2/go-qrcode | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/vbatts/tar-split | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| github.com/xlab/treeprint | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) -| go-colorable | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| go-containerregistry | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| go-isatty | [MIT](https://opensource.org/license/mit/) -| gojq | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| gojsonpointer | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| gojsonreference | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| go-pluggable | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| go.starlark.net | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| gotenv | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| go-toml | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) 
-| go-vfs | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| hcl | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| helm | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| httpcache | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| image | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| image2ascii | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| imaging | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| ini.v1 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| jwalterweatherman | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| kairos | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| keyboard | [MIT](https://opensource.org/license/mit/) -| kubectl | [MIT](https://opensource.org/license/mit/) -| kubernetes | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| kustomize | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| lumberjack.v2 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| machineid | [MIT](https://opensource.org/license/mit/) -| mousetrap | [MIT](https://opensource.org/license/mit/) -| objx | [MIT](https://opensource.org/license/mit/) -| pixterm | [MIT](https://opensource.org/license/mit/) -| progressbar | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| properties | [MIT](https://opensource.org/license/mit/) -| pterm | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| resize | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| runtime | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| shlex | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| spdystream | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| stargz-snapshotter | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| terminal-dimensions | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| terminfo | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| timefmt-go | [MIT](https://opensource.org/license/mit/) -| toml | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) -| tools | [MIT](https://opensource.org/license/mit/) -| viper | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) -| yaml.v1 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) - -
- - \ No newline at end of file diff --git a/declarations.d.ts b/declarations.d.ts new file mode 100644 index 0000000000..53bb6f4b7a --- /dev/null +++ b/declarations.d.ts @@ -0,0 +1,23 @@ +declare module "*.png"; + +declare module "*.module.scss" { + const classes: { readonly [key: string]: string }; + export default classes; +} + +declare module "*.scss" { + const src: string; + export default src; +} + +interface Mendable { + initialize: () => void; + // Add other methods or properties as needed +} + +// Extend the global Window interface +declare global { + interface Window { + Mendable: Mendable; + } +} diff --git a/CODEOWNERS b/docs/CODEOWNERS similarity index 100% rename from CODEOWNERS rename to docs/CODEOWNERS diff --git a/content/api/1-introduction.md b/docs/api-content/api-docs/1-introduction.md similarity index 77% rename from content/api/1-introduction.md rename to docs/api-content/api-docs/1-introduction.md index 35db9149bd..8d67da3c76 100644 --- a/content/api/1-introduction.md +++ b/docs/api-content/api-docs/1-introduction.md @@ -1,35 +1,21 @@ --- title: "Introduction" -metaTitle: "Introduction" -metaDescription: "Palette API Introduction" -icon: "graph" -hideToC: false -fullWidth: false +sidebar_label: "Introduction" +description: "Palette API Introduction" +hide_table_of_contents: false hiddenFromNav: false -hideToCSidebar: false +sidebar_custom_props: + icon: "graph" --- -import {Intro, IntroButtons} from "shared/components" -import {Layout} from "shared" -import InfoBox from "shared/components/InfoBox" -import WarningBox from "shared/components/WarningBox" +Palette offers a range of capabilities you can access through the REST APIs. These REST APIs are designed in accordance with open API standards, which ensure that the platform's features can be integrated with other applications and systems. By utilizing these APIs, you can tap into the platform's capabilities through programmatic methods. Use the APIs to build custom integrations and workflows that leverage the power of the Palette. - -# Palette APIs - - - -Spectro Cloud Palette platform capabilities are exposed via REST APIs, providing a subset of features that comply with open application programming interface (API) standards. - -### APIs and External Resources -Palette interacts with external-facing utility tools such as Terraform to provision, change, and manage your environment automatically and efficiently. - -# Paths +## Paths Every API's URI has the prefix of the version and the Palette resource, such as: `v1/spectroclusters/...` -# Authentication +## Authentication Palette supports two types of user authentication methods: ### Using Authorization Token @@ -45,7 +31,7 @@ Palette uses API keys to provide secure API authentication and authorization. Th * Value: API key copied from the Palette Console. E.g. QMOI1ZVKVIoW6LM6uXqSWFPsjmt0juvl [Read More...](/user-management/user-authentication/#usingapikey) -# Requests +## Requests All requests are in the `JSON` format. In general, the request payload has three sections: *metadata, spec and status*. @@ -53,25 +39,27 @@ All requests are in the `JSON` format. In general, the request payload has three * *Spec* consists of attributes that define the resource * *Status* contains the status information of the resource. The API does not support creating or modifying the status section. - +:::info + Certain update request schemas have restricted spec resource definitions, and specific fields like uid and creation timestamp cannot be modified post-creation. 
- -| HTTP Method | Documentation | +::: + +| **HTTP Method** | **Description** | | --- | --- | | POST | To create a resource or a sub-resource. | | PUT | To update the resource or a sub-resource. The PUT request will overwrite the existing resource data. | | PATCH | To add, modify, remove a specific attribute or sub-resource within a resource. | | DELETE | To delete the resource. | -# Response Codes +## Response Codes The API returns standard HTTP response codes: -| HTTP Code | Description | +| **HTTP Code** | **Description** | | --- | --- | -| 200 | For a successful response. The response payload will vary depending upon the API. Refer to the respective API response schema. | -| 201 | For a successful resource creation. The response payload contains the uid of the created resource. | +| 200 | Request succeeded. The response payload will vary depending upon the API. Refer to the respective API response schema. | +| 201 | A resource was successfully created. The response payload contains the uid of the created resource. | | 204 | Response without any content for a successful operation. These operations include update, delete and the other actions on the resource. | | 400 | Bad request. The request does not adhere to the API request payload schema. | | 401 | Missing authorization token or invalid authorization token. | @@ -79,7 +67,7 @@ The API returns standard HTTP response codes: | 404 | The resource or the dependent resource is not found for the operation. | | 500 | Operational error. For 500 error code, the server responds with an explicit error code and an error message. | -# Palette API Lifecycle +## Palette API Lifecycle Palette APIs maintain backward compatibility until deprecation. The three API phases in the lifecycle are *Production*, *Sunset*, and *Deprecated*. Spectro Cloud will inform users when APIs transition through this lifecycle. ### Production The Palette APIs are designed to work as intended and expected. @@ -90,39 +78,38 @@ We indicate that an API is deprecated when it is no longer supported or recommen
- +:::info The API lifecycle also applies to external-facing tools such as Terraform. - +::: -# Versioning +## Versioning The version information is included in the API URI, such as `v1alpha1` or `v1`. Future APIs will increment the version, leaving the earlier version intact. The existing API request and response schema will be modified to add new attributes or query parameters while maintaining backward compatibility with earlier schemas. Prior notice will be given before advancing to the next version, and users will be advised to migrate to the new API. -# Scope +## Scope Palette groups resources under either a Tenant or Project scope. When making API requests targeting resources belonging to a project, the project scope should be specified. To specify the project scope, use the HTTP header key `ProjectUid` with your project UID as the value in the API request. The `ProjectUid` needs to be specified for a request to be applied under a specific project scope. -Example: +**Example**: -```shell hideClipboard +```shell curl --location --request \ GET 'https://api.spectrocloud.com/v1/edgehosts/ad3d90ab-de6e-3e48-800f-4d663cec3060?resolvePackValues=false' \ --header 'Accept: application/json' \ - --header 'ProjectUid: yourProjectUid' + --header 'ProjectUid: ad3d90ab-de6e-3e48-800f-4d663cec3060' ``` -
- +:::info -If you do not provide the `ProjectUid` header, then tenant is the assumed scope. +If you do not provide the ProjectUid header, then the assumed scope is of the tenant. - +::: -# Pagination +## Pagination -API endpoints that return a list have a limit of 50 items per return payload. Pagination is necessary for this purpose. The API response for the list includes the listMeta resource that contains the continue token. To perform pagination, you need to check whether the continue token value is present in the API response. For subsequent requests, use the `continue` token as a query parameter to paginate the remaining resource items. +API endpoints that return a list have a limit of 50 items per return payload. Pagination is necessary for this purpose. The API response for the list includes the listMeta resource that contains the `continue` token. To perform pagination, you need to check whether the `continue` token value is present in the API response. For subsequent requests, use the `continue` token as a query parameter to paginate the remaining resource items.
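As a concrete illustration, the following is a minimal pagination loop using `curl` and `jq`. It assumes an API key exported as `API_KEY`, uses `/v1/packs` only because that endpoint appears in the pagination example below, and assumes the list response exposes the token at `.listmeta.continue`; the exact field path may differ, so inspect the response of the endpoint you are calling.

```shell
# Fetch the first page (at most 50 items per response).
response=$(curl --silent "https://api.spectrocloud.com/v1/packs" \
  --header "Accept: application/json" \
  --header "ApiKey: $API_KEY")

# Assumed field path for the continue token; adjust if your response differs.
token=$(echo "$response" | jq --raw-output '.listmeta.continue // empty')

# Keep requesting pages while a continue token is returned.
while [ -n "$token" ]; do
  response=$(curl --silent "https://api.spectrocloud.com/v1/packs?continue=$token" \
    --header "Accept: application/json" \
    --header "ApiKey: $API_KEY")
  token=$(echo "$response" | jq --raw-output '.listmeta.continue // empty')
done
```

Each iteration overwrites `response`, so in practice you would process or accumulate the items from each page before requesting the next one.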
@@ -149,19 +136,18 @@ curl --location 'https://api.spectrocloud.com/v1/packs?continue=eyJvZmZzZXQiOjUw ``` -# Rate Limits +## Rate Limits The API rate limits are as follows: -
* There is a limit of ten API requests per second for each source IP address. The API supports additional bursts through the usage of a burst queue. The default burst queue size is set to five. You could make 50 (10 * 5) requests in seconds before the API returns a `429 - TooManyRequests` error. Refer to the [Endpoint Prefix Rate](#endpointprefixrate) for additional information. -* The API request limits are categorized by the parent resources, such as `/v1/cloudconfig/:uid` and `/v1/roles`. You can find a list of all resource types in the [API documentation](/api/introduction). The requests are counted together if you make multiple requests to the same resource type but use different sub-resources. For example, if you make five requests to `/v1/clusterprofiles` and five requests to `/v1/clusterprofiles/macros`, the requests are counted together as ten requests to the resource `clusterprofiles`. +* API request limits are categorized by the parent resources, such as `/v1/cloudconfig/:uid` and `/v1/roles`. You can find a list of all resource types in the [API documentation](/api/category/palette-api-v1). The requests are counted together if you make multiple requests to the same resource type but use different sub-resources. For example, if you make five requests to `/v1/clusterprofiles` and five requests to `/v1/clusterprofiles/macros`, the requests are counted together as ten requests to the resource `clusterprofiles`. -* In case of too many requests, the user will receive an error with HTTP code `429` - `TooManyRequests.` In that event, we recommend retrying the API call after a few moments. +* If too many requests are issued, you may receive an error with HTTP code `429` - `TooManyRequests.` We recommend retrying the API call after a few moments. ## Endpoint Prefix Rate @@ -197,7 +183,7 @@ The API rate limits are as follows: | /v1/registries | 10 | 5 | 50 | | /v1/services | 10 | 5 | 50 | | /v1/overlords | 10 | 5 | 50 | -| v1/cluster | 10 | 5 | 50 | +| /v1/cluster | 10 | 5 | 50 | | /v1/cloudconfigs | 10 | 5 | 50 | | /v1/cloudconfigs/{cloudType}/{uid}/machinePools | 10 | 5 | 50 | | /v1/edgehosts | 10 | 5 | 50 | @@ -231,4 +217,4 @@ The API rate limits are as follows: | /v1/clusterprofiles/:uid/packs/{packName}/manifests | 50 | 5 | 250 | | /v1/clusterprofiles/validate/packs | 50 | 5 | 250 | | /v1/clusterprofiles/:uid/validate/packs | 50 | 5 | 250 | -| /v1/spectroclusters/:uid/profiles | 50 | 5 | 250 | +| /v1/spectroclusters/:uid/profiles | 50 | 5 | 250 | \ No newline at end of file diff --git a/content/api/2-samples.mdx b/docs/api-content/api-docs/2-samples.mdx similarity index 87% rename from content/api/2-samples.mdx rename to docs/api-content/api-docs/2-samples.mdx index adb71c203f..1496f475d6 100644 --- a/content/api/2-samples.mdx +++ b/docs/api-content/api-docs/2-samples.mdx @@ -1,53 +1,43 @@ --- title: "Example Usage" -metaTitle: "Example Usage" -metaDescription: "Learn how to use the Spectro Cloud API through examples." +sidebar_label: "Example Usage" +description: "Learn how to use the Spectro Cloud API through examples." icon: "" -hideToC: true -fullWidth: false +hide_table_of_contents: true + hiddenFromNav: false -hideToCSidebar: false --- -import {Intro, IntroButtons} from "shared/components" -import InfoBox from "shared/components/InfoBox" -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; - # Overview -This workflow demonstrates how to use Spectro Cloud API. 
You can use the API to automate the provisioning of Kubernetes clusters and applications on Spectro Cloud. +This workflow demonstrates how to use the Spectro Cloud API. You can use the API to automate the provisioning of Kubernetes clusters and applications on Spectro Cloud. It is a RESTful API that uses JSON for serialization and supports HTTP Basic Authentication and token authentication. The API is available at `https://api.spectrocloud.com`. -Use the following examples to familiarize yourself with the API. +Use the following examples to familiarize yourself with the API.
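Before working through the longer samples, it can help to see the smallest possible authenticated call. The sketch below is not part of the original walkthrough: it assumes an API key exported as `API_KEY`, uses the `/v1/packs` endpoint purely as a convenient read-only target, and pipes the JSON response through `jq` (listed in the prerequisites below) for readability.

```shell
# Minimal authenticated GET request against the Palette API.
# API_KEY must hold a valid API key created in the Palette console.
curl --silent "https://api.spectrocloud.com/v1/packs" \
  --header "Accept: application/json" \
  --header "ApiKey: $API_KEY" | jq '.'
```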
- +:::caution The following samples are for demonstration purposes only and may not include all the required payload values. They are not intended for production use. We recommend exploring the API using a tool such as [Postman](https://www.postman.com/) and our available Postman collection. Check out the [Palette Postman collection](/api/postman-collection) resource to learn more. - +:::
# Prerequisites -* You must have a Spectro Cloud account. If you do not have an account, you can create one at [https://console.spectrocloud.com](https://console.spectrocloud.com). - - -* An Authentication header with a token value or an API Key with an ApiKey value. -Learn more about authentication credentials by reviewing the [authentication methods](https://docs.spectrocloud.com/user-management/user-authentication) resource. +- You must have a Spectro Cloud account. If you do not have an account, you can create one at [https://console.spectrocloud.com](https://console.spectrocloud.com). +- An Authentication header with a token value or an API Key with an ApiKey value. + Learn more about authentication credentials by reviewing the [authentication methods](https://docs.spectrocloud.com/user-management/user-authentication) resource. -* The respective language runtime and package manager installed on your machine. +- The respective language runtime and package manager installed on your machine. - - -* [jq](https://stedolan.github.io/jq/download/) - A command-line JSON processor. +- [jq](https://stedolan.github.io/jq/download/) - A command-line JSON processor. ## Configure Scope and Authentication @@ -57,8 +47,9 @@ The project ID is the Unique Identifier (UID) of the project in which you want t
```shell -export API_KEY="Your API Key" +export API_KEY="Your API Key" ``` +
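As a quick sanity check that is not part of the original steps, you can confirm the exported key is accepted before continuing. The snippet assumes `/v1/packs` as a lightweight read-only call; any endpoint you are authorized to read would do. A `200` status code indicates the key works, while `401` points to a missing or invalid key.

```shell
# Print only the HTTP status code of an authenticated request.
status=$(curl --silent --output /dev/null --write-out "%{http_code}" \
  "https://api.spectrocloud.com/v1/packs" \
  --header "Accept: application/json" \
  --header "ApiKey: $API_KEY")

echo "Palette API returned HTTP $status"
```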
```shell @@ -67,15 +58,11 @@ export PROJECT_ID="Your Project UID" Some of the endpoints require a cluster ID. - - - - +:::info If you do not provide the ProjectUid header, then the assumed scope is of the tenant. - - +::: # Deploy a Cluster @@ -83,15 +70,12 @@ You can use the following endpoint to deploy a cluster. The provider value repre **Endpoint**: `https://api.spectrocloud.com/v1/spectroclusters/{provider}` -Set the provider as an environment variable. +Set the provider as an environment variable. ```shell export PROVIDER="Your Provider" ``` -The payload for creating a host cluster can be lengthy due to the various configurations available for a host cluster. The payload is condensed in the following examples to improve readability and lack all the required values. -Check out the [Clusters](/api/v1/clusters) API resource to learn more about the available parameters. -
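A brief aside that is not in the original guide: the cluster payload shown next is condensed and does not include all required fields, so for a real deployment you may find it easier to keep the full JSON in a file and load it into the variable. The file name below is only an illustration.

```shell
# Load the full cluster payload from a local JSON file (hypothetical file name).
export PAYLOAD="$(cat my-cluster-payload.json)"
```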
```shell @@ -105,10 +89,9 @@ export PAYLOAD='{ }' ``` - - + ```shell curl --location 'https://api.spectrocloud.com/v1/spectroclusters/$PROVIDER?ProjectUid=$PROJECT_ID"' \ @@ -118,9 +101,9 @@ curl --location 'https://api.spectrocloud.com/v1/spectroclusters/$PROVIDER?Proje --data "$PAYLOAD" ``` - + - + ```js const apiKey = process.env.API_KEY; @@ -135,18 +118,18 @@ const data = { metadata: { annotations: {}, name: "my-cluster", - labels: {} + labels: {}, }, spec: { // ... - } + }, }; // Define headers for the request const headers = new Headers({ "Content-Type": "application/json", - "Accept": "application/json", - "ApiKey": apiKey + Accept: "application/json", + ApiKey: apiKey, }); // Define async function to send POST request @@ -155,7 +138,7 @@ async function sendRequest() { const response = await fetch(url, { method: "POST", headers: headers, - body: JSON.stringify(data) + body: JSON.stringify(data), }); if (!response.ok) { throw new Error(`HTTP error ${response.status}`); @@ -168,9 +151,10 @@ async function sendRequest() { } sendRequest(); ``` - - + + + ```go package main @@ -231,11 +215,10 @@ func main() { fmt.Println(responseData) } ``` - - - + + ```python import os @@ -287,10 +270,9 @@ if __name__ == '__main__': main() ``` + - - - + ```rust use std::collections::HashMap; @@ -349,7 +331,8 @@ async fn main() -> Result<(), Box> { Ok(()) } ``` - + + @@ -359,7 +342,7 @@ You can use the following endpoint to monitor the progress of cluster creation. **Endpoint**: `https://api.spectrocloud.com/v1/spectroclusters/{uid}` -Ensure you specify the cluster ID. You can set the cluster ID as an environment variable. +Ensure you specify the cluster ID. You can set the cluster ID as an environment variable. ```shell export CLUSTER_ID="Your Cluster ID" @@ -369,7 +352,7 @@ export CLUSTER_ID="Your Cluster ID" - + ```shell curl -s --location "https://api.spectrocloud.com/v1/spectroclusters/$CLUSTER_ID?ProjectUid=$PROJECT_ID" \ @@ -378,10 +361,9 @@ curl -s --location "https://api.spectrocloud.com/v1/spectroclusters/$CLUSTER_ID? | jq -r '.status' ``` - + - - + ```js const apiKey = process.env.API_KEY; @@ -390,8 +372,8 @@ const clusterID = process.env.CLUSTER_ID; const url = `https://api.spectrocloud.com/v1/spectroclusters/${clusterID}?ProjectUid=${projectID}`; const headers = { - 'Accept': 'application/json', - 'ApiKey': apiKey, + Accept: "application/json", + ApiKey: apiKey, }; try { @@ -400,13 +382,13 @@ try { const status = data.status; console.log(status); } catch (error) { - console.error('Error:', error); + console.error("Error:", error); } ``` - + - + ```go package main @@ -466,9 +448,9 @@ func main() { } ``` - + - + ```python import os @@ -506,9 +488,10 @@ def main(): if __name__ == '__main__': main() ``` - - + + + ``` use reqwest::header::{HeaderMap, HeaderValue, ACCEPT}; @@ -542,19 +525,17 @@ async fn main() -> Result<(), Box> { } ``` - - + - # Cluster Nodes and Node Status You can use the following endpoint to retrieve the list of nodes in a cluster and their status. **Endpoint**: `https://api.spectrocloud.com/v1/spectroclusters/{uid}` -Ensure you specify the cluster ID. You can set the cluster ID as an environment variable. +Ensure you specify the cluster ID. You can set the cluster ID as an environment variable. 
```shell export CLUSTER_ID="Your Cluster ID" @@ -566,7 +547,7 @@ export CLUSTER_ID="Your Cluster ID" - + ```shell curl --location "https://api.spectrocloud.com/v1/spectroclusters/$CLUSTER_ID?ProjectUid=$PROJECT_ID" \ @@ -574,10 +555,9 @@ curl --location "https://api.spectrocloud.com/v1/spectroclusters/$CLUSTER_ID?Pro --header "ApiKey: $API_KEY" ``` - - - + + ```js // Get API key, project ID, and cluster ID from environment variables @@ -590,8 +570,8 @@ const url = `https://api.spectrocloud.com/v1/spectroclusters/${clusterID}?Projec // Define headers for the request const headers = new Headers({ - "Accept": "application/json", - "ApiKey": apiKey + Accept: "application/json", + ApiKey: apiKey, }); // Define an async function to send the HTTP request @@ -600,7 +580,7 @@ async function fetchData() { // Send the HTTP request using the fetch() method const response = await fetch(url, { method: "GET", - headers: headers + headers: headers, }); const data = await response.json(); console.log("Response data:", data); @@ -613,9 +593,9 @@ async function fetchData() { fetchData(); ``` - + - + ```go package main @@ -666,9 +646,9 @@ func main() { ``` - + - + ```python import os @@ -710,9 +690,9 @@ if __name__ == '__main__': main() ``` - + - + ``` use std::env; @@ -754,21 +734,20 @@ async fn main() -> Result<(), reqwest::Error> { } ``` - - + -## Retrieve the Cluster Cloud Config Identifier +## Retrieve the Cluster Cloud Config Identifier -The cloud config identifier is a unique identifier for the cloud config that is used to provision the cluster. -The values are found in the `spec.cloudConfigRef` field of the cluster object. You can use the following code snippet to retrieve the cluster cloud config identifier `uid` and `kind`. +The cloud config identifier is a unique identifier for the cloud config that is used to provision the cluster. +The values are found in the `spec.cloudConfigRef` field of the cluster object. You can use the following code snippet to retrieve the cluster cloud config identifier `uid` and `kind`.
- + ```shell curl -s --location "https://api.spectrocloud.com/v1/spectroclusters/$CLUSTER_ID?ProjectUid=$PROJECT_ID" \ @@ -777,9 +756,9 @@ curl -s --location "https://api.spectrocloud.com/v1/spectroclusters/$CLUSTER_ID? | jq -r '.spec.cloudConfigRef | "\(.kind) \(.uid)"' ``` - + - + ```js // Get API key, project ID, and cluster ID from environment variables @@ -792,8 +771,8 @@ const url = `https://api.spectrocloud.com/v1/spectroclusters/${clusterID}?Projec // Define headers for the request const headers = new Headers({ - "Accept": "application/json", - "ApiKey": apiKey + Accept: "application/json", + ApiKey: apiKey, }); // Define an async function to send the HTTP request @@ -802,7 +781,7 @@ async function fetchData() { // Send the HTTP request using the fetch() method const response = await fetch(url, { method: "GET", - headers: headers + headers: headers, }); // Parse the response as JSON @@ -824,10 +803,10 @@ async function fetchData() { // Call the async function to send the HTTP request fetchData(); ``` - + - + ```go package main @@ -895,9 +874,9 @@ func main() { } ``` - + - + ```python import os @@ -934,9 +913,10 @@ else: # Print the error message if the request failed print("Request failed with status code:", response.status_code) ``` - - + + + ``` use std::env; @@ -1002,33 +982,28 @@ async fn main() -> Result<(), reqwest::Error> { } ``` - - + - - - # Cluster Workloads -You can retrieve information about the active workloads on a cluster, such as the number of pods, nodes, and containers. +You can retrieve information about the active workloads on a cluster, such as the number of pods, nodes, and containers. Use the namespace filter to retrieve information about workloads in specific namespaces. **Endpoint**: `https://api.spectrocloud.com/v1/dashboard/spectroclusters/{uid}/workloads/pod` -Ensure you specify the cluster ID. You can set the cluster ID as an environment variable. +Ensure you specify the cluster ID. You can set the cluster ID as an environment variable. ```shell export CLUSTER_ID="Your Cluster ID" ``` +
- - - + ```shell curl --location "https://api.spectrocloud.com/v1/dashboard/spectroclusters/$CLUSTER_ID/workloads" \ @@ -1042,9 +1017,10 @@ export CLUSTER_ID="Your Cluster ID" } }' ``` - - + + + ```js const API_KEY = process.env.API_KEY; @@ -1054,18 +1030,18 @@ const CLUSTER_ID = process.env.CLUSTER_ID; const url = `https://api.spectrocloud.com/v1/dashboard/spectroclusters/${CLUSTER_ID}/workloads`; const options = { - method: 'POST', + method: "POST", headers: { - 'Content-Type': 'application/json', - 'Accept': 'application/json', - 'ProjectUid': PROJECT_ID, - 'ApiKey': API_KEY + "Content-Type": "application/json", + Accept: "application/json", + ProjectUid: PROJECT_ID, + ApiKey: API_KEY, }, body: JSON.stringify({ filter: { - namespaces: ["default", "myOtherNamespace"] - } - }) + namespaces: ["default", "myOtherNamespace"], + }, + }), }; try { @@ -1077,9 +1053,9 @@ try { } ``` - + - + ```go package main @@ -1146,9 +1122,9 @@ func main() { } ``` - + - + ```python import os @@ -1191,9 +1167,9 @@ if __name__ == "__main__": main() ``` - + - + ``` use std::env; @@ -1262,32 +1238,29 @@ async fn main() -> Result<(), Box> { } ``` - + - # Filter Clusters You can filter host clusters by specifying the tags you want to filter on. **Endpoint**: `https://api.spectrocloud.com/v1/dashboard/spectroclusters/search` -Ensure you specify the cluster ID. You can set the cluster ID as an environment variable. +Ensure you specify the cluster ID. You can set the cluster ID as an environment variable. ```shell export CLUSTER_ID="Your Cluster ID" ``` - -In the following example, a filter for the tags `dev` and `team:bravo-2` is specified. You can learn more about filters and how to - +In the following example, a filter for the tags `dev` and `team:bravo-2` is specified. You can learn more about filters and how to
- + ```shell curl --location 'https://api.spectrocloud.com/v1/dashboard/spectroclusters/search?limit=20' \ @@ -1307,7 +1280,7 @@ In the following example, a filter for the tags `dev` and `team:bravo-2` is spec "type": "string", "condition": { "string": { - "operator": "eq", + "operator": "eq", "negation": false, "match": { "conjunction": "or", @@ -1328,56 +1301,53 @@ In the following example, a filter for the tags `dev` and `team:bravo-2` is spec }' ``` - + - + ```js async function searchSpectroclusters() { - const url = 'https://api.spectrocloud.com/v1/dashboard/spectroclusters/search?limit=20'; + const url = "https://api.spectrocloud.com/v1/dashboard/spectroclusters/search?limit=20"; const headers = { - 'Content-Type': 'application/json', - 'Accept': 'application/json', - 'ProjectUid': process.env.PROJECT_UID, - 'ApiKey': process.env.API_KEY + "Content-Type": "application/json", + Accept: "application/json", + ProjectUid: process.env.PROJECT_UID, + ApiKey: process.env.API_KEY, }; const body = JSON.stringify({ - "filter": { - "conjunction": "and", - "filterGroups": [ + filter: { + conjunction: "and", + filterGroups: [ { - "conjunction": "and", - "filters": [ + conjunction: "and", + filters: [ { - "property": "tag", - "type": "string", - "condition": { - "string": { - "operator": "eq", - "negation": false, - "match": { - "conjunction": "or", - "values": [ - "dev", - "team:bravo-2" - ] + property: "tag", + type: "string", + condition: { + string: { + operator: "eq", + negation: false, + match: { + conjunction: "or", + values: ["dev", "team:bravo-2"], }, - "ignoreCase": false - } - } - } - ] - } - ] + ignoreCase: false, + }, + }, + }, + ], + }, + ], }, - "sort": [] + sort: [], }); try { const response = await fetch(url, { - method: 'POST', + method: "POST", headers: headers, - body: body + body: body, }); const data = await response.json(); console.log(data); @@ -1388,10 +1358,10 @@ async function searchSpectroclusters() { searchSpectroclusters(); ``` - - + + ```go package main @@ -1485,9 +1455,10 @@ func main() { fmt.Println(string(body)) } ``` - - + + + ```python import requests @@ -1544,9 +1515,10 @@ print(response.json()) if __name__ == "__main__": main() ``` - - + + + ``` use reqwest::header::{HeaderMap, HeaderValue}; @@ -1650,7 +1622,7 @@ async fn main() -> Result<(), Box> { } ``` - + @@ -1658,7 +1630,6 @@ async fn main() -> Result<(), Box> { You can download the kubeconfig file of a host cluster. To download the kubeconfig file, you need to provide the cluster UID. - **Endpoint**: `https://api.spectrocloud.com/v1/spectroclusters/{uid}/assets/kubeconfig`
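Beyond downloading the file, you will typically want to point `kubectl` at it. The sketch below is an addition to the original samples: it assumes the same environment variables used throughout this page (`CLUSTER_ID`, `PROJECT_ID`, and `API_KEY`, set as shown in the steps that follow) and writes the kubeconfig to a local file name chosen here for illustration. Note the double quotes around the URL so the shell expands the variables.

```shell
# Download the kubeconfig and save it to a local file.
curl --silent --location \
  "https://api.spectrocloud.com/v1/spectroclusters/$CLUSTER_ID/assets/kubeconfig" \
  --header "Accept: application/octet-stream" \
  --header "ProjectUid: $PROJECT_ID" \
  --header "ApiKey: $API_KEY" \
  --output my-cluster.kubeconfig

# Use the downloaded kubeconfig with kubectl.
kubectl --kubeconfig my-cluster.kubeconfig get nodes
```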
@@ -1671,7 +1642,7 @@ export CLUSTER_ID="Your Cluster ID" - + ```shell curl --location 'https://api.spectrocloud.com/v1/spectroclusters/$CLUSTER_ID/assets/kubeconfig' \ @@ -1680,9 +1651,9 @@ export CLUSTER_ID="Your Cluster ID" --header 'ApiKey: $API_KEY' ``` - + - + ```js const apiKey = process.env.API_KEY; @@ -1691,9 +1662,9 @@ const clusterID = process.env.CLUSTER_ID; const url = `https://api.spectrocloud.com/v1/spectroclusters/${clusterID}/assets/kubeconfig?frp=true`; const headers = { - 'Accept': 'application/octet-stream', - 'ProjectUid': projectID, - 'ApiKey': apiKey + Accept: "application/octet-stream", + ProjectUid: projectID, + ApiKey: apiKey, }; try { @@ -1701,13 +1672,13 @@ try { const data = await response.text(); console.log(data); } catch (error) { - console.error('Error:', error); + console.error("Error:", error); } ``` - + - + ```go package main @@ -1764,9 +1735,10 @@ func main() { } } ``` - - + + + ```python import os @@ -1803,9 +1775,10 @@ def main(): if __name__ == '__main__': main() ``` - - + + + ``` use std::env; @@ -1848,8 +1821,6 @@ async fn main() -> Result<(), reqwest::Error> { } ``` - - - + diff --git a/content/api/3-postman-collection.md b/docs/api-content/api-docs/3-postman-collection.md similarity index 79% rename from content/api/3-postman-collection.md rename to docs/api-content/api-docs/3-postman-collection.md index fa2c39dbff..f38d74c020 100644 --- a/content/api/3-postman-collection.md +++ b/docs/api-content/api-docs/3-postman-collection.md @@ -1,24 +1,12 @@ --- title: "Postman Collection" -metaTitle: "Postman Collection" -metaDescription: "Spectro Cloud API Postman Collection" +sidebar_label: "Postman Collection" +description: "Spectro Cloud API Postman Collection" icon: "" -hideToC: true -fullWidth: false +hide_table_of_contents: true + hiddenFromNav: false -hideToCSidebar: true --- -import {Intro, IntroButtons} from "shared/components" -import InfoBox from "shared/components/InfoBox" -import Tabs from 'shared/components/ui/Tabs'; -import WarningBox from 'shared/components/WarningBox'; - - - - -# Postman Collection - - Spectro Cloud provides a Postman collection for your convenience to help you interact with the Spectro Cloud API. @@ -49,7 +37,7 @@ Learn more about [authentication methods](https://docs.spectrocloud.com/user-man Use the following URL to import your configuration into Postman: -https://raw.githubusercontent.com/spectrocloud/librarium/master/content/api/palette-apis.json +https://raw.githubusercontent.com/spectrocloud/librarium/master/docs/api-content/api-docs/palette-apis.json
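If you would rather keep a local copy of the collection file before importing it, the short Python sketch below (not part of the original import instructions; it assumes the third-party `requests` package) downloads the raw JSON from the URL above. The Postman import steps continue right after it.

```python
import requests  # third-party HTTP client, assumed to be installed

COLLECTION_URL = (
    "https://raw.githubusercontent.com/spectrocloud/librarium/"
    "master/docs/api-content/api-docs/palette-apis.json"
)

# Download the Postman collection and store it locally so it can be
# imported into Postman as a file instead of a URL.
response = requests.get(COLLECTION_URL, timeout=30)
response.raise_for_status()

with open("palette-apis.json", "w", encoding="utf-8") as file:
    file.write(response.text)
```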
1. Open the **Import** dialog. diff --git a/content/api/URL-as-a-link.png b/docs/api-content/api-docs/URL-as-a-link.png similarity index 100% rename from content/api/URL-as-a-link.png rename to docs/api-content/api-docs/URL-as-a-link.png diff --git a/content/api/palette-apis.json b/docs/api-content/api-docs/palette-apis.json similarity index 100% rename from content/api/palette-apis.json rename to docs/api-content/api-docs/palette-apis.json diff --git a/content/api/v1/api.json b/docs/api-content/api-docs/v1/api.json similarity index 98% rename from content/api/v1/api.json rename to docs/api-content/api-docs/v1/api.json index 7eff85b690..e444bc7025 100644 --- a/content/api/v1/api.json +++ b/docs/api-content/api-docs/v1/api.json @@ -2434,7 +2434,7 @@ "type": "boolean" }, "controlPlaneLoadBalancer": { - "description": "ControlPlaneLoadBalancer specifies how API server elb will be configured, this field is optional, not provided, \"\", default =\u003e \"Internet-facing\" \"Internet-facing\" =\u003e \"Internet-facing\" \"internal\" =\u003e \"internal\" For spectro saas setup we require to talk to the apiserver from our cluster so ControlPlaneLoadBalancer should be \"\", not provided or \"Internet-facing\"", + "description": "ControlPlaneLoadBalancer specifies how API server elb will be configured, this field is optional, not provided, \"\", default => \"Internet-facing\" \"Internet-facing\" => \"Internet-facing\" \"internal\" => \"internal\" For spectro saas setup we require to talk to the apiserver from our cluster so ControlPlaneLoadBalancer should be \"\", not provided or \"Internet-facing\"", "type": "string" }, "region": { @@ -2826,7 +2826,7 @@ "description": "rolling update strategy for this machinepool if not specified, will use ScaleOut" }, "useControlPlaneAsWorker": { - "description": "if IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "if IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", "type": "boolean", "x-omitempty": false } @@ -3764,7 +3764,7 @@ "description": "rolling update strategy for this machinepool if not specified, will use ScaleOut" }, "useControlPlaneAsWorker": { - "description": "if IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "if IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", "type": "boolean", "x-omitempty": false } @@ -8905,7 +8905,7 @@ "description": "rolling update strategy for this machinepool if not specified, will use ScaleOut" }, "useControlPlaneAsWorker": { - "description": "if IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "if IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", "type": "boolean" } }, @@ -10341,7 +10341,7 @@ "description": "rolling update strategy for this machinepool if not specified, will use ScaleOut" }, "useControlPlaneAsWorker": { - "description": "if IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "if IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will 
not be used for worker pools", "type": "boolean" } }, @@ -10753,7 +10753,7 @@ "description": "rolling update strategy for this machinepool if not specified, will use ScaleOut" }, "useControlPlaneAsWorker": { - "description": "if IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "if IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", "type": "boolean" } }, @@ -11102,7 +11102,7 @@ "type": "boolean" }, "controlPlaneLoadBalancer": { - "description": "ControlPlaneLoadBalancer specifies how API server elb will be configured, this field is optional, not provided, \"\", default =\u003e \"Internet-facing\" \"Internet-facing\" =\u003e \"Internet-facing\" \"internal\" =\u003e \"internal\" For spectro saas setup we require to talk to the apiserver from our cluster so ControlPlaneLoadBalancer should be \"\", not provided or \"Internet-facing\"", + "description": "ControlPlaneLoadBalancer specifies how API server elb will be configured, this field is optional, not provided, \"\", default => \"Internet-facing\" \"Internet-facing\" => \"Internet-facing\" \"internal\" => \"internal\" For spectro saas setup we require to talk to the apiserver from our cluster so ControlPlaneLoadBalancer should be \"\", not provided or \"Internet-facing\"", "type": "string" }, "encryptionConfig": { @@ -11324,7 +11324,7 @@ "description": "rolling update strategy for this machinepool if not specified, will use ScaleOut" }, "useControlPlaneAsWorker": { - "description": "if IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "if IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", "type": "boolean", "x-omitempty": false } @@ -11807,7 +11807,7 @@ "additionalProperties": { "type": "string" }, - "description": "Addresses is a map of PCI device entry name to its addresses.\nExample entry would be \"11:00.0 VGA compatible controller [0300]: NVIDIA\nCorporation Device [10de:1eb1] (rev a1)\"- \u003e 0000_11_00_0\" The address is\nBDF (Bus Device Function) identifier format seperated by underscores. The\nfirst 4 bits are almost always 0000. In the above example 11 is Bus, 00\nis Device,0 is function. The values of these addreses are expected in hexadecimal\nformat\n", + "description": "Addresses is a map of PCI device entry name to its addresses.\nExample entry would be \"11:00.0 VGA compatible controller [0300]: NVIDIA\nCorporation Device [10de:1eb1] (rev a1)\"- > 0000_11_00_0\" The address is\nBDF (Bus Device Function) identifier format seperated by underscores. The\nfirst 4 bits are almost always 0000. In the above example 11 is Bus, 00\nis Device,0 is function. The values of these addreses are expected in hexadecimal\nformat\n", "type": "object" }, "deviceModel": { @@ -11832,7 +11832,7 @@ "additionalProperties": { "type": "string" }, - "description": "Addresses is a map of PCI device entry name to its addresses.\nExample entry would be \"11:00.0 VGA compatible controller [0300]: NVIDIA\nCorporation Device [10de:1eb1] (rev a1)\"- \u003e 0000_11_00_0\" The address is\nBDF (Bus Device Function) identifier format seperated by underscores. The\nfirst 4 bits are almost always 0000. In the above example 11 is Bus, 00\nis Device,0 is function. 
The values of these addreses are expected in hexadecimal\nformat\n", + "description": "Addresses is a map of PCI device entry name to its addresses.\nExample entry would be \"11:00.0 VGA compatible controller [0300]: NVIDIA\nCorporation Device [10de:1eb1] (rev a1)\"- > 0000_11_00_0\" The address is\nBDF (Bus Device Function) identifier format seperated by underscores. The\nfirst 4 bits are almost always 0000. In the above example 11 is Bus, 00\nis Device,0 is function. The values of these addreses are expected in hexadecimal\nformat\n", "type": "object" }, "model": { @@ -12292,7 +12292,7 @@ "description": "rolling update strategy for this machinepool if not specified, will use ScaleOut" }, "useControlPlaneAsWorker": { - "description": "if IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "if IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", "type": "boolean" } }, @@ -12701,7 +12701,7 @@ "type": "integer" }, "useControlPlaneAsWorker": { - "description": "if IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "if IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", "type": "boolean" } }, @@ -14596,7 +14596,7 @@ "description": "rolling update strategy for this machinepool if not specified, will use ScaleOut" }, "useControlPlaneAsWorker": { - "description": "if IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "if IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", "type": "boolean" }, "xslTemplate": { @@ -15417,7 +15417,7 @@ "description": "rolling update strategy for this machinepool if not specified, will use ScaleOut" }, "useControlPlaneAsWorker": { - "description": "if IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "if IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", "type": "boolean" } }, @@ -15797,7 +15797,7 @@ "description": "Rolling update strategy for this machine pool if not specified, will use ScaleOut" }, "useControlPlaneAsWorker": { - "description": "If IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "If IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", "type": "boolean", "x-omitempty": false } @@ -18872,7 +18872,7 @@ "description": "rolling update strategy for this machinepool if not specified, will use ScaleOut" }, "useControlPlaneAsWorker": { - "description": "if IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "if IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", "type": "boolean" } }, @@ -19331,7 +19331,7 @@ "$ref": "#/definitions/v1MaasCloudAccount" }, "name": { - "description": "Name for the private gateway \u0026 cloud 
account", + "description": "Name for the private gateway & cloud account", "type": "string" }, "shareWithProjects": { @@ -19402,7 +19402,7 @@ "$ref": "#/definitions/v1OpenStackCloudAccount" }, "name": { - "description": "Name for the private gateway \u0026 cloud account", + "description": "Name for the private gateway & cloud account", "type": "string" }, "shareWithProjects": { @@ -19454,7 +19454,7 @@ "$ref": "#/definitions/v1VsphereCloudAccount" }, "name": { - "description": "Name for the private gateway \u0026 cloud account", + "description": "Name for the private gateway & cloud account", "type": "string" }, "shareWithProjects": { @@ -30187,7 +30187,7 @@ "description": "rolling update strategy for this machinepool if not specified, will use ScaleOut" }, "useControlPlaneAsWorker": { - "description": "if IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "if IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", "type": "boolean" } }, @@ -30640,7 +30640,7 @@ "uniqueItems": true }, "v1UpdateStrategy": { - "description": "UpdatesStrategy will be used to translate to RollingUpdateStrategy of a MachineDeployment We'll start with default values for the translation, can expose more details later Following is details of parameters translated from the type ScaleOut =\u003e maxSurge=1, maxUnavailable=0 ScaleIn =\u003e maxSurge=0, maxUnavailable=1", + "description": "UpdatesStrategy will be used to translate to RollingUpdateStrategy of a MachineDeployment We'll start with default values for the translation, can expose more details later Following is details of parameters translated from the type ScaleOut => maxSurge=1, maxUnavailable=0 ScaleIn => maxSurge=0, maxUnavailable=1", "properties": { "type": { "description": "update strategy, either ScaleOut or ScaleIn if empty, will default to RollingUpdateScaleOut", @@ -31638,7 +31638,7 @@ "description": "dataVolumeTemplates is a list of dataVolumes that the VirtualMachineInstance template can reference. DataVolumes in this list are dynamically created for the VirtualMachine and are tied to the VirtualMachine's life-cycle." 
}, "persist": { - "description": "If 'true' add the disk to the Virtual Machine \u0026 Virtual Machine Instance, else add the disk to the Virtual Machine Instance only", + "description": "If 'true' add the disk to the Virtual Machine & Virtual Machine Instance, else add the disk to the Virtual Machine Instance only", "type": "boolean" } }, @@ -31701,7 +31701,7 @@ "v1VMRemoveVolumeEntity": { "properties": { "persist": { - "description": "If 'true' remove the disk from the Virtual Machine \u0026 Virtual Machine Instance, else remove the disk from the Virtual Machine Instance only", + "description": "If 'true' remove the disk from the Virtual Machine & Virtual Machine Instance, else remove the disk from the Virtual Machine Instance only", "type": "boolean" }, "removeVolumeOptions": { @@ -32000,7 +32000,7 @@ "description": "rolling update strategy for this machinepool if not specified, will use ScaleOut" }, "useControlPlaneAsWorker": { - "description": "if IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "if IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", "type": "boolean" } }, @@ -33070,7 +33070,7 @@ "$ref": "#/definitions/v1VmBlockSize" }, "bootOrder": { - "description": "BootOrder is an integer value \u003e 0, used to determine ordering of boot devices. Lower values take precedence. Each disk or interface that has a boot order must have a unique value. Disks without a boot order are not tried if a disk with a boot order exists.", + "description": "BootOrder is an integer value > 0, used to determine ordering of boot devices. Lower values take precedence. Each disk or interface that has a boot order must have a unique value. Disks without a boot order are not tried if a disk with a boot order exists.", "format": "int32", "type": "integer" }, @@ -33411,7 +33411,7 @@ "type": "object" }, "v1VmFieldsV1": { - "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", + "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. 
The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", "properties": { "Raw": { "items": { @@ -33691,7 +33691,7 @@ "type": "integer" }, "bootOrder": { - "description": "BootOrder is an integer value \u003e 0, used to determine ordering of boot devices. Lower values take precedence. Each interface or disk that has a boot order must have a unique value. Interfaces without a boot order are not tried.", + "description": "BootOrder is an integer value > 0, used to determine ordering of boot devices. Lower values take precedence. Each interface or disk that has a boot order must have a unique value. Interfaces without a boot order are not tried.", "format": "int32", "type": "integer" }, @@ -34006,7 +34006,7 @@ "type": "boolean" }, "networkName": { - "description": "References to a NetworkAttachmentDefinition CRD object. Format: \u003cnetworkName\u003e, \u003cnamespace\u003e/\u003cnetworkName\u003e. If namespace is not specified, VMI namespace is assumed.", + "description": "References to a NetworkAttachmentDefinition CRD object. Format: , /. If namespace is not specified, VMI namespace is assumed.", "type": "string" } }, @@ -34360,7 +34360,7 @@ "type": "object" }, "v1VmPodAffinityTerm": { - "description": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \u003ctopologyKey\u003e matches that of any node on which a pod of the set of pods is running", + "description": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", "properties": { "labelSelector": { "$ref": "#/definitions/v1VmLabelSelector" @@ -34447,7 +34447,7 @@ "type": "string" }, "port": { - "description": "Number of port to expose for the virtual machine. This must be a valid port number, 0 \u003c x \u003c 65536.", + "description": "Number of port to expose for the virtual machine. This must be a valid port number, 0 < x < 65536.", "format": "int32", "type": "integer" }, @@ -34564,7 +34564,7 @@ "type": "object" }, "v1VmQuantity": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n ::= \n (Note that may be empty, from the \"\" case in .)\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n ::= \"e\" | \"E\" \n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. 
The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "type": "string" }, "v1VmRTCTimer": { @@ -34863,7 +34863,7 @@ "type": "object" }, "v1VmToleration": { - "description": "The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.", + "description": "The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .", "properties": { "effect": { "description": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", @@ -34901,7 +34901,7 @@ "type": "integer" }, "topologyKey": { - "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \u003ckey, value\u003e as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.", + "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.", "type": "string" }, "whenUnsatisfiable": { @@ -35085,7 +35085,7 @@ "type": "string" }, "subdomain": { - "description": "If specified, the fully qualified vmi hostname will be \"\u003chostname\u003e.\u003csubdomain\u003e.\u003cpod namespace\u003e.svc.\u003ccluster domain\u003e\". If not specified, the vmi will not have a domainname at all. The DNS entry will resolve to the vmi, no matter if the vmi itself can pick up a hostname.", + "description": "If specified, the fully qualified vmi hostname will be \"...svc.\". If not specified, the vmi will not have a domainname at all. 
The DNS entry will resolve to the vmi, no matter if the vmi itself can pick up a hostname.", "type": "string" }, "terminationGracePeriodSeconds": { @@ -35906,7 +35906,7 @@ "description": "rolling update strategy for this machinepool if not specified, will use ScaleOut" }, "useControlPlaneAsWorker": { - "description": "if IsControlPlane==true \u0026\u0026 useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", + "description": "if IsControlPlane==true && useControlPlaneAsWorker==true, then will remove master taint this will not be used for worker pools", "type": "boolean", "x-omitempty": false } @@ -37145,7 +37145,7 @@ }, "summary": "Returns the specified cluster's installer manifest file", "tags": [ - "v1" + "cluster" ] }, "parameters": [ @@ -37170,7 +37170,7 @@ }, "summary": "Retrieves a list of API keys", "tags": [ - "v1" + "apiKeys" ] }, "post": { @@ -37200,7 +37200,7 @@ }, "summary": "Create an API key", "tags": [ - "v1" + "apiKeys" ] } }, @@ -37214,7 +37214,7 @@ }, "summary": "Deletes the specified API key", "tags": [ - "v1" + "apiKeys" ] }, "get": { @@ -37229,7 +37229,7 @@ }, "summary": "Returns the specified API key", "tags": [ - "v1" + "apiKeys" ] }, "parameters": [ @@ -37259,7 +37259,7 @@ }, "summary": "Activate or de-active the specified API key", "tags": [ - "v1" + "apiKeys" ] }, "put": { @@ -37280,7 +37280,7 @@ }, "summary": "Update the specified API key", "tags": [ - "v1" + "apiKeys" ] } }, @@ -37312,7 +37312,7 @@ }, "summary": "Revoke or re-activate the API key access", "tags": [ - "v1" + "apiKeys" ] } }, @@ -37344,7 +37344,7 @@ }, "summary": "Creates a application deployment in the virtual cluster", "tags": [ - "v1" + "appDeployments" ] } }, @@ -37376,7 +37376,7 @@ }, "summary": "Creates a application deployment in one of virtual clusters in the cluster group", "tags": [ - "v1" + "appDeployments" ] } }, @@ -37390,7 +37390,7 @@ }, "summary": "Deletes the specified application deployment", "tags": [ - "v1" + "appDeployments" ] }, "get": { @@ -37405,7 +37405,7 @@ }, "summary": "Returns the specified application deployment", "tags": [ - "v1" + "appDeployments" ] }, "parameters": [ @@ -37431,7 +37431,7 @@ }, "summary": "Returns profile of the specified application deployment", "tags": [ - "v1" + "appDeployments" ] }, "parameters": [ @@ -37461,7 +37461,7 @@ }, "summary": "Updates the specified application deployment profile", "tags": [ - "v1" + "appDeployments" ] } }, @@ -37490,7 +37490,7 @@ }, "summary": "Apply the application deployment profile updates", "tags": [ - "v1" + "appDeployments" ] } }, @@ -37507,7 +37507,7 @@ }, "summary": "Returns the specified application deployment profile tier information", "tags": [ - "v1" + "appDeployments" ] }, "parameters": [ @@ -37544,7 +37544,7 @@ }, "summary": "Updates the specified application deployment profile tier information", "tags": [ - "v1" + "appDeployments" ] } }, @@ -37561,7 +37561,7 @@ }, "summary": "Retrieves a list of manifests of the specified application deployment profile tier", "tags": [ - "v1" + "appDeployments" ] }, "parameters": [ @@ -37594,7 +37594,7 @@ }, "summary": "Returns the specified application deployment tier manifest information", "tags": [ - "v1" + "appDeployments" ] }, "parameters": [ @@ -37638,7 +37638,7 @@ }, "summary": "Updates the specified application deployment tier manifest information", "tags": [ - "v1" + "appDeployments" ] } }, @@ -37655,7 +37655,7 @@ }, "summary": "Retrieves a list of profile versions of the specified application deployment", "tags": [ 
- "v1" + "appDeployments" ] }, "parameters": [ @@ -37696,7 +37696,7 @@ }, "summary": "Creates a application profile", "tags": [ - "v1" + "appProfiles" ] } }, @@ -37713,7 +37713,7 @@ }, "summary": "Retrieves a list of application profile macros", "tags": [ - "v1" + "appProfiles" ] } }, @@ -37727,7 +37727,7 @@ }, "summary": "Deletes the specified application profile", "tags": [ - "v1" + "appProfiles" ] }, "get": { @@ -37742,7 +37742,7 @@ }, "summary": "Returns the specified application profile", "tags": [ - "v1" + "appProfiles" ] }, "parameters": [ @@ -37771,7 +37771,7 @@ }, "summary": "Updates the specified application profile", "tags": [ - "v1" + "appProfiles" ] } }, @@ -37812,7 +37812,7 @@ }, "summary": "Clones the specified application profile", "tags": [ - "v1" + "appProfiles" ] } }, @@ -37850,7 +37850,7 @@ }, "summary": "Validates the specified application profile clone", "tags": [ - "v1" + "appProfiles" ] } }, @@ -37882,7 +37882,7 @@ }, "summary": "Updates the specified application profile metadata", "tags": [ - "v1" + "appProfiles" ] } }, @@ -37899,7 +37899,7 @@ }, "summary": "Retrieves a list of tiers of the specified application profile", "tags": [ - "v1" + "appProfiles" ] }, "parameters": [ @@ -37938,7 +37938,7 @@ }, "summary": "Updates app tier of the specified application profile", "tags": [ - "v1" + "appProfiles" ] }, "post": { @@ -37968,7 +37968,7 @@ }, "summary": "Adds tier to the specified application profile", "tags": [ - "v1" + "appProfiles" ] } }, @@ -37982,7 +37982,7 @@ }, "summary": "Deletes the specified application profile tier", "tags": [ - "v1" + "appProfiles" ] }, "get": { @@ -37997,7 +37997,7 @@ }, "summary": "Returns the specified application profile tier information", "tags": [ - "v1" + "appProfiles" ] }, "parameters": [ @@ -38034,7 +38034,7 @@ }, "summary": "Updates the specified application profile tier", "tags": [ - "v1" + "appProfiles" ] } }, @@ -38051,7 +38051,7 @@ }, "summary": "Retrieves a list of manifests of the specified application profile tier", "tags": [ - "v1" + "appProfiles" ] }, "parameters": [ @@ -38097,7 +38097,7 @@ }, "summary": "Adds manifest to the specified application profile tier", "tags": [ - "v1" + "appProfiles" ] } }, @@ -38111,7 +38111,7 @@ }, "summary": "Deletes the specified application profile tier manifest", "tags": [ - "v1" + "appProfiles" ] }, "get": { @@ -38126,7 +38126,7 @@ }, "summary": "Returns the specified application profile tier manifest information", "tags": [ - "v1" + "appProfiles" ] }, "parameters": [ @@ -38170,7 +38170,7 @@ }, "summary": "Updates the specified application profile tier manifest information", "tags": [ - "v1" + "appProfiles" ] } }, @@ -38187,7 +38187,7 @@ }, "summary": "Returns the specified application profile tier resolved values", "tags": [ - "v1" + "appProfiles" ] }, "parameters": [ @@ -38299,7 +38299,7 @@ }, "summary": "Retrieves the list of audit logs", "tags": [ - "v1" + "audits" ] } }, @@ -38316,7 +38316,7 @@ }, "summary": "Returns the specified audit log", "tags": [ - "v1" + "audits" ] }, "parameters": [ @@ -38342,7 +38342,7 @@ }, "summary": "Returns the specified system audit message", "tags": [ - "v1" + "audits" ] }, "parameters": [ @@ -38383,7 +38383,7 @@ }, "summary": "Updates the specified user message for the specified audit", "tags": [ - "v1" + "audits" ] } }, @@ -38419,7 +38419,7 @@ }, "summary": "Authenticates the user for the specified crendentials", "tags": [ - "v1" + "auth" ] } }, @@ -38447,7 +38447,7 @@ }, "summary": "Deprecated. 
Returns the authentication type for the specified user email id", "tags": [ - "v1" + "auth" ] } }, @@ -38472,7 +38472,7 @@ }, "summary": "Returns the user organization details", "tags": [ - "v1" + "auth" ] } }, @@ -38506,7 +38506,7 @@ }, "summary": "Creates a request to switch organization", "tags": [ - "v1" + "auth" ] } }, @@ -38524,7 +38524,7 @@ }, "summary": "Idp authorization code callback", "tags": [ - "v1" + "auth" ] }, "parameters": [ @@ -38578,7 +38578,7 @@ }, "summary": "Identity provider logout url for the Oidc", "tags": [ - "v1" + "auth" ] }, "parameters": [ @@ -38643,7 +38643,7 @@ }, "summary": "Identity provider callback url for the SMAL authentication", "tags": [ - "v1" + "auth" ] } }, @@ -38690,7 +38690,7 @@ }, "summary": "Identity provider logout url for the SMAL", "tags": [ - "v1" + "auth" ] } }, @@ -38708,7 +38708,7 @@ }, "summary": "Returns a list of user's organizations", "tags": [ - "v1" + "auth" ] } }, @@ -38752,7 +38752,7 @@ }, "summary": "Updates and Activates the specified user password using the password token", "tags": [ - "v1" + "auth" ] } }, @@ -38796,7 +38796,7 @@ }, "summary": "Resets the user password using the password token", "tags": [ - "v1" + "auth" ] } }, @@ -38814,7 +38814,7 @@ }, "summary": "Refreshes authentication token", "tags": [ - "v1" + "auth" ] }, "parameters": [ @@ -38848,7 +38848,7 @@ }, "summary": "Returns a list of predefined Identity Provider (IDP)", "tags": [ - "v1" + "auth" ] } }, @@ -38873,7 +38873,7 @@ }, "summary": "Returns a list of supported sso logins", "tags": [ - "v1" + "auth" ] } }, @@ -38891,7 +38891,7 @@ }, "summary": "Returns a list of supported sso auth providers", "tags": [ - "v1" + "auth" ] } }, @@ -38909,7 +38909,7 @@ }, "summary": "Returns Authorization token. Works as a callback url for the system defined sso apps", "tags": [ - "v1" + "auth" ] }, "parameters": [ @@ -38972,7 +38972,7 @@ }, "summary": "Returns No Content. 
Sends the user organization information via email", "tags": [ - "v1" + "auth" ] } }, @@ -39012,7 +39012,7 @@ }, "summary": "Creates request to reset password via email", "tags": [ - "v1" + "auth" ] } }, @@ -39070,7 +39070,7 @@ }, "summary": "Retrieves a list of AWS cloud accounts", "tags": [ - "v1" + "cloudaccounts" ] }, "post": { @@ -39101,7 +39101,7 @@ }, "summary": "Creates an AWS cloud account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -39115,7 +39115,7 @@ }, "summary": "Deletes the specified AWS account", "tags": [ - "v1" + "cloudaccounts" ] }, "get": { @@ -39138,7 +39138,7 @@ }, "summary": "Returns the specified AWS account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -39168,7 +39168,7 @@ }, "summary": "Updates the specified AWS account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -39226,7 +39226,7 @@ }, "summary": "Retrieves a list of azure cloud accounts", "tags": [ - "v1" + "cloudaccounts" ] }, "post": { @@ -39257,7 +39257,7 @@ }, "summary": "Create azure cloud account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -39271,7 +39271,7 @@ }, "summary": "Deletes the specified azure account", "tags": [ - "v1" + "cloudaccounts" ] }, "get": { @@ -39286,7 +39286,7 @@ }, "summary": "Returns the specified azure cloud account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -39316,7 +39316,7 @@ }, "summary": "Updates the specified azure account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -39374,7 +39374,7 @@ }, "summary": "Retrieves a list of CoxEdge cloud accounts", "tags": [ - "v1" + "cloudaccounts" ] }, "post": { @@ -39405,7 +39405,7 @@ }, "summary": "Creates an CoxEdge cloud account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -39419,7 +39419,7 @@ }, "summary": "Deletes the specified CoxEdge account", "tags": [ - "v1" + "cloudaccounts" ] }, "get": { @@ -39434,7 +39434,7 @@ }, "summary": "Returns the specified CoxEdge account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -39464,7 +39464,7 @@ }, "summary": "Updates the specified CoxEdge account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -39522,7 +39522,7 @@ }, "summary": "Retrieves a list of gcp cloud accounts", "tags": [ - "v1" + "cloudaccounts" ] }, "post": { @@ -39553,7 +39553,7 @@ }, "summary": "Creates a GCP cloud account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -39567,7 +39567,7 @@ }, "summary": "Deletes the specified GCP account", "tags": [ - "v1" + "cloudaccounts" ] }, "get": { @@ -39582,7 +39582,7 @@ }, "summary": "Returns the specified GCP cloud account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -39613,7 +39613,7 @@ }, "summary": "Updates the specified GCP account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -39671,7 +39671,7 @@ }, "summary": "Retrieves a list of Maas cloud accounts", "tags": [ - "v1" + "cloudaccounts" ] }, "post": { @@ -39702,7 +39702,7 @@ }, "summary": "Creates an Maas cloud account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -39716,7 +39716,7 @@ }, "summary": "Deletes the specified Maas account", "tags": [ - "v1" + "cloudaccounts" ] }, "get": { @@ -39731,7 +39731,7 @@ }, "summary": "Returns the specified Maas account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -39762,7 +39762,7 @@ }, "summary": "Patches the specified CloudAccount Maas", "tags": [ - "v1" + "cloudaccounts" ] }, "put": { @@ -39783,7 +39783,7 @@ }, "summary": "Updates the specified Maas account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -39800,7 +39800,7 @@ }, "summary": "Get the maas azs for a given account", "tags": [ - "v1" + 
"cloudaccounts" ] }, "parameters": [ @@ -39825,7 +39825,7 @@ }, "summary": "Get the maas domains for a given account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -39850,7 +39850,7 @@ }, "summary": "Get the maas pools for a given account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -39875,7 +39875,7 @@ }, "summary": "Get the maas subnets for a given account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -39941,7 +39941,7 @@ }, "summary": "Retrieves a list of OpenStack cloud accounts", "tags": [ - "v1" + "cloudaccounts" ] }, "post": { @@ -39972,7 +39972,7 @@ }, "summary": "Creates a OpenStack cloud account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -39986,7 +39986,7 @@ }, "summary": "Deletes the specified OpenStack account", "tags": [ - "v1" + "cloudaccounts" ] }, "get": { @@ -40001,7 +40001,7 @@ }, "summary": "Returns the specified OpenStack account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -40031,7 +40031,7 @@ }, "summary": "Updates the specified OpenStack account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -40065,7 +40065,7 @@ }, "summary": "Get the openstack azs for a given account and region", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -40107,7 +40107,7 @@ }, "summary": "Get the openstack keypairs for a given account and scope", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -40149,7 +40149,7 @@ }, "summary": "Get the openstack keypairs for a given account and scope", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -40191,7 +40191,7 @@ }, "summary": "Get the openstack networks for a given account and scope", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -40216,7 +40216,7 @@ }, "summary": "Get the openstack projects for a given account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -40241,7 +40241,7 @@ }, "summary": "Get the openstack regions for a given account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -40301,7 +40301,7 @@ }, "summary": "Retrieves a list of cloud accounts summary", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -40359,7 +40359,7 @@ }, "summary": "Retrieves a list of Tencent cloud accounts", "tags": [ - "v1" + "cloudaccounts" ] }, "post": { @@ -40390,7 +40390,7 @@ }, "summary": "Creates an Tencent cloud account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -40404,7 +40404,7 @@ }, "summary": "Deletes the specified Tencent account", "tags": [ - "v1" + "cloudaccounts" ] }, "get": { @@ -40419,7 +40419,7 @@ }, "summary": "Returns the specified Tencent account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -40449,7 +40449,7 @@ }, "summary": "Updates the specified Tencent account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -40507,7 +40507,7 @@ }, "summary": "Retrieves a list of vSphere cloud accounts", "tags": [ - "v1" + "cloudaccounts" ] }, "post": { @@ -40538,7 +40538,7 @@ }, "summary": "Creates a vSphere cloud account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -40552,7 +40552,7 @@ }, "summary": "Deletes the specified vSphere account", "tags": [ - "v1" + "cloudaccounts" ] }, "get": { @@ -40567,7 +40567,7 @@ }, "summary": "Returns the specified vSphere account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -40598,7 +40598,7 @@ }, "summary": "Updates the specified VSphere account", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -40615,7 +40615,7 @@ }, "summary": "Get the vSphere computecluster resources for the given overlord account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ 
@@ -40655,9 +40655,9 @@ } } }, - "summary": "Get the vSphere datacenters \u0026 datacluster for the given overlord account", + "summary": "Get the vSphere datacenters & datacluster for the given overlord account", "tags": [ - "v1" + "cloudaccounts" ] }, "parameters": [ @@ -40697,7 +40697,7 @@ }, "summary": "Update the geolocation annotation", "tags": [ - "v1" + "cloudaccounts" ] } }, @@ -40714,7 +40714,7 @@ }, "summary": "Returns the specified AKS cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -40755,7 +40755,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -40796,7 +40796,7 @@ }, "summary": "Creates an AKS cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -40810,7 +40810,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -40847,7 +40847,7 @@ }, "summary": "Updates the specified AKS cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -40905,7 +40905,7 @@ }, "summary": "Retrieves a list of AKS machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -40951,7 +40951,7 @@ }, "summary": "Adds the machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -40965,7 +40965,7 @@ }, "summary": "Deletes the specified Azure machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -40980,7 +40980,7 @@ }, "summary": "Returns the specified AKS machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -41024,7 +41024,7 @@ }, "summary": "Updates the specified machine to the cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41041,7 +41041,7 @@ }, "summary": "Returns the specified AWS cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -41082,7 +41082,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41123,7 +41123,7 @@ }, "summary": "Creates an AWS cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41137,7 +41137,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -41174,7 +41174,7 @@ }, "summary": "Updates the specified AWS cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41232,7 +41232,7 @@ }, "summary": "Retrieves a list of AWS machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -41278,7 +41278,7 @@ }, "summary": "Adds the machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41292,7 +41292,7 @@ }, "summary": "Deletes the specified AWS machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -41307,7 +41307,7 @@ }, "summary": "Returns the specified AWS machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -41351,7 +41351,7 @@ }, "summary": "Updates the specified machine to the cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41368,7 +41368,7 @@ }, "summary": "Returns the specified Azure cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -41409,7 +41409,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41450,7 +41450,7 @@ }, "summary": "Creates an Azure cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41464,7 +41464,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -41501,7 
+41501,7 @@ }, "summary": "Updates the specified Azure cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41560,7 +41560,7 @@ }, "summary": "Retrieves a list of Azure machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -41606,7 +41606,7 @@ }, "summary": "Adds the machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41620,7 +41620,7 @@ }, "summary": "Deletes the specified Azure machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -41636,7 +41636,7 @@ }, "summary": "Returns the specified Azure machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -41680,7 +41680,7 @@ }, "summary": "Updates the specified machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41697,7 +41697,7 @@ }, "summary": "Returns the specified CoxEdge cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -41738,7 +41738,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41779,7 +41779,7 @@ }, "summary": "Creates a CoxEdge cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41793,7 +41793,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -41830,7 +41830,7 @@ }, "summary": "Updates the specified CoxEdge cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41888,7 +41888,7 @@ }, "summary": "Retrieves a list of CoxEdge machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -41934,7 +41934,7 @@ }, "summary": "Adds the machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -41948,7 +41948,7 @@ }, "summary": "Deletes the specified CoxEdge machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -41963,7 +41963,7 @@ }, "summary": "Returns the specified CoxEdge machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -42007,7 +42007,7 @@ }, "summary": "Updates the specified machine to the cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42024,7 +42024,7 @@ }, "summary": "Returns the specified edge-native cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -42065,7 +42065,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42106,7 +42106,7 @@ }, "summary": "Creates a edge-native cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42120,7 +42120,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -42157,7 +42157,7 @@ }, "summary": "Updates the specified edge-native cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42174,7 +42174,7 @@ }, "summary": "Retrieves a list of edge-native machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -42220,7 +42220,7 @@ }, "summary": "Adds the edge-native machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42234,7 +42234,7 @@ }, "summary": "Deletes the specified edge-native machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -42249,7 +42249,7 @@ }, "summary": "Returns the specified edge-native machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -42293,7 +42293,7 @@ }, "summary": "Updates the specified machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42310,7 +42310,7 @@ }, "summary": "Returns the specified 
edge cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -42351,7 +42351,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42392,7 +42392,7 @@ }, "summary": "Creates a edge cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42406,7 +42406,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -42443,7 +42443,7 @@ }, "summary": "Updates the specified Edge cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42460,7 +42460,7 @@ }, "summary": "Retrieves a list of Edge machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -42506,7 +42506,7 @@ }, "summary": "Adds the Edge machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42520,7 +42520,7 @@ }, "summary": "Deletes the specified Edge machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -42535,7 +42535,7 @@ }, "summary": "Returns the specified Edge machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -42579,7 +42579,7 @@ }, "summary": "Updates the specified machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42596,7 +42596,7 @@ }, "summary": "Returns the specified EKS cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -42637,7 +42637,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42669,7 +42669,7 @@ }, "summary": "Updates EKS cloud config's fargate profiles", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42710,7 +42710,7 @@ }, "summary": "Creates an EKS cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42724,7 +42724,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -42761,7 +42761,7 @@ }, "summary": "Updates the specified EKS cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42819,7 +42819,7 @@ }, "summary": "Retrieves a list of EKS machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -42865,7 +42865,7 @@ }, "summary": "Adds the machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42879,7 +42879,7 @@ }, "summary": "Deletes the specified EKS machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -42894,7 +42894,7 @@ }, "summary": "Returns the specified EKS machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -42938,7 +42938,7 @@ }, "summary": "Updates the specified machine to the cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -42955,7 +42955,7 @@ }, "summary": "Returns the specified GCP cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -42996,7 +42996,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43037,7 +43037,7 @@ }, "summary": "Creates a Gcp cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43051,7 +43051,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -43088,7 +43088,7 @@ }, "summary": "Updates the specified GCP cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43146,7 +43146,7 @@ }, "summary": "Retrieves a list of GCP machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -43192,7 +43192,7 @@ }, "summary": "Adds the machine to cloud 
config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43206,7 +43206,7 @@ }, "summary": "Deletes the specified GCP machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -43221,7 +43221,7 @@ }, "summary": "Returns the specified GCP machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -43265,7 +43265,7 @@ }, "summary": "Updates the specified machine to the cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43282,7 +43282,7 @@ }, "summary": "Returns the specified Generic cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -43323,7 +43323,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43364,7 +43364,7 @@ }, "summary": "Creates a generic cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43378,7 +43378,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -43415,7 +43415,7 @@ }, "summary": "Updates the specified generic cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43473,7 +43473,7 @@ }, "summary": "Retrieves a list of Generic machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -43519,7 +43519,7 @@ }, "summary": "Adds the machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43533,7 +43533,7 @@ }, "summary": "Deletes the specified machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -43548,7 +43548,7 @@ }, "summary": "Returns the specified generic machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -43592,7 +43592,7 @@ }, "summary": "Updates the specified machine to the cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43609,7 +43609,7 @@ }, "summary": "Returns the specified GKE cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -43650,7 +43650,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43691,7 +43691,7 @@ }, "summary": "Creates an GKE cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43705,7 +43705,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -43742,7 +43742,7 @@ }, "summary": "Updates the specified GKE cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43800,7 +43800,7 @@ }, "summary": "Retrieves a list of GKE machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -43846,7 +43846,7 @@ }, "summary": "Adds the machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43860,7 +43860,7 @@ }, "summary": "Deletes the specified Gcp machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -43875,7 +43875,7 @@ }, "summary": "Returns the specified GKE machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -43919,7 +43919,7 @@ }, "summary": "Updates the specified machine to the cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -43936,7 +43936,7 @@ }, "summary": "Returns the specified libvirt cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -43977,7 +43977,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44018,7 +44018,7 @@ }, "summary": "Creates a libvirt cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44032,7 +44032,7 @@ }, "summary": "Deletes the 
specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -44069,7 +44069,7 @@ }, "summary": "Updates the specified Libvirt cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44086,7 +44086,7 @@ }, "summary": "Retrieves a list of Libvirt machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -44132,7 +44132,7 @@ }, "summary": "Adds the Libvirt machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44146,7 +44146,7 @@ }, "summary": "Deletes the specified Libvirt machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -44161,7 +44161,7 @@ }, "summary": "Returns the specified Libvirt machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -44205,7 +44205,7 @@ }, "summary": "Updates the specified machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44222,7 +44222,7 @@ }, "summary": "Returns the specified Maas cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -44263,7 +44263,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44304,7 +44304,7 @@ }, "summary": "Creates an Maas cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44318,7 +44318,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -44355,7 +44355,7 @@ }, "summary": "Updates the specified Maas cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44413,7 +44413,7 @@ }, "summary": "Retrieves a list of Maas machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -44459,7 +44459,7 @@ }, "summary": "Adds the machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44473,7 +44473,7 @@ }, "summary": "Deletes the specified Maas machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -44488,7 +44488,7 @@ }, "summary": "Returns the specified Maas machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -44532,7 +44532,7 @@ }, "summary": "Updates the specified machine to the cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44549,7 +44549,7 @@ }, "summary": "Returns the specified OpenStack cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -44590,7 +44590,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44631,7 +44631,7 @@ }, "summary": "Creates a OpenStack cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44645,7 +44645,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -44682,7 +44682,7 @@ }, "summary": "Updates the specified OpenStack cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44699,7 +44699,7 @@ }, "summary": "Retrieves a list of OpenStack machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -44745,7 +44745,7 @@ }, "summary": "Adds the OpenStack machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44759,7 +44759,7 @@ }, "summary": "Deletes the specified OpenStack machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -44774,7 +44774,7 @@ }, "summary": "Returns the specified OpenStack machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -44818,7 +44818,7 @@ }, "summary": "Updates the specified machine to cloud config's machine pool", "tags": [ - "v1" + 
"cloudconfigs" ] } }, @@ -44835,7 +44835,7 @@ }, "summary": "Returns the specified TKE cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -44876,7 +44876,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44917,7 +44917,7 @@ }, "summary": "Creates an TKE cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -44931,7 +44931,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -44968,7 +44968,7 @@ }, "summary": "Updates the specified TKE cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45026,7 +45026,7 @@ }, "summary": "Retrieves a list of TKE machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -45072,7 +45072,7 @@ }, "summary": "Adds the machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45086,7 +45086,7 @@ }, "summary": "Deletes the specified Tencent machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -45101,7 +45101,7 @@ }, "summary": "Returns the specified Tke machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -45145,7 +45145,7 @@ }, "summary": "Updates the specified machine to the cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45162,7 +45162,7 @@ }, "summary": "Returns the specified Virtual cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -45203,7 +45203,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45244,7 +45244,7 @@ }, "summary": "Creates a virtual cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45258,7 +45258,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -45295,7 +45295,7 @@ }, "summary": "Updates the specified virtual cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45353,7 +45353,7 @@ }, "summary": "Retrieves a list of virtual machines", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -45399,7 +45399,7 @@ }, "summary": "Adds the machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45413,7 +45413,7 @@ }, "summary": "Deletes the specified virtual machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -45428,7 +45428,7 @@ }, "summary": "Returns the specified virtual machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -45472,7 +45472,7 @@ }, "summary": "Updates the specified machine to the cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45504,7 +45504,7 @@ }, "summary": "Updates and resizes the virtual cluster", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45521,7 +45521,7 @@ }, "summary": "Returns the specified vSphere cloud config", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -45562,7 +45562,7 @@ }, "summary": "Updates the cluster configuration information", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45603,7 +45603,7 @@ }, "summary": "Creates a vSphere cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45617,7 +45617,7 @@ }, "summary": "Deletes the specified machine pool", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -45654,7 +45654,7 @@ }, "summary": "Updates the specified vSphere cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45712,7 +45712,7 @@ }, "summary": "Retrieves a list of vSphere machines", "tags": [ - 
"v1" + "cloudconfigs" ] }, "parameters": [ @@ -45758,7 +45758,7 @@ }, "summary": "Adds the vSphere machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45772,7 +45772,7 @@ }, "summary": "Deletes the specified vSphere machine", "tags": [ - "v1" + "cloudconfigs" ] }, "get": { @@ -45787,7 +45787,7 @@ }, "summary": "Returns the specified vSphere machine", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -45831,7 +45831,7 @@ }, "summary": "Updates the specified machine to cloud config's machine pool", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45906,7 +45906,7 @@ }, "summary": "Updates the specified machine maintenance", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45981,7 +45981,7 @@ }, "summary": "Updates the specified machine maintenance", "tags": [ - "v1" + "cloudconfigs" ] } }, @@ -45998,7 +45998,7 @@ }, "summary": "Returns the specified cloud config's machine pools and machine uid", "tags": [ - "v1" + "cloudconfigs" ] }, "parameters": [ @@ -46037,7 +46037,7 @@ }, "summary": "Retrieves AWS external id and account id", "tags": [ - "v1" + "clouds" ] } }, @@ -46068,7 +46068,7 @@ }, "summary": "Validate the specified AWS account credentials", "tags": [ - "v1" + "clouds" ] } }, @@ -46100,7 +46100,7 @@ }, "summary": "validates aws cloud watch credentials", "tags": [ - "v1" + "clouds" ] } }, @@ -46128,7 +46128,7 @@ }, "summary": "Retrieves AWS cloud account usage cost from cost explorer.", "tags": [ - "v1" + "clouds" ] } }, @@ -46169,7 +46169,7 @@ }, "summary": "Get AWS Volume Size", "tags": [ - "v1" + "clouds" ] } }, @@ -46203,7 +46203,7 @@ }, "summary": "Retrieves a list of AWS policies for the specified account", "tags": [ - "v1" + "clouds" ] } }, @@ -46240,7 +46240,7 @@ }, "summary": "Validate the aws policy arns validate", "tags": [ - "v1" + "clouds" ] } }, @@ -46266,7 +46266,7 @@ }, "summary": "Retrieves a list of AWS regions for the specified account", "tags": [ - "v1" + "clouds" ] } }, @@ -46299,7 +46299,7 @@ }, "summary": "Retrieves a list of AWS availability zones for the specified region", "tags": [ - "v1" + "clouds" ] } }, @@ -46333,7 +46333,7 @@ }, "summary": "Copies the specified image from one region to another region", "tags": [ - "v1" + "clouds" ] } }, @@ -46377,7 +46377,7 @@ }, "summary": "Check if Aws cluster name is valid", "tags": [ - "v1" + "clouds" ] } }, @@ -46411,7 +46411,7 @@ }, "summary": "Returns AWS image for the specified AMI name", "tags": [ - "v1" + "clouds" ] } }, @@ -46464,7 +46464,7 @@ }, "summary": "Retrieves a list of AWS instance types", "tags": [ - "v1" + "clouds" ] } }, @@ -46497,7 +46497,7 @@ }, "summary": "Retrieves a list of AWS keypairs", "tags": [ - "v1" + "clouds" ] } }, @@ -46540,7 +46540,7 @@ }, "summary": "Validate the specified AWS keypair", "tags": [ - "v1" + "clouds" ] } }, @@ -46573,7 +46573,7 @@ }, "summary": "Retrieves a list of AWS KMS keys for the specified account", "tags": [ - "v1" + "clouds" ] } }, @@ -46616,7 +46616,7 @@ }, "summary": "Validate an Aws KMS key for the specified account", "tags": [ - "v1" + "clouds" ] } }, @@ -46642,7 +46642,7 @@ }, "summary": "Retrieves a list of AWS storage types", "tags": [ - "v1" + "clouds" ] } }, @@ -46675,7 +46675,7 @@ }, "summary": "Retrieves a list of VPCs for the specified account", "tags": [ - "v1" + "clouds" ] } }, @@ -46706,7 +46706,7 @@ }, "summary": "Validate the AWS S3 bucket", "tags": [ - "v1" + "clouds" ] } }, @@ -46745,7 +46745,7 @@ }, "summary": "Retrieves a list of AWS security groups for the specified account", "tags": [ - "v1" + "clouds" ] 
} }, @@ -46772,7 +46772,7 @@ }, "summary": "Get all AWS Volume Types", "tags": [ - "v1" + "clouds" ] } }, @@ -46804,7 +46804,7 @@ }, "summary": "Check if Azure account is valid", "tags": [ - "v1" + "clouds" ] } }, @@ -46829,7 +46829,7 @@ }, "summary": "Retrieves a list of Azure groups", "tags": [ - "v1" + "clouds" ] } }, @@ -46860,7 +46860,7 @@ }, "summary": "Retrieves a list of Azure regions", "tags": [ - "v1" + "clouds" ] } }, @@ -46907,7 +46907,7 @@ }, "summary": "Retrieves a list of Azure instance types", "tags": [ - "v1" + "clouds" ] } }, @@ -46933,7 +46933,7 @@ }, "summary": "Retrieves a list of Azure storage types", "tags": [ - "v1" + "clouds" ] } }, @@ -46991,7 +46991,7 @@ }, "summary": "Check if Azure cluster name is valid", "tags": [ - "v1" + "clouds" ] } }, @@ -47037,7 +47037,7 @@ }, "summary": "Retrieves a list of Azure virtual network list for the sepcified account", "tags": [ - "v1" + "clouds" ] } }, @@ -47077,7 +47077,7 @@ }, "summary": "Retrieves a list of Azure resource group for the specified account", "tags": [ - "v1" + "clouds" ] } }, @@ -47103,7 +47103,7 @@ }, "summary": "Retrieves a list of Azure zones for the specified region", "tags": [ - "v1" + "clouds" ] } }, @@ -47144,7 +47144,7 @@ }, "summary": "Get Azure private DNS zones for the given resource group", "tags": [ - "v1" + "clouds" ] } }, @@ -47184,7 +47184,7 @@ }, "summary": "Get Azure storage accounts", "tags": [ - "v1" + "clouds" ] } }, @@ -47231,7 +47231,7 @@ }, "summary": "Get Azure storage containers", "tags": [ - "v1" + "clouds" ] } }, @@ -47257,7 +47257,7 @@ }, "summary": "Get Azure storage account types", "tags": [ - "v1" + "clouds" ] } }, @@ -47284,7 +47284,7 @@ }, "summary": "Retrieves a list of Azure subscription list for the specified account", "tags": [ - "v1" + "clouds" ] } }, @@ -47310,7 +47310,7 @@ }, "summary": "Returns the Azure vhd url for the specified vhd location", "tags": [ - "v1" + "clouds" ] } }, @@ -47341,7 +47341,7 @@ }, "summary": "Validate the specified CoxEdge account credentials", "tags": [ - "v1" + "clouds" ] } }, @@ -47358,7 +47358,7 @@ }, "summary": "Retrieves a list of default base urls", "tags": [ - "v1" + "clouds" ] } }, @@ -47390,7 +47390,7 @@ }, "summary": "Retrieves a list of environments for the specified account", "tags": [ - "v1" + "clouds" ] }, "post": { @@ -47416,7 +47416,7 @@ }, "summary": "Retrieves a list of environments for baseUrl and apiKey", "tags": [ - "v1" + "clouds" ] } }, @@ -47442,7 +47442,7 @@ }, "summary": "Retrieves a list of organizations for the specified account", "tags": [ - "v1" + "clouds" ] }, "post": { @@ -47468,7 +47468,7 @@ }, "summary": "Retrieves a list of organizations for baseUrl and apiKey", "tags": [ - "v1" + "clouds" ] } }, @@ -47511,7 +47511,7 @@ }, "summary": "Retrieves a list of CoxEdge regions for the specified account", "tags": [ - "v1" + "clouds" ] } }, @@ -47551,7 +47551,7 @@ }, "summary": "Retrieves a list of CoxEdge instance types", "tags": [ - "v1" + "clouds" ] } }, @@ -47583,7 +47583,7 @@ }, "summary": "Retrieves a list of services for the specified account", "tags": [ - "v1" + "clouds" ] }, "post": { @@ -47609,7 +47609,7 @@ }, "summary": "Retrieves a list of services for baseUrl and apiKey", "tags": [ - "v1" + "clouds" ] } }, @@ -47640,7 +47640,7 @@ }, "summary": "Validate the specified GCP account credentials", "tags": [ - "v1" + "clouds" ] } }, @@ -47671,7 +47671,7 @@ }, "summary": "Validate the specified GCP az", "tags": [ - "v1" + "clouds" ] } }, @@ -47702,7 +47702,7 @@ }, "summary": "Validate the specified GCP bucket 
name credentials", "tags": [ - "v1" + "clouds" ] } }, @@ -47738,7 +47738,7 @@ }, "summary": "Validates the image with tag", "tags": [ - "v1" + "clouds" ] } }, @@ -47764,7 +47764,7 @@ }, "summary": "Returns the Gcp image url for the specified image location", "tags": [ - "v1" + "clouds" ] } }, @@ -47790,7 +47790,7 @@ }, "summary": "Retrieves a list of GCP projects for the specified account", "tags": [ - "v1" + "clouds" ] } }, @@ -47823,7 +47823,7 @@ }, "summary": "Retrieves a list of GCP regions", "tags": [ - "v1" + "clouds" ] } }, @@ -47863,7 +47863,7 @@ }, "summary": "Retrieves a list of GCP networks for the specified account", "tags": [ - "v1" + "clouds" ] } }, @@ -47903,7 +47903,7 @@ }, "summary": "Retrieves a list of GCP zones for the specified account and region", "tags": [ - "v1" + "clouds" ] } }, @@ -47941,7 +47941,7 @@ }, "summary": "Validate the specified GCP project", "tags": [ - "v1" + "clouds" ] } }, @@ -47974,7 +47974,7 @@ }, "summary": "Retrieves a list of GCP zones for the specified account", "tags": [ - "v1" + "clouds" ] } }, @@ -48021,7 +48021,7 @@ }, "summary": "Retrieves a list of GCP instance types", "tags": [ - "v1" + "clouds" ] } }, @@ -48047,7 +48047,7 @@ }, "summary": "Retrieves a list of Gcp storage types", "tags": [ - "v1" + "clouds" ] } }, @@ -48079,7 +48079,7 @@ }, "summary": "Check if Maas account is valid", "tags": [ - "v1" + "clouds" ] } }, @@ -48104,7 +48104,7 @@ }, "summary": "Retrieves a list of Maas zones for a particular account uid", "tags": [ - "v1" + "clouds" ] } }, @@ -48129,7 +48129,7 @@ }, "summary": "Retrieves a list of Maas domains", "tags": [ - "v1" + "clouds" ] } }, @@ -48154,7 +48154,7 @@ }, "summary": "Retrieves a list of Maas pools for a particular account uid", "tags": [ - "v1" + "clouds" ] } }, @@ -48179,7 +48179,7 @@ }, "summary": "Retrieves a list of Maas subnets for a particular account uid", "tags": [ - "v1" + "clouds" ] } }, @@ -48211,7 +48211,7 @@ }, "summary": "Check if OpenStack account is valid", "tags": [ - "v1" + "clouds" ] } }, @@ -48254,7 +48254,7 @@ }, "summary": "Retrieves a list of OpenStack azs for a particular account uid", "tags": [ - "v1" + "clouds" ] } }, @@ -48297,7 +48297,7 @@ }, "summary": "Returns the OpenStack flavors", "tags": [ - "v1" + "clouds" ] } }, @@ -48340,7 +48340,7 @@ }, "summary": "Returns the OpenStack keypair", "tags": [ - "v1" + "clouds" ] } }, @@ -48383,7 +48383,7 @@ }, "summary": "Returns the OpenStack networks", "tags": [ - "v1" + "clouds" ] } }, @@ -48408,7 +48408,7 @@ }, "summary": "Returns the OpenStack projects", "tags": [ - "v1" + "clouds" ] } }, @@ -48433,7 +48433,7 @@ }, "summary": "Returns the OpenStack regions", "tags": [ - "v1" + "clouds" ] } }, @@ -48464,7 +48464,7 @@ }, "summary": "Validate the specified Tencent account credentials", "tags": [ - "v1" + "clouds" ] } }, @@ -48490,7 +48490,7 @@ }, "summary": "Retrieves a list of Tencent regions for the specified account", "tags": [ - "v1" + "clouds" ] } }, @@ -48544,7 +48544,7 @@ }, "summary": "Retrieves a list of Tencent instance types", "tags": [ - "v1" + "clouds" ] } }, @@ -48577,7 +48577,7 @@ }, "summary": "Retrieves a list of keypairs for the specified account", "tags": [ - "v1" + "clouds" ] } }, @@ -48610,7 +48610,7 @@ }, "summary": "Retrieves a list of secutity groups for the specified account", "tags": [ - "v1" + "clouds" ] } }, @@ -48650,7 +48650,7 @@ }, "summary": "Retrieves a list of Tencent storage types", "tags": [ - "v1" + "clouds" ] } }, @@ -48683,7 +48683,7 @@ }, "summary": "Retrieves a list of VPCs for the specified 
account", "tags": [ - "v1" + "clouds" ] } }, @@ -48716,7 +48716,7 @@ }, "summary": "Retrieves a list of Tencent availability zones for the specified region", "tags": [ - "v1" + "clouds" ] } }, @@ -48748,7 +48748,7 @@ }, "summary": "Check if Vsphere account is valid", "tags": [ - "v1" + "clouds" ] } }, @@ -48773,7 +48773,7 @@ }, "summary": "Returns the vsphere data centers", "tags": [ - "v1" + "clouds" ] } }, @@ -48813,7 +48813,7 @@ }, "summary": "Returns the resources for vsphere compute cluster", "tags": [ - "v1" + "clouds" ] } }, @@ -48841,7 +48841,7 @@ }, "summary": "Retrieves vsphere env", "tags": [ - "v1" + "clouds" ] } }, @@ -48888,7 +48888,7 @@ }, "summary": "Retrieves the cloud instance spot price based on zone and timestamp for a specific cloud", "tags": [ - "v1" + "clouds" ] } }, @@ -48928,7 +48928,7 @@ }, "summary": "Returns the cloud compute rate", "tags": [ - "v1" + "clouds" ] } }, @@ -48974,7 +48974,7 @@ }, "summary": "Returns the cloud storage rate", "tags": [ - "v1" + "clouds" ] } }, @@ -49006,7 +49006,7 @@ }, "summary": "Create cluster groups", "tags": [ - "v1" + "clustergroups" ] } }, @@ -49023,7 +49023,7 @@ }, "summary": "Get cluster group developer credit usage by scope", "tags": [ - "v1" + "clustergroups" ] }, "parameters": [ @@ -49048,7 +49048,7 @@ }, "summary": "Retrieves a list of cluster groups host cluster summary", "tags": [ - "v1" + "clustergroups" ] } }, @@ -49065,7 +49065,7 @@ }, "summary": "Retrieves a list of cluster groups host cluster metadata", "tags": [ - "v1" + "clustergroups" ] } }, @@ -49093,7 +49093,7 @@ }, "summary": "Validates the cluster groups name", "tags": [ - "v1" + "clustergroups" ] } }, @@ -49107,7 +49107,7 @@ }, "summary": "Deletes the specified cluster group", "tags": [ - "v1" + "clustergroups" ] }, "get": { @@ -49122,7 +49122,7 @@ }, "summary": "Returns the specified cluster groups", "tags": [ - "v1" + "clustergroups" ] }, "parameters": [ @@ -49162,7 +49162,7 @@ }, "summary": "Updates the specified cluster groups clusters", "tags": [ - "v1" + "clustergroups" ] } }, @@ -49193,7 +49193,7 @@ }, "summary": "Updates cluster reference and host cluster config", "tags": [ - "v1" + "clustergroups" ] } }, @@ -49225,7 +49225,7 @@ }, "summary": "Updates the host cluster config in cluster group", "tags": [ - "v1" + "clustergroups" ] } }, @@ -49256,7 +49256,7 @@ }, "summary": "Updates the specified cluster groups meta", "tags": [ - "v1" + "clustergroups" ] } }, @@ -49315,7 +49315,7 @@ }, "summary": "Retrieves a list of cluster profiles", "tags": [ - "v1" + "clusterprofiles" ] }, "post": { @@ -49345,7 +49345,7 @@ }, "summary": "Creates a cluster profile", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -49371,7 +49371,7 @@ }, "summary": "Deletes list of cluster profiles", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -49409,7 +49409,7 @@ }, "summary": "Imports a cluster profile", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -49460,7 +49460,7 @@ }, "summary": "Imports a cluster profile via file", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -49486,7 +49486,7 @@ }, "summary": "Validates cluster profile import", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -49503,7 +49503,7 @@ }, "summary": "Retrieves a list of macros", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -49538,7 +49538,7 @@ }, "summary": "Validates the cluster profile metadata", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -49564,7 +49564,7 @@ }, "summary": "Validates cluster profile packs", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -49605,7 +49605,7 @@ }, "summary": "Creates a 
clone of the specified cluster profile", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -49644,7 +49644,7 @@ }, "summary": "Validates the cluster profile clone", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -49670,7 +49670,7 @@ }, "summary": "Export the specified cluster profile", "tags": [ - "v1" + "clusterprofiles" ] }, "parameters": [ @@ -49716,7 +49716,7 @@ }, "summary": "Downloads the specified cluster profile", "tags": [ - "v1" + "clusterprofiles" ] }, "parameters": [ @@ -49768,7 +49768,7 @@ }, "summary": "Updates the specified cluster profile metadata", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -49804,7 +49804,7 @@ }, "summary": "Updates cluster profile packs ref", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -49821,7 +49821,7 @@ }, "summary": "Returns the specified cluster profile packs", "tags": [ - "v1" + "clusterprofiles" ] }, "parameters": [ @@ -49866,7 +49866,7 @@ }, "summary": "Adds a new pack to the specified cluster profile and returns the created pack uid", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -49883,7 +49883,7 @@ }, "summary": "Returns the specified cluster profile pack manifests", "tags": [ - "v1" + "clusterprofiles" ] }, "parameters": [ @@ -49915,7 +49915,7 @@ }, "summary": "Returns the specified cluster profile packs resolved values", "tags": [ - "v1" + "clusterprofiles" ] }, "parameters": [ @@ -49945,7 +49945,7 @@ }, "summary": "Deletes the specified pack information in the cluster profile", "tags": [ - "v1" + "clusterprofiles" ] }, "get": { @@ -49960,7 +49960,7 @@ }, "summary": "Returns the specified cluster profile pack", "tags": [ - "v1" + "clusterprofiles" ] }, "parameters": [ @@ -49997,7 +49997,7 @@ }, "summary": "Updates the specified pack information in the cluster profile", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -50037,7 +50037,7 @@ }, "summary": "Returns the specified cluster profile pack configuration", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -50054,7 +50054,7 @@ }, "summary": "Returns the associated manifests for the specified profile's pack", "tags": [ - "v1" + "clusterprofiles" ] }, "parameters": [ @@ -50100,7 +50100,7 @@ }, "summary": "Adds manifest to the profiles packs and returns the added manifests uid", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -50114,7 +50114,7 @@ }, "summary": "Deletes the specified cluster profile pack manifest", "tags": [ - "v1" + "clusterprofiles" ] }, "get": { @@ -50129,7 +50129,7 @@ }, "summary": "Returns the specified cluster profile pack manifest", "tags": [ - "v1" + "clusterprofiles" ] }, "parameters": [ @@ -50173,7 +50173,7 @@ }, "summary": "Updates the specified manifest of the profile's pack", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -50197,7 +50197,7 @@ }, "summary": "Publishes the specified cluster profile", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -50223,7 +50223,7 @@ }, "summary": "Downloads the specified cluster profile", "tags": [ - "v1" + "clusterprofiles" ] }, "parameters": [ @@ -50265,7 +50265,7 @@ }, "summary": "Validates specified cluster profile packs", "tags": [ - "v1" + "clusterprofiles" ] } }, @@ -50312,7 +50312,7 @@ }, "summary": "Retrieves a list of application deployments filter summary Supported filter fields - [\"appDeploymentName\", \"clusterUid\", \"tags\"] Supported sort fields - [\"appDeploymentName\", \"creationTimestamp\", \"lastModifiedTimestamp\"]", "tags": [ - "v1" + "dashboard" ] } }, @@ -50359,7 +50359,7 @@ }, "summary": "Retrieves a list of application profiles filter summary Supported filter fields - [\"profileName\", \"tags\"] Supported 
sort fields - [\"profileName\", \"creationTimestamp\", \"lastModifiedTimestamp\"]", "tags": [ - "v1" + "dashboard" ] } }, @@ -50376,7 +50376,7 @@ }, "summary": "Retrieves a list of application profile metadata", "tags": [ - "v1" + "dashboard" ] } }, @@ -50402,7 +50402,7 @@ }, "summary": "Retrieves a list of edgehosts summary", "tags": [ - "v1" + "dashboard" ] } }, @@ -50448,7 +50448,7 @@ }, "summary": "Retrieves a list of cloud accounts metadata", "tags": [ - "v1" + "dashboard" ] } }, @@ -50501,7 +50501,7 @@ }, "summary": "Retrieves a list of cluster summary for a given cluster group", "tags": [ - "v1" + "dashboard" ] } }, @@ -50554,7 +50554,7 @@ }, "summary": "Retrieves a list of cluster summary for a given cluster group", "tags": [ - "v1" + "dashboard" ] } }, @@ -50601,7 +50601,7 @@ }, "summary": "Retrieves a list of cluster profiles filter summary Supported filter fields - [\"profileName\", \"tags\", \"profileType\", \"environment\"] Supported sort fields - [\"profileName\", \"environment\", \"profileType\", \"creationTimestamp\", \"lastModifiedTimestamp\"]", "tags": [ - "v1" + "dashboard" ] } }, @@ -50618,7 +50618,7 @@ }, "summary": "Retrieves a list of cluster profiles metadata", "tags": [ - "v1" + "dashboard" ] } }, @@ -50635,7 +50635,7 @@ }, "summary": "Retrieves a specified cluster profile summary", "tags": [ - "v1" + "dashboard" ] }, "parameters": [ @@ -50690,7 +50690,7 @@ }, "summary": "Retrieves a list of Edgehosts summary with provided search filter. Supported fields as per schema /v1/dashboard/edgehosts/search/schema", "tags": [ - "v1" + "dashboard" ] } }, @@ -50707,7 +50707,7 @@ }, "summary": "Retrieves a schema for the Edgehost search filter", "tags": [ - "v1" + "dashboard" ] } }, @@ -50754,7 +50754,7 @@ }, "summary": "Retrieves a list of PCG summary with provided search filter. 
Supported fields as per schema /v1/dashboard/pcgs/search/schema", "tags": [ - "v1" + "dashboard" ] } }, @@ -50771,7 +50771,7 @@ }, "summary": "Retrieves a schema for the PCG search filter", "tags": [ - "v1" + "dashboard" ] } }, @@ -50820,7 +50820,7 @@ }, "summary": "Retrieves a list of project summary", "tags": [ - "v1" + "dashboard" ] }, "post": { @@ -50864,7 +50864,7 @@ } }, "tags": [ - "v1" + "dashboard" ] } }, @@ -50889,7 +50889,7 @@ }, "summary": "Retrieves a list of projects metadata", "tags": [ - "v1" + "dashboard" ] } }, @@ -50937,7 +50937,7 @@ }, "summary": "Retrieves a list of cluster summary with provided filter spec Supported filter fields - [\"cpuUsage\", \"memoryUsage\", \"clusterName\", \"tags\", \"healthState\", \"clusterStates\", \"isDeleted\", \"environments\", \"metricPeriod\"] Supported sort fields - [\"environment\", \"clusterName\", \"memoryUsage\", \"healthState\", \"creationTimestamp\", \"lastModifiedTimestamp\"]", "tags": [ - "v1" + "dashboard" ] } }, @@ -50963,7 +50963,7 @@ }, "summary": "Retrieves spectro clusters cloud cost summary information", "tags": [ - "v1" + "dashboard" ] } }, @@ -51003,7 +51003,7 @@ }, "summary": "Retrieves a list of running, non rbac configured clusters in a workspace", "tags": [ - "v1" + "dashboard" ] } }, @@ -51031,7 +51031,7 @@ }, "summary": "Retrieves a list of cluster summary metadata", "tags": [ - "v1" + "dashboard" ] }, "post": { @@ -51055,7 +51055,7 @@ }, "summary": "Retrieves a list of cluster summary", "tags": [ - "v1" + "dashboard" ] } }, @@ -51081,7 +51081,7 @@ }, "summary": "Retrieves a list of cluster metadata with provided search filter spec Supported sort fields - [\"environment\", \"clusterName\", \"clusterState\", \"creationTimestamp\", \"lastModifiedTimestamp\"]", "tags": [ - "v1" + "dashboard" ] } }, @@ -51098,7 +51098,7 @@ }, "summary": "Retrieves a schema for the cluster metadata search filter", "tags": [ - "v1" + "dashboard" ] } }, @@ -51124,7 +51124,7 @@ }, "summary": "Retrieves spectro clusters resource consumption", "tags": [ - "v1" + "dashboard" ] } }, @@ -51150,7 +51150,7 @@ }, "summary": "Retrieves spectro clusters resources cost summary information", "tags": [ - "v1" + "dashboard" ] } }, @@ -51176,7 +51176,7 @@ }, "summary": "Retrieves spectro clusters resources usage summary information", "tags": [ - "v1" + "dashboard" ] } }, @@ -51223,7 +51223,7 @@ }, "summary": "Retrieves a list of cluster summary with provided search filter spec Supported sort fields - [\"environment\", \"clusterName\", \"memoryUsage\", \"healthState\", \"creationTimestamp\", \"lastModifiedTimestamp\"]", "tags": [ - "v1" + "dashboard" ] } }, @@ -51267,7 +51267,7 @@ }, "summary": "Export and download the list of cluster summary with matching search filter and download as a file(csv)", "tags": [ - "v1" + "dashboard" ] }, "post": { @@ -51309,7 +51309,7 @@ }, "summary": "Export the list of cluster summary with matching search filter and download as a file(csv) Supported sort fields - [\"environment\", \"clusterName\", \"healthState\", \"creationTimestamp\", \"lastModifiedTimestamp\"]", "tags": [ - "v1" + "dashboard" ] } }, @@ -51326,7 +51326,7 @@ }, "summary": "Retrieves a supported input values for the cluster search filter", "tags": [ - "v1" + "dashboard" ] } }, @@ -51343,7 +51343,7 @@ }, "summary": "Retrieves a schema for the cluster search filter", "tags": [ - "v1" + "dashboard" ] } }, @@ -51360,7 +51360,7 @@ }, "summary": "Retrieves a list of Virtual machine enabled clusters", "tags": [ - "v1" + "dashboard" ] } }, @@ -51377,7 +51377,7 
@@ }, "summary": "Returns the specified cluster summary", "tags": [ - "v1" + "dashboard" ] }, "parameters": [ @@ -51426,7 +51426,7 @@ }, "summary": "Retrieves the specified cluster cost summary", "tags": [ - "v1" + "dashboard" ] }, "parameters": [ @@ -51451,7 +51451,7 @@ }, "summary": "Returns the specified cluster summary overview", "tags": [ - "v1" + "dashboard" ] }, "parameters": [ @@ -51493,7 +51493,7 @@ }, "summary": "Retrieves specified spectro cluster resource consumption", "tags": [ - "v1" + "dashboard" ] } }, @@ -51527,7 +51527,7 @@ }, "summary": "Retrieves specified cluster workloads", "tags": [ - "v1" + "dashboard" ] } }, @@ -51561,7 +51561,7 @@ }, "summary": "Retrieves specified cluster workload clusterrolebindings", "tags": [ - "v1" + "dashboard" ] } }, @@ -51595,7 +51595,7 @@ }, "summary": "Retrieves specified cluster workload cronjobs", "tags": [ - "v1" + "dashboard" ] } }, @@ -51629,7 +51629,7 @@ }, "summary": "Retrieves specified cluster workload daemonsets", "tags": [ - "v1" + "dashboard" ] } }, @@ -51663,7 +51663,7 @@ }, "summary": "Retrieves specified cluster workload deployments", "tags": [ - "v1" + "dashboard" ] } }, @@ -51697,7 +51697,7 @@ }, "summary": "Retrieves specified cluster workload jobs", "tags": [ - "v1" + "dashboard" ] } }, @@ -51731,7 +51731,7 @@ }, "summary": "Retrieves specified cluster workload namespaces", "tags": [ - "v1" + "dashboard" ] } }, @@ -51765,7 +51765,7 @@ }, "summary": "Retrieves specified cluster workload pods", "tags": [ - "v1" + "dashboard" ] } }, @@ -51799,7 +51799,7 @@ }, "summary": "Retrieves specified cluster workload rolebindings", "tags": [ - "v1" + "dashboard" ] } }, @@ -51833,7 +51833,7 @@ }, "summary": "Retrieves specified cluster workload statefulsets", "tags": [ - "v1" + "dashboard" ] } }, @@ -51850,7 +51850,7 @@ }, "summary": "Retrieves a list of workspace", "tags": [ - "v1" + "dashboard" ] } }, @@ -51884,7 +51884,7 @@ }, "summary": "Retrieves specified workspace clusters workload clusterrolebindings", "tags": [ - "v1" + "dashboard" ] } }, @@ -51918,7 +51918,7 @@ }, "summary": "Retrieves specified workspace clusters workload cronjobs", "tags": [ - "v1" + "dashboard" ] } }, @@ -51952,7 +51952,7 @@ }, "summary": "Retrieves specified workspace clusters workload daemonsets", "tags": [ - "v1" + "dashboard" ] } }, @@ -51986,7 +51986,7 @@ }, "summary": "Retrieves specified workspace clusters workload deployments", "tags": [ - "v1" + "dashboard" ] } }, @@ -52020,7 +52020,7 @@ }, "summary": "Retrieves specified workspace clusters workload jobs", "tags": [ - "v1" + "dashboard" ] } }, @@ -52054,7 +52054,7 @@ }, "summary": "Retrieves specified workspace clusters workload namespaces", "tags": [ - "v1" + "dashboard" ] } }, @@ -52088,7 +52088,7 @@ }, "summary": "Retrieves specified workspace clusters workload pods", "tags": [ - "v1" + "dashboard" ] } }, @@ -52122,7 +52122,7 @@ }, "summary": "Retrieves specified workspace clusters workload rolebindings", "tags": [ - "v1" + "dashboard" ] } }, @@ -52156,7 +52156,7 @@ }, "summary": "Retrieves specified workspace clusters workload statefulsets", "tags": [ - "v1" + "dashboard" ] } }, @@ -52188,7 +52188,7 @@ }, "summary": "sync data to cloud watch", "tags": [ - "v1" + "datasinks" ] } }, @@ -52217,7 +52217,7 @@ }, "summary": "Retrieves a list of registered edge host devices", "tags": [ - "v1" + "edgehosts" ] }, "post": { @@ -52247,7 +52247,7 @@ }, "summary": "Create the edge host device", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52284,7 +52284,7 @@ }, "summary": "Retrieves a list of edge hosts 
metadata matching the filter condition", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52310,7 +52310,7 @@ }, "summary": "Registers the edge host device", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52327,7 +52327,7 @@ }, "summary": "Retrieves a list of edge tokens", "tags": [ - "v1" + "edgehosts" ] }, "post": { @@ -52357,7 +52357,7 @@ }, "summary": "Create the edge token", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52371,7 +52371,7 @@ }, "summary": "Deletes the specified edge token", "tags": [ - "v1" + "edgehosts" ] }, "get": { @@ -52386,7 +52386,7 @@ }, "summary": "Returns the specified edge token", "tags": [ - "v1" + "edgehosts" ] }, "parameters": [ @@ -52416,7 +52416,7 @@ }, "summary": "Updates the specified edge token", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52448,7 +52448,7 @@ }, "summary": "Revoke or re-activate the edge token access", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52462,7 +52462,7 @@ }, "summary": "Deletes the specified edge host device", "tags": [ - "v1" + "edgehosts" ] }, "get": { @@ -52486,7 +52486,7 @@ }, "summary": "Returns the specified edge host device", "tags": [ - "v1" + "edgehosts" ] }, "parameters": [ @@ -52515,7 +52515,7 @@ }, "summary": "Updates the specified edge host device", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52529,7 +52529,7 @@ }, "summary": "Deassociate the clusters to the edge host", "tags": [ - "v1" + "edgehosts" ] }, "parameters": [ @@ -52558,7 +52558,7 @@ }, "summary": "Associate the clusters to the edge host", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52589,7 +52589,7 @@ }, "summary": "Updates the edge host health", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52620,7 +52620,7 @@ }, "summary": "Update the specified edge host device host check sum", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52651,7 +52651,7 @@ }, "summary": "Update the specified edge host device host pairing key", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52682,7 +52682,7 @@ }, "summary": "Updates the specified edge host device meta", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52722,7 +52722,7 @@ }, "summary": "Returns the specified edge host's manifest", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52753,7 +52753,7 @@ }, "summary": "Patch update specified edge host's packs status", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52778,7 +52778,7 @@ }, "summary": "Returns the associated profiles of a specified edge host device", "tags": [ - "v1" + "edgehosts" ] }, "parameters": [ @@ -52807,7 +52807,7 @@ }, "summary": "Associate cluster profiles to the specified edge host device", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52836,7 +52836,7 @@ }, "summary": "Reset the cluster through edge host", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52862,7 +52862,7 @@ }, "summary": "Download the specified edge host device spc", "tags": [ - "v1" + "edgehosts" ] }, "parameters": [ @@ -52901,7 +52901,7 @@ }, "summary": "Updates the specified edge host device vsphere properties", "tags": [ - "v1" + "edgehosts" ] } }, @@ -52960,7 +52960,7 @@ }, "summary": "Returns a paginated list of component events based on request parameters", "tags": [ - "v1" + "events" ] }, "post": { @@ -52991,7 +52991,7 @@ }, "summary": "Creates a component event", "tags": [ - "v1" + "events" ] } }, @@ -53018,7 +53018,7 @@ }, "summary": "Creates the component events in bulk", "tags": [ - "v1" + "events" ] } }, @@ -53032,7 +53032,7 @@ }, "summary": "Delete all the components events for the specified related object", "tags": [ - "v1" + "events" ] }, "get": { @@ -53089,7 +53089,7 @@ }, "summary": "Returns a list of components events for the 
specified related object", "tags": [ - "v1" + "events" ] }, "parameters": [ @@ -53149,7 +53149,7 @@ }, "summary": "Returns a list of Filters", "tags": [ - "v1" + "filters" ] } }, @@ -53174,7 +53174,7 @@ }, "summary": "Returns a list of Filters metadata", "tags": [ - "v1" + "filters" ] } }, @@ -53206,7 +53206,7 @@ }, "summary": "Creates a Tag filter", "tags": [ - "v1" + "filters" ] } }, @@ -53220,7 +53220,7 @@ }, "summary": "Delete the specified Filter object", "tags": [ - "v1" + "filters" ] }, "get": { @@ -53235,7 +53235,7 @@ }, "summary": "Returns the specified Filter object", "tags": [ - "v1" + "filters" ] }, "parameters": [ @@ -53264,7 +53264,7 @@ }, "summary": "Updates a Tag filter", "tags": [ - "v1" + "filters" ] } }, @@ -53279,7 +53279,7 @@ }, "summary": "Deletes the spectroinstaller entity", "tags": [ - "v1" + "installers" ] }, "parameters": [ @@ -53366,7 +53366,7 @@ }, "summary": "Retrieves the list of metrics for a specified resource kind", "tags": [ - "v1" + "metrics" ] } }, @@ -53401,7 +53401,7 @@ }, "summary": "Deletes the metrics of the specified resource", "tags": [ - "v1" + "metrics" ] }, "get": { @@ -53480,7 +53480,7 @@ }, "summary": "Returns the metrics for a specified resource uid", "tags": [ - "v1" + "metrics" ] } }, @@ -53539,7 +53539,7 @@ }, "summary": "Returns a paginated list of notifications based on request parameters", "tags": [ - "v1" + "notifications" ] } }, @@ -53572,7 +53572,7 @@ }, "summary": "Creates a notification event", "tags": [ - "v1" + "notifications" ] } }, @@ -53631,7 +53631,7 @@ }, "summary": "Returns a list of notifications for the specified related object", "tags": [ - "v1" + "notifications" ] }, "parameters": [ @@ -53682,7 +53682,7 @@ }, "summary": "Updates the specified notification for the acknowledgment", "tags": [ - "v1" + "notifications" ] } }, @@ -53706,7 +53706,7 @@ }, "summary": "Updates the specified notification action as done", "tags": [ - "v1" + "notifications" ] } }, @@ -53730,7 +53730,7 @@ }, "summary": "Retrieves a list of overlords owned by the tenant", "tags": [ - "v1" + "overlords" ] } }, @@ -53747,7 +53747,7 @@ }, "summary": "Returns the manifests required for the private gateway installation", "tags": [ - "v1" + "overlords" ] }, "parameters": [ @@ -53795,7 +53795,7 @@ }, "summary": "create the maas cloudaccount for the private gateway", "tags": [ - "v1" + "overlords" ] }, "put": { @@ -53816,7 +53816,7 @@ }, "summary": "update the maas cloudaccount for the private gateway", "tags": [ - "v1" + "overlords" ] } }, @@ -53857,7 +53857,7 @@ }, "summary": "validate the maas cloudaccount for the private gateway", "tags": [ - "v1" + "overlords" ] } }, @@ -53897,7 +53897,7 @@ }, "summary": "create the maas cloud config for the private gateway", "tags": [ - "v1" + "overlords" ] }, "put": { @@ -53918,7 +53918,7 @@ }, "summary": "update the maas cloud config for the private gateway", "tags": [ - "v1" + "overlords" ] } }, @@ -53935,7 +53935,7 @@ }, "summary": "Returns the specified maas private gateway cluster profile", "tags": [ - "v1" + "overlords" ] }, "parameters": [ @@ -53966,7 +53966,7 @@ }, "summary": "migrate all the clusters from source overlord to target overlord", "tags": [ - "v1" + "overlords" ] } }, @@ -53983,7 +53983,7 @@ }, "summary": "Returns the manifests required for the private gateway installation", "tags": [ - "v1" + "overlords" ] }, "parameters": [ @@ -54031,7 +54031,7 @@ }, "summary": "create the OpenStack cloudaccount for the private gateway", "tags": [ - "v1" + "overlords" ] }, "put": { @@ -54052,7 +54052,7 @@ }, 
"summary": "update the OpenStack cloudaccount for the private gateway", "tags": [ - "v1" + "overlords" ] } }, @@ -54093,7 +54093,7 @@ }, "summary": "validate the OpenStack cloudaccount for the private gateway", "tags": [ - "v1" + "overlords" ] } }, @@ -54133,7 +54133,7 @@ }, "summary": "create the OpenStack cloud config for the private gateway", "tags": [ - "v1" + "overlords" ] }, "put": { @@ -54154,7 +54154,7 @@ }, "summary": "update the OpenStack cloud config for the private gateway", "tags": [ - "v1" + "overlords" ] } }, @@ -54171,7 +54171,7 @@ }, "summary": "Returns the specified OpenStack private gateway cluster profile", "tags": [ - "v1" + "overlords" ] }, "parameters": [ @@ -54208,7 +54208,7 @@ }, "summary": "Returns the pairing code for the private gateway", "tags": [ - "v1" + "overlords" ] } }, @@ -54225,7 +54225,7 @@ }, "summary": "Returns the manifests required for the private gateway installation", "tags": [ - "v1" + "overlords" ] }, "parameters": [ @@ -54250,7 +54250,7 @@ }, "summary": "Returns overlord's ova information", "tags": [ - "v1" + "overlords" ] } }, @@ -54290,7 +54290,7 @@ }, "summary": "create the vSphere cloudaccount for the private gateway", "tags": [ - "v1" + "overlords" ] }, "put": { @@ -54311,7 +54311,7 @@ }, "summary": "update the vSphere cloudaccount for the private gateway", "tags": [ - "v1" + "overlords" ] } }, @@ -54352,7 +54352,7 @@ }, "summary": "validate the vSphere cloudaccount for the private gateway", "tags": [ - "v1" + "overlords" ] } }, @@ -54392,7 +54392,7 @@ }, "summary": "create the vSphere cloud config for the private gateway", "tags": [ - "v1" + "overlords" ] }, "put": { @@ -54413,7 +54413,7 @@ }, "summary": "update the vSphere cloud config for the private gateway", "tags": [ - "v1" + "overlords" ] } }, @@ -54430,7 +54430,7 @@ }, "summary": "Returns the specified vsphere private gateway cluster profile", "tags": [ - "v1" + "overlords" ] }, "parameters": [ @@ -54455,7 +54455,7 @@ }, "summary": "Retrieves a list of IP Pools for the specified private gateway", "tags": [ - "v1" + "overlords" ] }, "parameters": [ @@ -54493,7 +54493,7 @@ }, "summary": "Creates an IP pool defintion for the sepcified private gateway", "tags": [ - "v1" + "overlords" ] } }, @@ -54507,7 +54507,7 @@ }, "summary": "Deletes the private gateways's specified IP Pool data", "tags": [ - "v1" + "overlords" ] }, "parameters": [ @@ -54542,7 +54542,7 @@ }, "summary": "Updates the private gateways's specified IP Pool data", "tags": [ - "v1" + "overlords" ] } }, @@ -54559,7 +54559,7 @@ }, "summary": "Retrieves the vSphere computecluster resources for the specified private gateway's account", "tags": [ - "v1" + "overlords" ] }, "parameters": [ @@ -54594,9 +54594,9 @@ } } }, - "summary": "Retrieves the vSphere datacenters \u0026 datacluster for the specified private gateway's account", + "summary": "Retrieves the vSphere datacenters & datacluster for the specified private gateway's account", "tags": [ - "v1" + "overlords" ] }, "parameters": [ @@ -54621,7 +54621,7 @@ }, "summary": "delete the private gateway", "tags": [ - "v1" + "overlords" ] }, "get": { @@ -54636,7 +54636,7 @@ }, "summary": "Returns the specified private gateway's for the given uid", "tags": [ - "v1" + "overlords" ] }, "parameters": [ @@ -54675,7 +54675,7 @@ }, "summary": "update the private gateway's metadata", "tags": [ - "v1" + "overlords" ] } }, @@ -54700,7 +54700,7 @@ }, "summary": "reset the private gateway by disaaociating the private gateway's resources", "tags": [ - "v1" + "overlords" ] } }, @@ -54725,7 
+54725,7 @@ }, "summary": "Deletes the packs", "tags": [ - "v1" + "packs" ] }, "get": { @@ -54781,7 +54781,7 @@ }, "summary": "Retrieves a list of packs", "tags": [ - "v1" + "packs" ] } }, @@ -54828,7 +54828,7 @@ }, "summary": "Retrieves a list of packs based on filter", "tags": [ - "v1" + "packs" ] } }, @@ -54845,7 +54845,7 @@ }, "summary": "Retrieves a list of packs", "tags": [ - "v1" + "packs" ] }, "parameters": [ @@ -54912,7 +54912,7 @@ }, "summary": "Returns the logo for a specified pack", "tags": [ - "v1" + "packs" ] }, "parameters": [ @@ -54938,7 +54938,7 @@ }, "summary": "Returns the specified pack", "tags": [ - "v1" + "packs" ] }, "parameters": [ @@ -54973,7 +54973,7 @@ }, "summary": "Returns the private gateway manifest link", "tags": [ - "v1" + "pcg" ] } }, @@ -55008,7 +55008,7 @@ }, "summary": "Registers the pcg", "tags": [ - "v1" + "pcg" ] } }, @@ -55034,7 +55034,7 @@ }, "summary": "Returns the pcg ally manifest", "tags": [ - "v1" + "pcg" ] }, "parameters": [ @@ -55068,7 +55068,7 @@ }, "summary": "Returns the pcg jet manifest", "tags": [ - "v1" + "pcg" ] }, "parameters": [ @@ -55106,7 +55106,7 @@ }, "summary": "Retrieves a list of permissions", "tags": [ - "v1" + "permissions" ] } }, @@ -55165,7 +55165,7 @@ }, "summary": "Retrieves a list of projects", "tags": [ - "v1" + "projects" ] }, "post": { @@ -55195,7 +55195,7 @@ }, "summary": "Creates a project", "tags": [ - "v1" + "projects" ] } }, @@ -55212,7 +55212,7 @@ }, "summary": "Retrieves a list of supported alerts for a project", "tags": [ - "v1" + "projects" ] } }, @@ -55240,7 +55240,7 @@ }, "summary": "Deletes the specified project", "tags": [ - "v1" + "projects" ] }, "get": { @@ -55255,7 +55255,7 @@ }, "summary": "Returns the specified project", "tags": [ - "v1" + "projects" ] }, "parameters": [ @@ -55284,7 +55284,7 @@ }, "summary": "Updates the specified project", "tags": [ - "v1" + "projects" ] } }, @@ -55298,7 +55298,7 @@ }, "summary": "Deletes the specified alert to the specified project", "tags": [ - "v1" + "projects" ] }, "parameters": [ @@ -55342,7 +55342,7 @@ }, "summary": "Create the specified alert to the specified project", "tags": [ - "v1" + "projects" ] }, "put": { @@ -55363,7 +55363,7 @@ }, "summary": "Upsert the specified alert to the specified project", "tags": [ - "v1" + "projects" ] } }, @@ -55377,7 +55377,7 @@ }, "summary": "Deletes the specified alert of the specified project", "tags": [ - "v1" + "projects" ] }, "get": { @@ -55392,7 +55392,7 @@ }, "summary": "Get the specified alert of the specified project", "tags": [ - "v1" + "projects" ] }, "parameters": [ @@ -55433,7 +55433,7 @@ }, "summary": "Update the specified alert of the specified project", "tags": [ - "v1" + "projects" ] } }, @@ -55456,7 +55456,7 @@ }, "summary": "Delete the macros for the specified project by macro name", "tags": [ - "v1" + "projects" ] }, "get": { @@ -55471,7 +55471,7 @@ }, "summary": "List the macros of the specified project", "tags": [ - "v1" + "projects" ] }, "parameters": [ @@ -55500,7 +55500,7 @@ }, "summary": "Update the macros for the specified project by macro name", "tags": [ - "v1" + "projects" ] }, "post": { @@ -55521,7 +55521,7 @@ }, "summary": "Create or add new macros for the specified project", "tags": [ - "v1" + "projects" ] }, "put": { @@ -55542,7 +55542,7 @@ }, "summary": "Update the macros of the specified project", "tags": [ - "v1" + "projects" ] } }, @@ -55573,7 +55573,7 @@ }, "summary": "Update the metadata of the specified project", "tags": [ - "v1" + "projects" ] } }, @@ -55590,7 +55590,7 @@ }, 
"summary": "Get project cluster settings", "tags": [ - "v1" + "projects" ] }, "parameters": [ @@ -55635,7 +55635,7 @@ }, "summary": "Update project clusters nodes auto remediation setting", "tags": [ - "v1" + "projects" ] } }, @@ -55666,7 +55666,7 @@ }, "summary": "Update the teams association to the specified project", "tags": [ - "v1" + "projects" ] } }, @@ -55697,7 +55697,7 @@ }, "summary": "Update the users association to the specified project", "tags": [ - "v1" + "projects" ] } }, @@ -55714,7 +55714,7 @@ }, "summary": "Validate and returns active resource of project before delete", "tags": [ - "v1" + "projects" ] }, "parameters": [ @@ -55780,7 +55780,7 @@ }, "summary": "Retrieves a list of Helm registries", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -55823,7 +55823,7 @@ }, "summary": "Creates a helm registry", "tags": [ - "v1" + "registries" ] } }, @@ -55881,7 +55881,7 @@ }, "summary": "Retrieves a list of helm registries as summary", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -55925,7 +55925,7 @@ }, "summary": "Check if helm registry is valid", "tags": [ - "v1" + "registries" ] } }, @@ -55939,7 +55939,7 @@ }, "summary": "Deletes the specified helm registry", "tags": [ - "v1" + "registries" ] }, "get": { @@ -55954,7 +55954,7 @@ }, "summary": "Returns the specified Helm registry", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -55983,7 +55983,7 @@ }, "summary": "Updates the specified helm registry", "tags": [ - "v1" + "registries" ] } }, @@ -56012,7 +56012,7 @@ }, "summary": "Sync Helm registry", "tags": [ - "v1" + "registries" ] } }, @@ -56030,7 +56030,7 @@ }, "summary": "Get helm registry sync status", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -56055,7 +56055,7 @@ }, "summary": "Retrieves a list of registries metadata", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -56106,7 +56106,7 @@ }, "summary": "Creates a basic oci registry", "tags": [ - "v1" + "registries" ] } }, @@ -56137,7 +56137,7 @@ }, "summary": "Check if oci registry is valid", "tags": [ - "v1" + "registries" ] } }, @@ -56175,7 +56175,7 @@ }, "summary": "Creates a ecr registry", "tags": [ - "v1" + "registries" ] } }, @@ -56206,7 +56206,7 @@ }, "summary": "Check if ecr registry is valid", "tags": [ - "v1" + "registries" ] } }, @@ -56223,7 +56223,7 @@ }, "summary": "Creates a image registry", "tags": [ - "v1" + "registries" ] } }, @@ -56240,7 +56240,7 @@ }, "summary": "Retrieves a oci registries summary", "tags": [ - "v1" + "registries" ] } }, @@ -56257,7 +56257,7 @@ }, "summary": "Returns the information of specified oci registry", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -56284,7 +56284,7 @@ }, "summary": "Deletes the specified basic oci registry", "tags": [ - "v1" + "registries" ] }, "get": { @@ -56299,7 +56299,7 @@ }, "summary": "Returns the basic oci registry", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -56328,7 +56328,7 @@ }, "summary": "Updates the specified basic oci registry", "tags": [ - "v1" + "registries" ] } }, @@ -56357,7 +56357,7 @@ }, "summary": "Sync oci registry", "tags": [ - "v1" + "registries" ] } }, @@ -56375,7 +56375,7 @@ }, "summary": "Get oci registry sync status", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -56397,7 +56397,7 @@ }, "summary": "Deletes the specified ecr registry", "tags": [ - "v1" + "registries" ] }, "get": { @@ -56412,7 +56412,7 @@ }, "summary": "Returns the specified ecr registry", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -56441,7 +56441,7 @@ }, "summary": "Updates the 
specified ecr registry", "tags": [ - "v1" + "registries" ] } }, @@ -56470,7 +56470,7 @@ }, "summary": "Sync ecr registry", "tags": [ - "v1" + "registries" ] } }, @@ -56488,7 +56488,7 @@ }, "summary": "Get ecr registry sync status", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -56554,7 +56554,7 @@ }, "summary": "Retrieves a list of Pack registries", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -56603,7 +56603,7 @@ }, "summary": "Creates a pack registry", "tags": [ - "v1" + "registries" ] } }, @@ -56661,7 +56661,7 @@ }, "summary": "Retrieves a list of pack registries as summary", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -56705,7 +56705,7 @@ }, "summary": "Check if pack registry is valid", "tags": [ - "v1" + "registries" ] } }, @@ -56719,7 +56719,7 @@ }, "summary": "Deletes the specified pack registry", "tags": [ - "v1" + "registries" ] }, "get": { @@ -56734,7 +56734,7 @@ }, "summary": "Returns the specified Pack registry", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -56763,7 +56763,7 @@ }, "summary": "Updates the specified pack registry", "tags": [ - "v1" + "registries" ] } }, @@ -56792,7 +56792,7 @@ }, "summary": "Sync Pack registry", "tags": [ - "v1" + "registries" ] } }, @@ -56810,7 +56810,7 @@ }, "summary": "Get pack registry sync status", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -56835,7 +56835,7 @@ }, "summary": "Returns the specified system scope registry configuration", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -56857,7 +56857,7 @@ }, "summary": "Deletes the specified registry", "tags": [ - "v1" + "registries" ] }, "parameters": [ @@ -56923,7 +56923,7 @@ }, "summary": "Retrieves a list of roles", "tags": [ - "v1" + "roles" ] }, "post": { @@ -56953,7 +56953,7 @@ }, "summary": "Creates a role with specified permissions", "tags": [ - "v1" + "roles" ] } }, @@ -56967,7 +56967,7 @@ }, "summary": "Deletes the specified role", "tags": [ - "v1" + "roles" ] }, "get": { @@ -56982,7 +56982,7 @@ }, "summary": "Returns the specified role", "tags": [ - "v1" + "roles" ] }, "parameters": [ @@ -57011,7 +57011,7 @@ }, "summary": "Updates the specified role", "tags": [ - "v1" + "roles" ] } }, @@ -57051,7 +57051,7 @@ }, "summary": "Clone the role", "tags": [ - "v1" + "roles" ] } }, @@ -57105,7 +57105,7 @@ }, "summary": "Returns a latest version for a given service name", "tags": [ - "v1" + "services" ] } }, @@ -57184,7 +57184,7 @@ }, "summary": "Returns a service manifest for a given service name and version", "tags": [ - "v1" + "services" ] } }, @@ -57243,7 +57243,7 @@ }, "summary": "Retrieves a list of clusters", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57277,7 +57277,7 @@ } }, "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57309,7 +57309,7 @@ }, "summary": "Creates an AKS cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57346,7 +57346,7 @@ }, "summary": "Get AKS cluster estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57372,7 +57372,7 @@ }, "summary": "Validates AKS cluster create operation", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57404,7 +57404,7 @@ }, "summary": "Creates an AWS cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57436,7 +57436,7 @@ }, "summary": "Imports an AWS cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57473,7 +57473,7 @@ }, "summary": "Get AWS cluster estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57499,7 +57499,7 @@ }, "summary": "Validates AWS cluster create operation", "tags": [ - "v1" + 
"spectroclusters" ] } }, @@ -57531,7 +57531,7 @@ }, "summary": "Creates an Azure cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57563,7 +57563,7 @@ }, "summary": "Imports an Azure cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57600,7 +57600,7 @@ }, "summary": "Get Azure cluster estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57626,7 +57626,7 @@ }, "summary": "Validates Azure cluster create operation", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57643,7 +57643,7 @@ }, "summary": "Cluster configuration for the edge installer", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57675,7 +57675,7 @@ }, "summary": "Creates a CoxEdge cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57712,7 +57712,7 @@ }, "summary": "Get Cox Edge cluster estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57738,7 +57738,7 @@ }, "summary": "Validates Cox Edge cluster create operation", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57770,7 +57770,7 @@ }, "summary": "Creates a Edge cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57802,7 +57802,7 @@ }, "summary": "Creates an EdgeNative cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57834,7 +57834,7 @@ }, "summary": "Imports an EdgeNative cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57871,7 +57871,7 @@ }, "summary": "Get edge-native cluster estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57897,7 +57897,7 @@ }, "summary": "Validates edge-native cluster create operation", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57929,7 +57929,7 @@ }, "summary": "Imports an Edge cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57966,7 +57966,7 @@ }, "summary": "Get edge cluster estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -57992,7 +57992,7 @@ }, "summary": "Validates edge cluster create operation", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58024,7 +58024,7 @@ }, "summary": "Creates an EKS cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58061,7 +58061,7 @@ }, "summary": "Get EKS cluster estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58087,7 +58087,7 @@ }, "summary": "Validates EKS cluster create operation", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58104,7 +58104,7 @@ }, "summary": "Returns the cluster object references based on locationUid", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -58133,7 +58133,7 @@ }, "summary": "Change cluster backup location", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58168,7 +58168,7 @@ }, "summary": "Download log fetcher logs for cluster by log fetcher uid", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -58223,7 +58223,7 @@ }, "summary": "Update log fetcher logs by log fetcher uid", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58255,7 +58255,7 @@ }, "summary": "Creates a GCP cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58287,7 +58287,7 @@ }, "summary": "Imports a GCP cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58324,7 +58324,7 @@ }, "summary": "Get GCP cluster estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58350,7 +58350,7 @@ }, "summary": "Validates GCP cluster create operation", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58383,7 +58383,7 @@ }, "summary": "Imports a cluster of any cloud type in generic way", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58420,7 +58420,7 @@ }, "summary": "Get generic cluster 
estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58452,7 +58452,7 @@ }, "summary": "Creates an GKE cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58489,7 +58489,7 @@ }, "summary": "Get GKE cluster estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58515,7 +58515,7 @@ }, "summary": "Validates GKE cluster create operation", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58547,7 +58547,7 @@ }, "summary": "Creates a Libvirt cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58579,7 +58579,7 @@ }, "summary": "Imports a libvirt cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58616,7 +58616,7 @@ }, "summary": "Get libvirt cluster estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58642,7 +58642,7 @@ }, "summary": "Validates libvirt cluster create operation", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58674,7 +58674,7 @@ }, "summary": "Creates a MAAS cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58706,7 +58706,7 @@ }, "summary": "Imports a Maas cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58743,7 +58743,7 @@ }, "summary": "Get maas cluster estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58769,7 +58769,7 @@ }, "summary": "Validates MAAS cluster create operation", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58801,7 +58801,7 @@ }, "summary": "Creates a OpenStack cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58833,7 +58833,7 @@ }, "summary": "Imports an OpenStack cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58870,7 +58870,7 @@ }, "summary": "Get openstack cluster estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58896,7 +58896,7 @@ }, "summary": "Validates OpenStack cluster create operation", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58928,7 +58928,7 @@ }, "summary": "Creates a Tke cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58965,7 +58965,7 @@ }, "summary": "Get TKE cluster estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -58991,7 +58991,7 @@ }, "summary": "Validates TKE cluster create operation", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59008,7 +59008,7 @@ }, "summary": "Get cluster settings by context", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59036,7 +59036,7 @@ }, "summary": "Validates the cluster name", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59062,7 +59062,7 @@ }, "summary": "Validates spectro cluster packs", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59094,7 +59094,7 @@ }, "summary": "Creates a virtual cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59120,7 +59120,7 @@ }, "summary": "Validates virtual cluster create operation", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59137,7 +59137,7 @@ }, "summary": "Get the default values yaml", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59169,7 +59169,7 @@ }, "summary": "Creates a vSphere cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59201,7 +59201,7 @@ }, "summary": "Imports a vSphere cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59238,7 +59238,7 @@ }, "summary": "Get vSphere cluster estimated rate information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59264,7 +59264,7 @@ }, "summary": "Validates vSphere cluster create operation", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59286,7 +59286,7 @@ }, "summary": "Deletes the specified cluster", "tags": [ - "v1" + "spectroclusters" ] }, "get": { @@ -59335,7 
+59335,7 @@ }, "summary": "Returns the specified cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -59361,7 +59361,7 @@ }, "summary": "Get the cluster asset doc", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -59396,7 +59396,7 @@ }, "summary": "Associate the assets for the cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59422,7 +59422,7 @@ }, "summary": "Returns the specified cluster's kube config file", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -59445,7 +59445,7 @@ }, "summary": "Deletes the cluster's frp kube config client data", "tags": [ - "v1" + "spectroclusters" ] }, "get": { @@ -59469,7 +59469,7 @@ }, "summary": "Returns the specified cluster's frp kube config file", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -59499,7 +59499,7 @@ }, "summary": "Updates the cluster's frp kube config data", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59534,7 +59534,7 @@ }, "summary": "Returns the specified cluster's kube config file", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -59564,7 +59564,7 @@ }, "summary": "Updates the cluster's manifest data", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59578,7 +59578,7 @@ }, "summary": "Deletes the cluster's kube config client data", "tags": [ - "v1" + "spectroclusters" ] }, "get": { @@ -59602,7 +59602,7 @@ }, "summary": "Returns the specified cluster's kube config client file", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -59632,7 +59632,7 @@ }, "summary": "Updates the cluster's kube config client data", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59649,7 +59649,7 @@ }, "summary": "Returns the specified cluster's manifest data", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -59679,7 +59679,7 @@ }, "summary": "Updates the specified cluster's manifest data", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59711,7 +59711,7 @@ }, "summary": "Deprecated. Updates the specified cluster's Cluster Role bindings", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59743,7 +59743,7 @@ }, "summary": "Updates the specified cluster controlPlane health check timeout", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59775,7 +59775,7 @@ }, "summary": "Updates the specified cluster host config", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59807,7 +59807,7 @@ }, "summary": "Updates the specified cluster Life cycle configuration", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59839,7 +59839,7 @@ }, "summary": "Updates the specified cluster OS patch configuration", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59856,7 +59856,7 @@ }, "summary": "Deprecated. 
Get the cluster RBAC information associated with a cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -59882,7 +59882,7 @@ }, "summary": "Retrieves namespaces for the specified cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -59912,7 +59912,7 @@ }, "summary": "Updates namespaces for the specified cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59929,7 +59929,7 @@ }, "summary": "Retrieves the specified namespace of the cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -59966,7 +59966,7 @@ }, "summary": "Updates the specified namespace of the cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -59983,7 +59983,7 @@ }, "summary": "Retrieves RBAC information for the specified cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60013,7 +60013,7 @@ }, "summary": "Updates RBAC information for the specified cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -60030,7 +60030,7 @@ }, "summary": "Retrieves the specified RBAC of the cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60067,7 +60067,7 @@ }, "summary": "Updates the specified RBAC of the cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -60093,7 +60093,7 @@ }, "summary": "Download the specified cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60119,7 +60119,7 @@ }, "summary": "Retrieves a list of edge host of edge-native cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60145,7 +60145,7 @@ }, "summary": "Retrieves a list of edge host of libvirt cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60168,7 +60168,7 @@ }, "summary": "Reset cluster backup schedule settings", "tags": [ - "v1" + "spectroclusters" ] }, "get": { @@ -60190,7 +60190,7 @@ }, "summary": "Returns the cluster backup result", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60228,7 +60228,7 @@ }, "summary": "Create cluster backup settings", "tags": [ - "v1" + "spectroclusters" ] }, "put": { @@ -60249,7 +60249,7 @@ }, "summary": "Update cluster backup settings", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -60289,7 +60289,7 @@ }, "summary": "Create on demand cluster backup", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -60303,7 +60303,7 @@ }, "summary": "Delete cluster backup", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60340,7 +60340,7 @@ }, "summary": "Returns the compliance scan of cluster, if driverType is provided then specific status of driverType will be returned", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60378,7 +60378,7 @@ }, "summary": "Create cluster compliance scan", "tags": [ - "v1" + "spectroclusters" ] }, "put": { @@ -60399,7 +60399,7 @@ }, "summary": "Update cluster compliance scan settings", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -60416,7 +60416,7 @@ }, "summary": "Returns the compliance scan log by cluster uid and driver type", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60455,7 +60455,7 @@ }, "summary": "Update the KubeBench compliance scan log by uid", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -60486,7 +60486,7 @@ }, "summary": "Update the KubeHunter compliance scan log by uid", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -60517,7 +60517,7 @@ }, "summary": "Update the Sonobuoy compliance scan log by uid", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -60548,7 +60548,7 @@ }, "summary": "Update the Syft compliance scan log by uid", "tags": [ - "v1" + "spectroclusters" ] } 
}, @@ -60562,7 +60562,7 @@ }, "summary": "Delete the compliance scan log by uid", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60593,7 +60593,7 @@ }, "summary": "Returns the KubeBench compliance scan log by uid", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60629,7 +60629,7 @@ }, "summary": "Returns the KubeHunter compliance scan log by uid", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60665,7 +60665,7 @@ }, "summary": "Returns the Sonobuoy compliance scan log by uid", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60701,7 +60701,7 @@ }, "summary": "Returns the Syft compliance scan log by uid", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60741,7 +60741,7 @@ }, "summary": "Returns the image sbom of syft scan log of cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60788,7 +60788,7 @@ }, "summary": "Downloads the driver cluster logs", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60860,7 +60860,7 @@ }, "summary": "Create on demand cluster compliance scan", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -60877,7 +60877,7 @@ }, "summary": "Get the installed helm charts of a specified cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60909,7 +60909,7 @@ }, "summary": "Get the log fetcher for cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60948,7 +60948,7 @@ }, "summary": "Create the log fetcher for cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -60965,7 +60965,7 @@ }, "summary": "Get the installed manifests of a specified cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -60997,7 +60997,7 @@ }, "summary": "Returns the cluster restore of cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -61045,7 +61045,7 @@ }, "summary": "Create on demand cluster restore", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61071,7 +61071,7 @@ }, "summary": "Returns the specified cluster's import manifest file", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -61102,7 +61102,7 @@ }, "summary": "Upgrade the specified imported read only cluster with full permissions", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61119,7 +61119,7 @@ }, "summary": "Get K8Certificate for spectro cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -61151,7 +61151,7 @@ }, "summary": "Sets the cluster master nodes Kubernetes certificates for renewal", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61168,7 +61168,7 @@ }, "summary": "Returns the specified cluster's kube config file", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -61194,7 +61194,7 @@ }, "summary": "Retrieves a list of edge hosts of the libvirt cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -61241,7 +61241,7 @@ }, "summary": "Associate the assets for the cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61273,7 +61273,7 @@ }, "summary": "Update the specified spectro cluster metadata", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61290,7 +61290,7 @@ }, "summary": "Returns available namespaces for the cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -61330,7 +61330,7 @@ }, "summary": "Returns k8s spectrocluster oidc", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61356,7 +61356,7 @@ }, "summary": "Returns k8s dashboard url", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61396,7 +61396,7 @@ }, "summary": "Returns the specified cluster's manifest", 
"tags": [ - "v1" + "spectroclusters" ] } }, @@ -61449,7 +61449,7 @@ }, "summary": "Get specified cluster pack properties", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61485,7 +61485,7 @@ }, "summary": "Updates the cluster's pack references", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61502,7 +61502,7 @@ }, "summary": "Returns the specified cluster's packs resolved values", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -61550,7 +61550,7 @@ }, "summary": "Patch update specified cluster's packs status", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61584,7 +61584,7 @@ }, "summary": "Returns the specified cluster's pack configuration", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61606,7 +61606,7 @@ }, "summary": "Replaces the specified cluster profile for the cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61623,7 +61623,7 @@ }, "summary": "Returns the profile updates of a specified cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -61655,7 +61655,7 @@ }, "summary": "Remove cluster profiles from the specified cluster", "tags": [ - "v1" + "spectroclusters" ] }, "get": { @@ -61678,7 +61678,7 @@ }, "summary": "Returns the associated profiles of a specified cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -61715,7 +61715,7 @@ }, "summary": "Patch cluster profiles to the specified cluster", "tags": [ - "v1" + "spectroclusters" ] }, "put": { @@ -61743,7 +61743,7 @@ }, "summary": "Associate cluster profiles to the specified cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61775,7 +61775,7 @@ }, "summary": "Returns the associated profile's pack manifests of a specified cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -61824,7 +61824,7 @@ }, "summary": "Returns the specified cluster's profile pack configuration", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61841,7 +61841,7 @@ }, "summary": "Returns the associated profiles pack manifests of the specified cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -61885,7 +61885,7 @@ }, "summary": "Updates cluster profiles pack manifests to the specified cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61902,7 +61902,7 @@ }, "summary": "Returns the estimated rate of the specified cluster", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -61952,7 +61952,7 @@ }, "summary": "reset the cluster s by deleting machine pools and condtions", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -61983,7 +61983,7 @@ }, "summary": "Updates the specified cluster status condition", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62017,7 +62017,7 @@ }, "summary": "Updates the specified cluster status conditions", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62051,7 +62051,7 @@ }, "summary": "Updates the specified cluster's service endpoints information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62073,7 +62073,7 @@ }, "summary": "Updates the specified cluster status as imported", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62107,7 +62107,7 @@ }, "summary": "Updates the specified cluster's services information", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62124,7 +62124,7 @@ }, "summary": "Returns the SPC apply information for the agent", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -62150,7 +62150,7 @@ }, "summary": "Set the CanBeApplied to true on the spcApply status. 
CanBeApplied indicates the agent to orchestrate the spc changes", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62181,7 +62181,7 @@ }, "summary": "Updates the agent patch time for the SPC changes", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62212,7 +62212,7 @@ }, "summary": "Updates the cluster's upgrade status", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62245,7 +62245,7 @@ }, "summary": "Validates cluster packs", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62288,7 +62288,7 @@ }, "summary": "Returns the list of virtual machines", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -62328,7 +62328,7 @@ }, "summary": "Create virtual machine", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62381,7 +62381,7 @@ }, "summary": "Returns the list of snapshots of given namespaces", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -62404,7 +62404,7 @@ }, "summary": "Deletes the virtual machine", "tags": [ - "v1" + "spectroclusters" ] }, "get": { @@ -62419,7 +62419,7 @@ }, "summary": "Get virtual machine", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -62466,7 +62466,7 @@ }, "summary": "Updates the specified virtual machine of the cluster", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62518,7 +62518,7 @@ }, "summary": "Add volume to the virtual machine instance", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62567,7 +62567,7 @@ }, "summary": "Clone virtual machine", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62610,7 +62610,7 @@ }, "summary": "Migrate the virtual machine", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62653,7 +62653,7 @@ }, "summary": "Pause the virtual machine instance", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62705,7 +62705,7 @@ }, "summary": "Remove volume from the virtual machine instance", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62748,7 +62748,7 @@ }, "summary": "Restart the virtual machine", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62791,7 +62791,7 @@ }, "summary": "Resume the virtual machine instance", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62840,7 +62840,7 @@ }, "summary": "Create snapshot of virtual machine", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62854,7 +62854,7 @@ }, "summary": "Delete the snapshot of virtual machine", "tags": [ - "v1" + "spectroclusters" ] }, "get": { @@ -62869,7 +62869,7 @@ }, "summary": "Get virtual machine snapshot", "tags": [ - "v1" + "spectroclusters" ] }, "parameters": [ @@ -62923,7 +62923,7 @@ }, "summary": "Updates the specified snapshot of a virtual machine", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -62966,7 +62966,7 @@ }, "summary": "Start the virtual machine", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -63009,7 +63009,7 @@ }, "summary": "Stop the virtual machine", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -63038,7 +63038,7 @@ }, "summary": "Sync specified cluster workload", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -63085,7 +63085,7 @@ }, "summary": "Sync specified cluster workload", "tags": [ - "v1" + "spectroclusters" ] } }, @@ -63143,7 +63143,7 @@ }, "summary": "Retrieves a list of teams", "tags": [ - "v1" + "teams" ] }, "post": { @@ -63173,7 +63173,7 @@ }, "summary": "Creates a team with the specified users and roles", "tags": [ - "v1" + "teams" ] } }, @@ -63232,7 +63232,7 @@ }, "summary": "Retrieves a list of team summary", "tags": [ - "v1" + "teams" ] }, "post": { @@ -63256,7 +63256,7 @@ }, "summary": "Retrieves a list of teams summary with provided filter spec", "tags": [ - "v1" + "teams" ] } }, @@ -63270,7 
+63270,7 @@ }, "summary": "Deletes the specified team", "tags": [ - "v1" + "teams" ] }, "get": { @@ -63285,7 +63285,7 @@ }, "summary": "Returns the sepcified team", "tags": [ - "v1" + "teams" ] }, "parameters": [ @@ -63315,7 +63315,7 @@ }, "summary": "Patches the specified team", "tags": [ - "v1" + "teams" ] }, "put": { @@ -63336,7 +63336,7 @@ }, "summary": "Updates the sepcified team", "tags": [ - "v1" + "teams" ] } }, @@ -63353,7 +63353,7 @@ }, "summary": "Returns the specified team's project and roles data", "tags": [ - "v1" + "teams" ] }, "parameters": [ @@ -63383,7 +63383,7 @@ }, "summary": "Updates the projects and roles for the specified team", "tags": [ - "v1" + "teams" ] } }, @@ -63401,7 +63401,7 @@ }, "summary": "Returns the specified individual and resource roles for a team", "tags": [ - "v1" + "teams" ] }, "parameters": [ @@ -63432,7 +63432,7 @@ }, "summary": "Add resource roles for team", "tags": [ - "v1" + "teams" ] } }, @@ -63446,7 +63446,7 @@ }, "summary": "Deleted the resource roles from team", "tags": [ - "v1" + "teams" ] }, "parameters": [ @@ -63483,7 +63483,7 @@ }, "summary": "Updates the resource roles for team", "tags": [ - "v1" + "teams" ] } }, @@ -63500,7 +63500,7 @@ }, "summary": "Returns the specified team's tenant roles", "tags": [ - "v1" + "teams" ] }, "parameters": [ @@ -63530,7 +63530,7 @@ }, "summary": "Updates the tenant roles of the specified team", "tags": [ - "v1" + "teams" ] } }, @@ -63561,7 +63561,7 @@ }, "summary": "Update tenant address", "tags": [ - "v1" + "tenants" ] } }, @@ -63578,7 +63578,7 @@ }, "summary": "lists the certificates for the tenant", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -63616,7 +63616,7 @@ }, "summary": "create the tenant certificate", "tags": [ - "v1" + "tenants" ] } }, @@ -63630,7 +63630,7 @@ }, "summary": "deletes the tenant certificate", "tags": [ - "v1" + "tenants" ] }, "get": { @@ -63645,7 +63645,7 @@ }, "summary": "Returns the ca certificate for the tenant", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -63680,7 +63680,7 @@ }, "summary": "updates the tenant certificate", "tags": [ - "v1" + "tenants" ] } }, @@ -63694,7 +63694,7 @@ }, "summary": "deletes the tenant data sink config", "tags": [ - "v1" + "tenants" ] }, "get": { @@ -63709,7 +63709,7 @@ }, "summary": "Returns data sink config of tenant", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -63747,7 +63747,7 @@ }, "summary": "create data sink config", "tags": [ - "v1" + "tenants" ] }, "put": { @@ -63768,7 +63768,7 @@ }, "summary": "updates the tenant data sink config", "tags": [ - "v1" + "tenants" ] } }, @@ -63785,7 +63785,7 @@ }, "summary": "Get tenant auth token settings", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -63820,7 +63820,7 @@ }, "summary": "Update tenant auth token settings", "tags": [ - "v1" + "tenants" ] } }, @@ -63848,7 +63848,7 @@ }, "summary": "Tenant to accept the contract agreement", "tags": [ - "v1" + "tenants" ] } }, @@ -63870,7 +63870,7 @@ }, "summary": "Deletes the aws credit account for tenants", "tags": [ - "v1" + "tenants" ] }, "get": { @@ -63885,7 +63885,7 @@ }, "summary": "Get the credit accounts for the tenants with free tier access", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -63910,7 +63910,7 @@ }, "summary": "retrieves the domains for tenant", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -63939,7 +63939,7 @@ }, "summary": "creates or updates domains for tenant", "tags": [ - "v1" + "tenants" ] } }, @@ -63970,7 +63970,7 @@ }, "summary": "Update tenant emailId", "tags": [ - "v1" + 
"tenants" ] } }, @@ -63987,7 +63987,7 @@ }, "summary": "Get tenant level freemium configuration", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64016,7 +64016,7 @@ }, "summary": "Update tenant freemium configuration", "tags": [ - "v1" + "tenants" ] } }, @@ -64033,7 +64033,7 @@ }, "summary": "Get tenant freemium usage", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64058,7 +64058,7 @@ }, "summary": "Returns a specified invoice", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64102,7 +64102,7 @@ }, "summary": "Downloads the specified invoice report", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64146,7 +64146,7 @@ }, "summary": "Downloads the specified monthly invoice report", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64190,7 +64190,7 @@ }, "summary": "Downloads the specified tenant usage", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64223,7 +64223,7 @@ }, "summary": "Get tenant login banner settings", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64258,7 +64258,7 @@ }, "summary": "Update tenant login banner settings", "tags": [ - "v1" + "tenants" ] } }, @@ -64281,7 +64281,7 @@ }, "summary": "Delete the macros for the specified tenant by given macro name", "tags": [ - "v1" + "tenants" ] }, "get": { @@ -64296,7 +64296,7 @@ }, "summary": "List the macros of the specified tenant", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64325,7 +64325,7 @@ }, "summary": "Update the macros for the specified tenant by given macro name", "tags": [ - "v1" + "tenants" ] }, "post": { @@ -64346,7 +64346,7 @@ }, "summary": "Create or add new macros for the specified tenant", "tags": [ - "v1" + "tenants" ] }, "put": { @@ -64367,7 +64367,7 @@ }, "summary": "Update the macros of the specified tenant", "tags": [ - "v1" + "tenants" ] } }, @@ -64384,7 +64384,7 @@ }, "summary": "Returns the oidc Spec for tenant", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64413,7 +64413,7 @@ }, "summary": "Associates the oidc Spec for the tenant", "tags": [ - "v1" + "tenants" ] } }, @@ -64444,7 +64444,7 @@ }, "summary": "creates or updates a password policy for tenant", "tags": [ - "v1" + "tenants" ] } }, @@ -64461,7 +64461,7 @@ }, "summary": "Get is cluster group enabled for a specific tenant", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64490,7 +64490,7 @@ }, "summary": "Enable or Disable cluster group for a specific tenant", "tags": [ - "v1" + "tenants" ] } }, @@ -64507,7 +64507,7 @@ }, "summary": "Get tenant cluster settings", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64552,7 +64552,7 @@ }, "summary": "Update tenant clusters nodes auto remediation setting", "tags": [ - "v1" + "tenants" ] } }, @@ -64569,7 +64569,7 @@ }, "summary": "Get developer credit enabled for a specific tenant", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64598,7 +64598,7 @@ }, "summary": "update developer credit for a specific tenant", "tags": [ - "v1" + "tenants" ] } }, @@ -64615,7 +64615,7 @@ }, "summary": "Get tenant fips settings", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64650,7 +64650,7 @@ }, "summary": "Update tenant fips setting", "tags": [ - "v1" + "tenants" ] } }, @@ -64667,7 +64667,7 @@ }, "summary": "Get all rate config for public and private cloud", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64696,7 +64696,7 @@ }, "summary": "updates the rate config for public and private cloud", "tags": [ - "v1" + "tenants" ] } }, @@ -64713,7 +64713,7 @@ }, "summary": "Get tenant level resource limits configuration", "tags": [ - 
"v1" + "tenants" ] }, "parameters": [ @@ -64742,7 +64742,7 @@ }, "summary": "Update tenant resource limits configuration", "tags": [ - "v1" + "tenants" ] } }, @@ -64759,7 +64759,7 @@ }, "summary": "Returns the specified service provider metadata and Saml Spec for tenant", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64788,7 +64788,7 @@ }, "summary": "Associates the specified federation metadata for the tenant", "tags": [ - "v1" + "tenants" ] } }, @@ -64805,7 +64805,7 @@ }, "summary": "get sso logins for the tenants", "tags": [ - "v1" + "tenants" ] }, "parameters": [ @@ -64834,7 +64834,7 @@ }, "summary": "enable sso logins for the tenants", "tags": [ - "v1" + "tenants" ] } }, @@ -64893,7 +64893,7 @@ }, "summary": "Lists users", "tags": [ - "v1" + "users" ] }, "post": { @@ -64924,7 +64924,7 @@ }, "summary": "Create User", "tags": [ - "v1" + "users" ] } }, @@ -64955,7 +64955,7 @@ }, "summary": "Returns the specified users location", "tags": [ - "v1" + "users" ] } }, @@ -64987,7 +64987,7 @@ }, "summary": "Create a Azure location", "tags": [ - "v1" + "users" ] } }, @@ -65004,7 +65004,7 @@ }, "summary": "Returns the specified Azure location", "tags": [ - "v1" + "users" ] }, "parameters": [ @@ -65034,7 +65034,7 @@ }, "summary": "Updates the specified Azure location", "tags": [ - "v1" + "users" ] } }, @@ -65066,7 +65066,7 @@ }, "summary": "Create a GCP location", "tags": [ - "v1" + "users" ] } }, @@ -65083,7 +65083,7 @@ }, "summary": "Returns the specified GCP location", "tags": [ - "v1" + "users" ] }, "parameters": [ @@ -65113,7 +65113,7 @@ }, "summary": "Updates the specified GCP location", "tags": [ - "v1" + "users" ] } }, @@ -65145,7 +65145,7 @@ }, "summary": "Create a MinIO location", "tags": [ - "v1" + "users" ] } }, @@ -65162,7 +65162,7 @@ }, "summary": "Returns the specified MinIO location", "tags": [ - "v1" + "users" ] }, "parameters": [ @@ -65192,7 +65192,7 @@ }, "summary": "Updates the specified MinIO location", "tags": [ - "v1" + "users" ] } }, @@ -65224,7 +65224,7 @@ }, "summary": "Create a S3 location", "tags": [ - "v1" + "users" ] } }, @@ -65238,7 +65238,7 @@ }, "summary": "Returns the specified S3 location", "tags": [ - "v1" + "users" ] }, "get": { @@ -65253,7 +65253,7 @@ }, "summary": "Returns the specified S3 location", "tags": [ - "v1" + "users" ] }, "parameters": [ @@ -65283,7 +65283,7 @@ }, "summary": "Updates the specified S3 location", "tags": [ - "v1" + "users" ] } }, @@ -65313,7 +65313,7 @@ }, "summary": "Update the default backup location", "tags": [ - "v1" + "users" ] } }, @@ -65327,7 +65327,7 @@ }, "summary": "Deletes the specified location", "tags": [ - "v1" + "users" ] }, "parameters": [ @@ -65367,7 +65367,7 @@ }, "summary": "Returns the SSH keys", "tags": [ - "v1" + "users" ] }, "post": { @@ -65397,7 +65397,7 @@ }, "summary": "Creates a SSH key", "tags": [ - "v1" + "users" ] } }, @@ -65411,7 +65411,7 @@ }, "summary": "Returns the specified user ssh key", "tags": [ - "v1" + "users" ] }, "get": { @@ -65426,7 +65426,7 @@ }, "summary": "Returns the specified user ssh key", "tags": [ - "v1" + "users" ] }, "parameters": [ @@ -65456,7 +65456,7 @@ }, "summary": "Updates the specified user ssh key", "tags": [ - "v1" + "users" ] } }, @@ -65496,7 +65496,7 @@ }, "summary": "Returns the specified vSphere DNS mapping", "tags": [ - "v1" + "users" ] } }, @@ -65527,7 +65527,7 @@ }, "summary": "Returns the specified vSphere DNS mappings", "tags": [ - "v1" + "users" ] }, "post": { @@ -65557,7 +65557,7 @@ }, "summary": "Create a vSphere DNS mapping", "tags": [ - "v1" + "users" 
] } }, @@ -65571,7 +65571,7 @@ }, "summary": "Deletes the specified vSphere DNS mapping", "tags": [ - "v1" + "users" ] }, "get": { @@ -65586,7 +65586,7 @@ }, "summary": "Returns the specified vSphere DNS mapping", "tags": [ - "v1" + "users" ] }, "parameters": [ @@ -65616,7 +65616,7 @@ }, "summary": "Updates the specified vSphere DNS mapping", "tags": [ - "v1" + "users" ] } }, @@ -65633,7 +65633,7 @@ }, "summary": "Get the system Spectro repository. Restricted to edge services", "tags": [ - "v1" + "users" ] } }, @@ -65659,7 +65659,7 @@ }, "summary": "gets users kubectl session", "tags": [ - "v1" + "users" ] } }, @@ -65676,7 +65676,7 @@ }, "summary": "Retrieves a list of users metadata", "tags": [ - "v1" + "users" ] } }, @@ -65723,7 +65723,7 @@ }, "summary": "User password change request using the user emailId", "tags": [ - "v1" + "users" ] } }, @@ -65762,7 +65762,7 @@ }, "summary": "User password reset request using the email id", "tags": [ - "v1" + "users" ] } }, @@ -65803,7 +65803,7 @@ }, "summary": "Returns the specified user summary list", "tags": [ - "v1" + "users" ] }, "post": { @@ -65827,7 +65827,7 @@ }, "summary": "Retrieves a list of users summary with provided filter spec", "tags": [ - "v1" + "users" ] } }, @@ -65850,7 +65850,7 @@ }, "summary": "Delete the macros for the system user by macro name", "tags": [ - "v1" + "users" ] }, "get": { @@ -65865,7 +65865,7 @@ }, "summary": "List the macros of the system", "tags": [ - "v1" + "users" ] }, "patch": { @@ -65886,7 +65886,7 @@ }, "summary": "Update the macros for the system user by macro name", "tags": [ - "v1" + "users" ] }, "post": { @@ -65907,7 +65907,7 @@ }, "summary": "Create or add new macros for the system user", "tags": [ - "v1" + "users" ] }, "put": { @@ -65928,7 +65928,7 @@ }, "summary": "Update the macros of the system", "tags": [ - "v1" + "users" ] } }, @@ -65943,7 +65943,7 @@ }, "summary": "Deletes the specified User", "tags": [ - "v1" + "users" ] }, "get": { @@ -65959,7 +65959,7 @@ }, "summary": "Returns the specified User", "tags": [ - "v1" + "users" ] }, "parameters": [ @@ -65990,7 +65990,7 @@ }, "summary": "Patches the specified User", "tags": [ - "v1" + "users" ] }, "put": { @@ -66012,7 +66012,7 @@ }, "summary": "Update User", "tags": [ - "v1" + "users" ] } }, @@ -66062,7 +66062,7 @@ }, "summary": "User password change request using the user uid", "tags": [ - "v1" + "users" ] } }, @@ -66091,7 +66091,7 @@ }, "summary": "User password reset request using the user uid", "tags": [ - "v1" + "users" ] } }, @@ -66109,7 +66109,7 @@ }, "summary": "Returns the specified User Projects and Roles information", "tags": [ - "v1" + "users" ] }, "parameters": [ @@ -66140,7 +66140,7 @@ }, "summary": "Updates the projects and roles for user", "tags": [ - "v1" + "users" ] } }, @@ -66158,7 +66158,7 @@ }, "summary": "Returns the specified individual and resource roles for a user", "tags": [ - "v1" + "users" ] }, "parameters": [ @@ -66189,7 +66189,7 @@ }, "summary": "Add resource roles for user", "tags": [ - "v1" + "users" ] } }, @@ -66203,7 +66203,7 @@ }, "summary": "Deleted the resource roles from user", "tags": [ - "v1" + "users" ] }, "parameters": [ @@ -66240,7 +66240,7 @@ }, "summary": "Updates the resource roles for user", "tags": [ - "v1" + "users" ] } }, @@ -66258,7 +66258,7 @@ }, "summary": "Returns the specified individual and team roles for a user", "tags": [ - "v1" + "users" ] }, "parameters": [ @@ -66289,7 +66289,7 @@ }, "summary": "Updates the roles for user", "tags": [ - "v1" + "users" ] } }, @@ -66320,7 +66320,7 @@ }, 
"summary": "Users status login mode", "tags": [ - "v1" + "users" ] } }, @@ -66352,7 +66352,7 @@ }, "summary": "Create workspace", "tags": [ - "v1" + "workspaces" ] } }, @@ -66369,7 +66369,7 @@ }, "summary": "Returns the specified team's workspaces and roles data", "tags": [ - "v1" + "workspaces" ] }, "parameters": [ @@ -66399,7 +66399,7 @@ }, "summary": "Updates the workspace roles for the specified team", "tags": [ - "v1" + "workspaces" ] } }, @@ -66417,7 +66417,7 @@ }, "summary": "Returns the specified User workspaces and Roles information", "tags": [ - "v1" + "workspaces" ] }, "parameters": [ @@ -66448,7 +66448,7 @@ }, "summary": "Updates the workspace roles for user", "tags": [ - "v1" + "workspaces" ] } }, @@ -66476,7 +66476,7 @@ }, "summary": "Validates the workspace name", "tags": [ - "v1" + "workspaces" ] } }, @@ -66490,7 +66490,7 @@ }, "summary": "Deletes the specified workspace", "tags": [ - "v1" + "workspaces" ] }, "get": { @@ -66505,7 +66505,7 @@ }, "summary": "Returns the specified workspace", "tags": [ - "v1" + "workspaces" ] }, "parameters": [ @@ -66536,7 +66536,7 @@ }, "summary": "Delete workspace backup", "tags": [ - "v1" + "workspaces" ] }, "get": { @@ -66558,7 +66558,7 @@ }, "summary": "Returns the workspace backup result", "tags": [ - "v1" + "workspaces" ] }, "parameters": [ @@ -66596,7 +66596,7 @@ }, "summary": "Create workspace backup settings", "tags": [ - "v1" + "workspaces" ] }, "put": { @@ -66617,7 +66617,7 @@ }, "summary": "Update workspace backup settings", "tags": [ - "v1" + "workspaces" ] } }, @@ -66657,7 +66657,7 @@ }, "summary": "Create On demand Workspace Backup", "tags": [ - "v1" + "workspaces" ] } }, @@ -66688,7 +66688,7 @@ }, "summary": "Updates the specified workspace namespaces", "tags": [ - "v1" + "workspaces" ] } }, @@ -66728,7 +66728,7 @@ }, "summary": "Create cluster rbac in workspace", "tags": [ - "v1" + "workspaces" ] } }, @@ -66742,7 +66742,7 @@ }, "summary": "Deletes the specified workspace cluster rbac", "tags": [ - "v1" + "workspaces" ] }, "parameters": [ @@ -66777,7 +66777,7 @@ }, "summary": "Updates the specified workspace cluster rbac", "tags": [ - "v1" + "workspaces" ] } }, @@ -66808,7 +66808,7 @@ }, "summary": "Updates the specified workspace meta", "tags": [ - "v1" + "workspaces" ] } }, @@ -66840,7 +66840,7 @@ }, "summary": "Updates the specified workspace resource allocations", "tags": [ - "v1" + "workspaces" ] } }, @@ -66864,7 +66864,7 @@ }, "summary": "Returns the workspace restore result", "tags": [ - "v1" + "workspaces" ] }, "parameters": [ @@ -66912,7 +66912,7 @@ }, "summary": "Create On demand Workspace Restore", "tags": [ - "v1" + "workspaces" ] } } @@ -66924,5 +66924,140 @@ "http", "https" ], - "swagger": "2.0" + "swagger": "2.0", + "tags": [ + { + "name": "cluster", + "x-displayName": "Cluster" + }, + { + "name": "apiKeys", + "x-displayName": "Api Keys" + }, + { + "name": "appDeployments", + "x-displayName": "App Deployments" + }, + { + "name": "appProfiles", + "x-displayName": "App Profiles" + }, + { + "name": "audits", + "x-displayName": "Audits" + }, + { + "name": "auth", + "x-displayName": "Auth" + }, + { + "name": "cloudaccounts", + "x-displayName": "Cloudaccounts" + }, + { + "name": "cloudconfigs", + "x-displayName": "Cloudconfigs" + }, + { + "name": "clouds", + "x-displayName": "Clouds" + }, + { + "name": "clustergroups", + "x-displayName": "Clustergroups" + }, + { + "name": "clusterprofiles", + "x-displayName": "Clusterprofiles" + }, + { + "name": "dashboard", + "x-displayName": "Dashboard" + }, + { + "name": 
"datasinks", + "x-displayName": "Datasinks" + }, + { + "name": "edgehosts", + "x-displayName": "Edgehosts" + }, + { + "name": "events", + "x-displayName": "Events" + }, + { + "name": "filters", + "x-displayName": "Filters" + }, + { + "name": "installers", + "x-displayName": "Installers" + }, + { + "name": "metrics", + "x-displayName": "Metrics" + }, + { + "name": "notifications", + "x-displayName": "Notifications" + }, + { + "name": "overlords", + "x-displayName": "Overlords" + }, + { + "name": "packs", + "x-displayName": "Packs" + }, + { + "name": "pcg", + "x-displayName": "Pcg" + }, + { + "name": "permissions", + "x-displayName": "Permissions" + }, + { + "name": "projects", + "x-displayName": "Projects" + }, + { + "name": "registries", + "x-displayName": "Registries" + }, + { + "name": "roles", + "x-displayName": "Roles" + }, + { + "name": "services", + "x-displayName": "Services" + }, + { + "name": "spectroclusters", + "x-displayName": "Spectroclusters" + }, + { + "name": "teams", + "x-displayName": "Teams" + }, + { + "name": "tenants", + "x-displayName": "Tenants" + }, + { + "name": "users", + "x-displayName": "Users" + }, + { + "name": "workspaces", + "x-displayName": "Workspaces" + } + ], + "servers": [ + { + "url": "https://api.spectrocloud.com" + } + ] } \ No newline at end of file diff --git a/docs/deprecated/clusters/edge/edgeforge-workflow/build-images.md b/docs/deprecated/clusters/edge/edgeforge-workflow/build-images.md new file mode 100644 index 0000000000..c018d1ceb4 --- /dev/null +++ b/docs/deprecated/clusters/edge/edgeforge-workflow/build-images.md @@ -0,0 +1,231 @@ +--- +sidebar_label: "Build Images" +title: "Build Images" +description: "Learn about building your eterprise edge artifacts" +hide_table_of_contents: false + +--- + + + + + + +# Overview + +You can use the Palette Edge CLI to create an Edge artifact. The Edge artifacts will include everything you may have created up to this point. + +
+ +* Bring Your Own Operating System (BYOOS) + + +* Content Bundle + + +* User Data + + +![A diagram that illustrates the mentioned pieces making up an Edge artifact created by the Palette Edge CLI](/clusters_edge-forge-workflow_build-images_edge-artifact-result.png) + +Use the following steps to create an Edge artifact for your Edge host. +# Prerequisites + +- Linux Machine (Physical or VM) with an AMD64 architecture. + + +- 8 CPU + + +- 16 GB Memory + + +- 150 GB Storage + + If you experience disk space constraints on the machine where images are built, you can remove unnecessary Docker images and volumes. Or start the process on a machine with more storage allocated. + + +- Access to a container registry with permission to push container images. For guidance on logging in, review the registry login instructions for your respective registry. With docker, use the `docker login` command to log in to the registry. + +# Create Artifact + +Choose the workflow that fits your needs. + +
+ + +1. Download the Palette Edge CLI and assign the executable bit. + +
+ + ```shell + VERSION=3.4.3 + wget https://software.spectrocloud.com/stylus/v$VERSION/cli/linux/palette-edge + chmod +x palette-edge + ``` + +2. Issue the `show` command to review the list of options for Operating System (OS) distribution and versions, Kubernetes distributions and versions, the Spectro Agent Version, and Kairos version. + +
+ + ```shell + ./palette-edge show + ``` + + ![CLI example output from the show command](/clusters_edge-forge-workflow_build-images_edge-cli-show.png) + +
+ + The Kubernetes distribution and versions you choose must be available in the list displayed. We will continuously release newer versions of Kubernetes as part of our release cycle. If you decide to use your custom OS, you must build a Kairos image from the OS you used in the [Bring Your Own OS](/clusters/edge/edgeforge-workflow/build-kairos-os) guide. Typically, you will keep the Spectro Agent and Kairos versions the same. + + +3. Use the `generate` command to create an image scaffolding by providing your choice of OS and Kubernetes distribution. There are several CLI flags you can specify to the `generate` command. The following flags are the most common. + + + + | Parameter | Description | +|--------------------------|-----------------------------------------------------------------------------------------------------| +| `--os-flavor` | OS flavor. | +| `--k8s-flavor` | Kubernetes flavor. | +| `--output` | Directory for generating build files. | +| `--push-image-repository` | Repository for generated container images. | +| `--content-path` | Optional location of the content bundle if you preload content. | +| `--cache-provider-images` | Additional flag to preload generated provider images into the installer ISO. | +| `--cloud-init-file` | Specify the Edge Installer configuration user data file to include in the Edge artifact. | + +
+ + :::info + + When using the `generate` command, the specified registry is where Edge artifacts will be uploaded. + + ::: + + + ```shell + ./palette-edge generate --os-flavor [pick-os] \ + --k8s-flavor [pick-k8s] \ + --output [output directory] \ + --push-image-repository [your registry path] \ + --content-path [path to content bundle, if applicable] \ + --cache-provider-images + ``` + + In this example, an `OpenSuse` + `k3s` image using the upstream Kairos `opensuse-leap` images is selected. The scaffolding image will also get published to a target repo `gcr.io/my-registry` and will include a content bundle. The `generate` command would look similar to the following. + + Example: + ```shell + ./palette-edge generate --os-flavor opensuse-leap \ + --k8s-flavor k3s \ + --output opensuse-k3s \ + --push-image-repository gcr.io/my-registry \ + --content-path /temp/bundles/content-c59a5a88/spectro-content-c59a5a88.zst \ + --cache-provider-images + ``` + Output: + ```shell + INFO Creating directory opensuse-k3s + INFO Created scaffolding directory structure under directory opensuse-k3s with the following parameters + ┌───────────────────────────────────────────────────────────────────────────┐ + | Spectro Agent Version | v0.0.0-d155796 | + | Base Image | quay.io/kairos/core-opensuse-leap:v1.5.0 | + | K8S Flavor | k3s | + | K8S Versions | 1.25.2-k3s1,1.24.6-k3s1,1.23.12-k3s1,1.22.15-k3s1 | + | Push Image Repository | gcr.io/spectro-testing | + | Kairos Version | v1.5.0 | + └───────────────────────────────────────────────────────────────────────────┘ + To build an installer iso and target docker images for + various versions supported, run the 'build.sh' in the + 'opensuse-k3s' directory. For any customizations to made to + all the generated images e.g adding packages, edit the + 'images/Dockerfile' as needed before running + 'build.sh'.Files to be copied to the target images can be + places under 'overlay/files' and files for the iso only can + be placed under 'overlay/files-iso + ``` + + For custom images use the `--base-image-uri` flag and specify the path to the custom image. + + Example: + + ```shell + ./palette-edge generate \ + --base-image-uri quay.io/kairos/core-rockylinux:v1.5.0 \ + --k8s-flavor k3s \ + --output rockylinux-k3s \ + --push-image-repository gcr.io/my-registry + ``` + + +4. Review the content of the output directory you specified using `--output` flag. The output directory structure contains the following files. + +
+ + ![The output directory content in a tree diagram](/clusters_edge-forge-workflow_build-images_edge-cli-output.png) + + +5. Navigate to the output directory and review the file **.VERSIONS.env**. Set the variable `PUSH_BUILD` to `true` so that the Edge provider images and the Edge Installer image get pushed to your image registry. The alternative is to manually push all the images after the image creation process completes. + + + +6. Before you start the build script, you can make changes to customize your build. Review the use cases below to learn more about customization options. + + | Use case | Description | + | --- | --- | + | Modifying/controlling Kubernetes versions and flavors | You can update the .versions.env file or specify the versions and flavors as arguments to the build command. | + | Adding custom packages to target OS images | Edit the **Dockerfile** of the respective OS images to add the install commands using `apt-get` or `zypper`. | + | Adding custom files or directories into Kubernetes provider container images | Add the custom files or directories in the **overlay/files/** folder. The files directory is copied directly under the */* folder in the target image. | + | Adding custom content to Edge Install installer ISO | Place the custom content in the **overlay/files-iso** directory. To embed a content bundle, place it under the **overlay/files-iso/opt/spectrocloud/content** folder. This limits the scope to only the Edge Install installer ISO. | + + +7. Navigate to your output directory and issue the following command to build your Edge images. This command will take a few minutes to complete. + +
+ + ```shell + ./build.sh + ``` + + The following images are generated by the build script. + +
+ + - Edge Installer ISO image. +
+ + - Edge Host container image containing the Palette Edge Host Agent. +
+ + - Kubernetes Provider container images. + +
+ +8. Locate your ISO file in the output directory. The ISO file's default name is **spectro-edge-installer.iso** but it may be different if you used the `--iso-name` CLI flag. + + +Using a bootable USB drive, PXE server, or other means, mount the ISO to the primary drive of the Edge host. The installer flashes to the Edge host's hard disk, and the host will shut down. The Edge host is now ready to be shipped to the edge location. + +
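If you prepare the bootable USB drive from a Linux workstation, one option is `dd`. The following is a minimal sketch, assuming the ISO sits in your current directory and `/dev/sdX` is the USB device; verify the device name first, because `dd` overwrites the target device.

```shell
# Write the installer ISO to the USB drive.
# Replace /dev/sdX with your USB device, for example /dev/sdb.
sudo dd if=spectro-edge-installer.iso of=/dev/sdX bs=4M status=progress conv=fsync
```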
+ +:::info + +You can use several software tools to create a bootable USB drive, such as [balenaEtcher](https://www.balena.io/etcher). For a PXE server, there are open-source projects such as [Fog](https://fogproject.org/download) or [Windows Deployment Services](https://learn.microsoft.com/en-us/windows/deployment/wds-boot-support) for Windows. + +::: + + +# Validate + +1. In the build server, validate the output directory containing the ISO file. + + +2. You can validate that the ISO image is not corrupted and is valid by attempting to flash a bootable device. Most software that creates a bootable device will validate the ISO image before the flash process. + + +3. You can validate that the Edge host is ready for the site installation by simulating a site deployment on one of the Edge hosts. The simulation process will require you to complete the installation process and reset the device after the validation. + + +# Next Steps + +You now have an Edge artifact that you can use to create an Edge host. You can start the site deployment process. Check out the [Site Deployment](/clusters/edge/site-deployment) resource to learn more about the Site Deployment process. + diff --git a/docs/deprecated/clusters/edge/edgeforge-workflow/build-kairos-os.md b/docs/deprecated/clusters/edge/edgeforge-workflow/build-kairos-os.md new file mode 100644 index 0000000000..4454cade1e --- /dev/null +++ b/docs/deprecated/clusters/edge/edgeforge-workflow/build-kairos-os.md @@ -0,0 +1,106 @@ +--- +sidebar_label: "Bring Your Own OS" +title: "Bring Your Own OS - Create Kairos Image" +description: "Learn about building your own Kairos Image" +hide_table_of_contents: false + +--- + + + + + + +# Overview + +Edge supports the ability for you to specify a custom Operating System (OS) for your Edge host runtime. Building a system using your choice of OS requires creating a [Kairos-base](https://kairos.io/) image with your custom OS. The Palette feature, [Bring Your Own OS (BYOOS)](/integrations/byoos) allows you to use a custom OS in a cluster profile. + + +As an example, the following steps will guide you on how to build a Kairos-based Red Hat Enterprise Linux (RHEL) image. Use the same steps for any other operating system. + +
+ + +:::info + +BYOOS gives you the flexibility to tailor and manage the OS layer in your cluster profiles, ensuring that clusters perform optimally to meet your environment needs. +To learn how to use your own OS images with an Edge cluster profile, refer to the [Model Edge Native Cluster Profile](https://docs.spectrocloud.com/clusters/edge/site-deployment/model-profile) guide. + + +::: + +# Prerequisites + +- Linux Machine (Physical or VM) with an AMD64 architecture. + + +- Access to a container registry with permission to push container images. Review the registry login instructions for your respective registry for guidance on logging in. + + +:::caution + +Some operating systems require credentials to download the source image, such as RHEL. An RHEL subscription is required in this example to download the RHEL Universal Base Images (UBI) needed to build the Edge provider image. Ensure you have the necessary credentials to download the OS source image. + + +::: + + +# Build Image + +1. Issue the following commands to prepare your server. You can also add more packages to the `apt install` command if needed. +
+ + ```shell + mkdir -p /etc/apt/keyrings + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt update -y + sudo apt install docker-ce docker-ce-cli containerd.io docker-compose-plugin git-all -y + ``` + +2. Create a workspace and download the builder code. + +
+ + ```shell + mkdir -p ~/workspace/ + cd workspace/ + git clone https://github.com/spectrocloud/pxke-samples + ``` + +3. Build Karios Image. In this step, you will create a Kairos-based core image from an RHEL 8 base OS. Core images form the basis for Kubernetes provider images used for host cluster provisioning. Review the contents of the Dockerfile to understand the various steps involved in this process. You must supply credentials to your RHEL subscription to successfully build the image. + +
+ + ```shell + cd pxke-samples/core-images + docker build \ + --tag [your image repository]/rhel8-kairos:1.0 \ + --build-arg USERNAME=[rhel subscription username]\ + --build-arg PASSWORD=[rhel subscription password] \ + --file Dockerfile.rhel8 . + ``` + +4. Upload the image to your container registry. + +
+ + ```shell + docker push [your image repository]/rhel8-kairos:1.0 + ``` +--- + +
+ +Your image will be used in the [Build Images](/clusters/edge/edgeforge-workflow/palette-canvos) step and become part of your Edge artifact. The custom OS you created will also be used in the OS layer of the cluster profile by using the [Bring Your Own OS (BYOOS)](/integrations/byoos) pack. +
+ + +# Next Steps + + +Your next step is to evaluate if you need to create a content bundle. To create a content bundle, check out the [Build Content Bundle](/clusters/edge/edgeforge-workflow/palette-canvos) guide. + +
diff --git a/docs/deprecated/clusters/palette-virtual-clusters/add-virtual-cluster-to-host-cluster.md b/docs/deprecated/clusters/palette-virtual-clusters/add-virtual-cluster-to-host-cluster.md new file mode 100644 index 0000000000..3ac522e310 --- /dev/null +++ b/docs/deprecated/clusters/palette-virtual-clusters/add-virtual-cluster-to-host-cluster.md @@ -0,0 +1,167 @@ +--- +sidebar_label: "Add Virtual Clusters to a Host Cluster" +title: "Add Virtual Clusters to a Host Cluster" +description: "How to add Palette Virtual Clusters to a Host Cluster" +icon: "" +hide_table_of_contents: false + +--- + + +# Add Virtual Clusters to a Host Cluster + +:::caution + +As of Palette 3.2, this feature is deprecated. Use the [Deploy a Virtual Cluster to a Cluster Group](/clusters/palette-virtual-clusters/deploy-virtual-cluster) guide to learn how to deploy Palette Virtual clusters. + + +::: + +You can deploy Palette Virtual Clusters in a [Host Cluster](/glossary-all#hostcluster). To do this, Palette provides the **Enable Virtual Clusters** option for new or existing clusters. Clusters with the virtual clusters feature enabled are called Host Clusters. + +The advantages of a virtual cluster environment are: +- You can operate with admin-level privileges while ensuring strong isolation. +- Virtual clusters reduce operational overhead and improve resource utilization. + +Follow steps below to enable and deploy a virtual cluster. + +# Prerequisites + +- A Spectro Cloud account. + +- A configured [Cluster](/clusters). + +- Attach any required policies in your cloud account that must be added to your virtual cluster deployment. + - For AWS, refer to the [Required IAM Policies](/clusters/public-cloud/aws/required-iam-policies#globalroleadditionalpolicies) documentation. + - For Azure, no additional policies are required. + +:::info + +Palette doesn't support _Usage_ and _Cost_ metrics for Virtual Clusters running on Google Kubernetes Engine (GKE). + +::: + +## Add Node-Level Policies in your Cloud Account + +In some situations additional node-level policies must be added to your deployment. + +To add node-level policies: + +1. In **Cluster Mode**, switch to the **Tenant Admin** project. +2. Select **Tenant Settings** in the **Main Menu**. +3. Click **Cloud Accounts** and ensure **Add IAM policies** is enabled for your cloud account. If an account does not already exist, you must add one. +4. You can specify any additional policies to include in virtual clusters deployed with this cloud account. + - For AWS, add the **AmazonEBSCSIDriver** policy so that the virtual clusters can access the underlying host cluster's storage. Check out the [Palette required IAM policies](/clusters/public-cloud/aws/required-iam-policies#globalroleadditionalpolicies) documentation to learn more about additional IAM policies. +5. Confirm your changes. + +# Enable Virtual Clusters on a Host Cluster + +Follow these steps to enable virtual clusters on a new or existing Host Cluster: + +1. In **Cluster Mode**, select **Clusters** in the **Main Menu**. +2. Select a **Host Cluster** from the list and click **Settings > Cluster Settings > Virtual Clusters**. +3. Toggle the **Enable Virtual Clusters** option to _on_. +4. Select an endpoint in the **Cluster Endpoint Type** drop-down menu: _Load Balancer_ or _Ingress_. +5. Configure the load balancer or ingress endpoint. + + + + +### Configure Load Balancer Endpoint +
+These requirements apply to a Load Balancer endpoint: +
+
+ +* The Host Cluster supports dynamic provisioning of load balancers. +* If the Host Cluster is in the public cloud, the AKS/EKS/GCP Cloud Controller Manager must support load balancers by default. +* If the Host Cluster is in a private data center, a bare metal load balancer provider such as MetalLB must be installed and configured. + +
+ + + +### Configure Ingress Endpoint +
+These requirements apply to an Ingress endpoint: +
+
+ +* The Host Cluster must specify a Host domain name service (DNS) Pattern, for example: `*.starship.te.spectrocloud.com` +
+To create a valid Host DNS Pattern, you must deploy the NGINX Ingress Controller on the Host Cluster with SSL passthrough enabled. This allows transport layer security (TLS) termination to occur at the virtual cluster's Kubernetes API server. +
+* A wildcard DNS record must be configured, which maps the Host DNS Pattern to the load balancer associated with the NGINX Ingress Controller. + +To map the Host DNS Pattern to the load balancer with the NGINX Ingress Controller: +
+
+1. Deploy the NGINX Ingress Controller on the Host Cluster and ensure that SSL passthrough is enabled in the `values.yaml` file for the NGINX Ingress Controller pack. Set `charts.ingress-nginx.controller.extraArgs.enable-ssl-passthrough` to _true_ as shown in the example:
+
+ +
+ + ```yml + charts: + ingress-nginx: + ... + controller: + ... + extraArgs: + enable-ssl-passthrough: true + ``` +2. Identify the public DNS name of the load balancer associated with the LoadBalancer Service associated with your NGINX Ingress Controller deployment. + +3. Create a wildcard DNS record that maps the Host Pattern to the NGINX Ingress Controller load balancer. The example shows an AWS Route53 record for the `*.starship.te.spectrocloud.com` Host DNS Pattern. + + |Example Record with Host DNS Pattern|| + |-|-| + |![AWS Route 53](/record-details.png) |Here is an example of an
AWS Route53 record for the
`*.starship.te.spectrocloud.com`
Host DNS Pattern.| + +
+
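+
+To find the public DNS name referenced in step 2 above, you can read it directly from the LoadBalancer Service. The command below is a sketch; the namespace and service name depend on how the NGINX Ingress Controller pack is deployed in your cluster:
+
+```shell
+# Print the load balancer address of the NGINX Ingress Controller service.
+# On some providers the address is exposed under .ip instead of .hostname.
+kubectl get service ingress-nginx-controller --namespace nginx \
+  --output jsonpath='{.status.loadBalancer.ingress[0].hostname}'
+```
+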
+ + +# Deploy a Virtual Cluster in the Host Cluster + +To deploy a new virtual cluster in an existing Host Cluster: + +1. In **Cluster Mode** select a project from the drop-down menu, and click **Clusters** in the **Main** menu. + +2. Click the **Virtual Clusters** tab to list available virtual clusters, and select **Add New Virtual Cluster**. + +3. Provide **Deploy New Virtual Cluster** configuration information:
+ + - Select the Host Cluster in which you'll enable virtual clusters. + + - Add a cluster name. + + - Optionally, provide a Description and Tags. + + - Click the **Attach Profile** button to assign a profile. + + You can attach one or more Add-on layers to this cluster. If you do not have a Cluster Profile, refer to [Creating Cluster Profile](/cluster-profiles/task-define-profile) for details. + +
+ +4. (Optional) If the Host Cluster's **Cluster Endpoint Type** is a _Load Balancer_, you can provide the following advanced configuration options here: + + - [External Traffic Policy](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip): _Cluster_ or _Local_.
+ + - Load Balancer Source Ranges: Limits which client IPs can access the load balancer. Inputs must be a comma-separated list of CIDR ranges in `a.b.c.d/x` format. [Network Load Balancer support on AWS](https://kubernetes.io/docs/concepts/services-networking/service/#aws-nlb-support) provides additional details. + +# Validate +To validate your virtual cluster is available and ready for use, navigate to **Clusters > Virtual Clusters**, which lists all your virtual clusters. + + +# Resources + +- [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) + +- [CPU resource units](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu) + +- [Memory resource units](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory) + +- [Amazon EBS CSI driver - Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html) + +- [Creating the Amazon EBS CSI driver IAM role for service accounts - Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/csi-iam-role.html) diff --git a/docs/deprecated/integrations/EKS-D.md b/docs/deprecated/integrations/EKS-D.md new file mode 100644 index 0000000000..a2c7835336 --- /dev/null +++ b/docs/deprecated/integrations/EKS-D.md @@ -0,0 +1,91 @@ +--- +sidebar_label: 'Amazon EKS Distro' +title: 'Kubernetes with Spectro Cloud' +description: 'EKS-D packs in Spectro Cloud' +hiddenFromNav: true +type: "integration" +category: ['kubernetes'] +logoUrl: 'https://registry.spectrocloud.com/v1/kubernetes-eksd/blobs/sha256:5790ca7040999e2f9371163a319cda652ed1e32139bcb9c6fb32a0152d9f48fb?type=image/png' +--- + + +# Amazon EKS Distro + +Amazon EKS-D allows for the manual deployment of secure and reliable workload clusters, free from constant testing and tracking for dependencies, security patches, and updates of Kubernetes. EKS-D provisions clusters with consistent versions of Kubernetes and dependencies of Amazon EKS. The deployment of EKS-D is standardized enough to build your Kubernetes cluster on any public, private or on-prem platform. Once the community support for Kubernetes versions expires, Amazon takes care of the version control including the latest security patches. With EKS-D, users enjoy benefits such as secure Docker images, back-ported security fixes, and a single upstream vendor. +## Provision and Manage Amazon EKS Distro (EKS-D) with Spectro Cloud +Spectro Cloud leverages EKS-D services to customers as a platform of their choice. We support easy provisioning and management of EKS-D services for on-premises as well as for public cloud platforms such as: + +* vSphere Cloud Provider (vSphere) +* Amazon Web Services (AWS) +* Microsoft Azure (Azure) +* Google Cloud Platform (GCP) +* Metal as a Service (MaaS) +* OpenStack Cloud + +We have made the usage of EKS-D easy by incorporating it as integration within the Spectro Cloud pack. At the click of a button, EKS-D is brought to use while creating a Spectro Cloud-specific cluster profile. +Once the cluster profile is created, users can deploy EKS-D based Kubernetes clusters through the Spectro Cloud console. + +![eksd-cluster-profile](/eksd-cluster-profile.png) + +![eksd-cluster](/eksd-cluster.png) + +## Why EKS-D with Spectro Cloud + +Spectro Cloud fosters the tenant EKS-D clusters with add-on features such as authorization, monitoring, logging, load balancing, and more. 
+The extensive platform support that Spectro Cloud provides to its customers makes EKS-D with Spectro Cloud highly flexible. +We provide isolation to the EKS-D tenant clusters by virtue of projects and RBAC. + +||Spectro Add-On Packs| +|-|------| +|Deployment specifics|Logging| +||Monitoring | +||Security | +||Authentication| +||Service Mesh | +||Load Balancer | +||Ingress | +||Service Mesh | +||Helm Charts | +||Registries| + +||Spectro EKS-D Platform Support| +|-|----| +|Public Cloud|Amazon Web Services (AWS) +||Microsoft Azure (Azure) +||Google Cloud Platform (GCP)| +|On-Premises|vSphere Cloud Provider (vSphere)| +||OpenStack Cloud| +||Metal-as-a-Service Cloud (MaaS)| + +||Resource Isolation| +|-|---| +| Project |Users and teams with specific roles can be associated with the project.| +| |The project helps to organize the cluster resources in a logical grouping | +| RBAC|Role-Based Access Control.| +| |This is a method that allows the same user to have a different type of access control based on the resource being accessed.| + +## Supported EKS-Distro Versions + + + + +* **v1-21-eks-4 ** + + + + +* **v1-20-eks-6 ** + + + + + +* **1.18.9-eks-1-18-1** + + + + + +## Reference + +https://aws.amazon.com/eks/eks-distro diff --git a/docs/deprecated/integrations/oidc-eks.md b/docs/deprecated/integrations/oidc-eks.md new file mode 100644 index 0000000000..d1e924dd36 --- /dev/null +++ b/docs/deprecated/integrations/oidc-eks.md @@ -0,0 +1,31 @@ +--- +sidebar_label: 'aws-eks-oidc' +title: 'aws-eks-oidc' +description: 'aws-eks-oidc Authentication pack in Spectro Cloud' +hiddenFromNav: true +type: "integration" +category: ['authentication'] +logoUrl: 'https://registry.dev.spectrocloud.com/v1/aws-eks-oidc/blobs/sha256:f86813591b3b63b3afcf0a604a7c8c715660448585e89174908f3c6a421ad8d8?type=image/png' +--- + + + + +# OIDC identity provider authentication for Amazon EKS + +OpenID Connect (OIDC) Identity Provider (IDP) authentication for Amazon EKS clusters. This feature allows customers to integrate an OIDC identity provider with a new or existing Amazon EKS cluster running Kubernetes version 1.16 or later. OpenID Connect is an interoperable authentication protocol based on the OAuth 2.0 family of specifications. It adds a thin layer that sits on top of OAuth 2.0 that adds login and profile information about the identity of who is logged in. + + +## Versions Supported + + + + + +**1.0.0** + + + + +## References +https://aws.amazon.com/blogs/containers/introducing-oidc-identity-provider-authentication-amazon-eks/ diff --git a/docs/deprecated/integrations/ubuntu-k3s.md b/docs/deprecated/integrations/ubuntu-k3s.md new file mode 100644 index 0000000000..68a2d52822 --- /dev/null +++ b/docs/deprecated/integrations/ubuntu-k3s.md @@ -0,0 +1,73 @@ +--- +sidebar_label: 'Ubuntu-K3s' +title: 'Ubuntu Lightweight Kubernetes K3s' +description: 'Choosing K3s with Ubuntu within the Palette console' +hiddenFromNav: true +type: "integration" +category: ['system app'] +logoUrl: 'https://registry.spectrocloud.com/v1/ubuntu-k3s/blobs/sha256:10c291a69f428cc6f42458e86cf07fd3a3202c3625cc48121509c56bdf080f38?type=image/png' +--- + + + + + +# Lightweight Kubernetes on Ubuntu +K3s is a purpose-built container orchestrator for running Kubernetes on bare-metal servers. With the bloat stripped out, the CNCF (Cloud Native Computing Foundation) accredited Kubernetes distribution orchestrator makes installation and application deployment faster. Palette supports this Lightweight Kubernetes and Ubuntu pack versions to run at scale. + +
+ +## Versions Supported + +
+ +## Ubuntu K3s +
+ + + +
+
+ +Name: **Ubuntu-K3s** +Version: **Ubuntu-K3s-1.22.9-k3s0** + + +
+
+ + +
+ + +
+
+ +Name: **Ubuntu-K3s** +Version: **Ubuntu-K3s-1.21.12-k3s0** + +
+
+ +
+
+ + +## Manifest Parameters + + +```yaml +pack: + spectrocloud.com/install-priority: "0" +#k3sconfig: +# disable: +# - metrics-server +# service-cidr: "10.40.0.0/16" +# cluster-cidr: "10.45.0.0/16" +``` + + + +# References + +[Rancher](https://rancher.com/docs/k3s/latest/en/) diff --git a/docs/docs-content/architecture/_category_.json b/docs/docs-content/architecture/_category_.json new file mode 100644 index 0000000000..c3460c6dbd --- /dev/null +++ b/docs/docs-content/architecture/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 30 +} diff --git a/docs/docs-content/architecture/architecture-overview.md b/docs/docs-content/architecture/architecture-overview.md new file mode 100644 index 0000000000..0349a720a5 --- /dev/null +++ b/docs/docs-content/architecture/architecture-overview.md @@ -0,0 +1,61 @@ +--- +sidebar_label: "Overview" +title: "Deployment Architecture Overview" +description: "Spectro Cloud Architecture Overview" +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +--- + + + +Palette is available in three flexible deployment models: + +* **Multi-tenant SaaS**: The management plane is hosted in AWS across three regions (us-east-1, us-west-1, us-west-2) and managed by Spectro Cloud. Each customer will occupy a tenant. The Spectro Cloud operation team controls when to upgrade the management plane. + +* **Dedicated SaaS**: The management plane is hosted in a cloud/region specified by the customer in Spectro Cloud’s cloud account with a dedicated instance managed by Spectro Cloud. The customer can decide when to upgrade the management plane. + +* **Self-hosted**: The management plane is hosted in the customer’s environment. It can be the customer’s on-prem VMware vSphere, OpenStack, bare metal, or in a public cloud using the customer’s cloud account. + + +![A diagram of Palette deployment models eager-load](/architecture_architecture-overview-deployment-models.png) + +
+ + +## Product Security + +At Spectro Cloud, we recognize the importance of robust security measures in today's rapidly evolving digital landscape. As the provider of our cutting-edge SaaS and self-hosted Palette product, our commitment to safeguarding your data and ensuring the integrity of our services is paramount. Learn more about Palette security by reviewing the [Security](../security/security.md) section. + +
+ +## SaaS Architecture and Data Flow + +The Palette SaaS platform can manage public clouds (AWS, Azure, Google Cloud) and on-premises data center (VMware, OpenStack, bare metal). The architecture and data flow slightly differ based on whether the target environment is a public or an on-premises data center. + +### SaaS to Public Clouds + +The following diagram illustrates the data flow for the Palette SaaS platform to manage the EKS cluster using the user's cloud account in AWS: + + +![A diagram of the Palette SaaS architecture eager-load](/architecture_architecture-overview_saas.png) + +There are two main data flows represented in the provisioning flow (red) and monitoring flow (green). + +* **Provisioning data flow**: A tenant user from the browser or API client (e.g., Terraform provider) to configure Cluster Profile, Cloud Configuration (e.g., which cloud account to use, cloud-specific placement settings like VPC, subnet), and cluster specifications (e.g., cluster size, node instance type, etc.). This information is sent to Palette. In turn, Palette will invoke the cloud API to talk to the cloud endpoint using the cloud credentials specified to provision the Kubernetes cluster. Once the cluster is provisioned, a Palette management agent will be pushed and installed in the cluster. This agent will receive the Cluster Profile and Cluster Specifications as the desired state from SaaS. The agent will further inspect the desired state and pull additional add-on integrations from Palette's public package registry, or optionally a private package registry hosted by the tenant user. Once all required add-on integrations are installed, the agent will send a message to SaaS to indicate the full-stack K8s provisioning is completed. + + +* **Monitoring data flow**: The agent will periodically report the cluster health status back to the Palette SaaS platform. The agent will also stay in the watch loop to check if the cluster's stat matches the declared desired state. If there is any deviation (e.g. a worker node is accidentally shut down by a user directly from the cloud console), the agent can either send an alert message or based on the policy, do auto reconciliation/self-healing to bring the cluster back to match with the desired state. If there is an updated Cluster Profile or Cluster Spec, the agent will receive the updated and desired state from SaaS. It will then enforce the desired state by making cluster configuration changes accordingly. + +### SaaS to Private Clouds / Data Center / Bare Metal +For private clouds like VMware, since the Palette SaaS platform does not have direct access to the private cloud endpoint (e.g., vCenter), there is one extra component, Palette Private Cloud Gateway, to be deployed in a private cloud environment to act as the local orchestrator and the proxy between Palette’s SaaS platform and cloud endpoint. The following diagram illustrates the data flow for the Palette SaaS platform to manage an on-prem VMware private data center: + + +![Palette SaaS architecture diagram with connections to private data centers](/architecture_architecture-overview_on-prem.png) + + +## Self-Hosted Architecture and Data Flow + +Although the Palette SaaS platform fully supports both public clouds and data centers, some customers, especially with regulated industry or air-gapped environments, may prefer to install Palette in their own environment behind the firewall, so that they can control the platform upgrade cycles and ensure no sensitive data are exposed. 
For these use cases, Palette supports a self-hosted on-premises installation. The platform updates and add-on integration contents can be optionally downloaded from an on-prem private repository instead of pulling from Palette’s hosted public repository. + +![Self-hosted Palette architecture diagram](/architecture_architecture-on-prem-detailed.png) diff --git a/docs/docs-content/architecture/architecture.md b/docs/docs-content/architecture/architecture.md new file mode 100644 index 0000000000..6dc6d9e0fb --- /dev/null +++ b/docs/docs-content/architecture/architecture.md @@ -0,0 +1,36 @@ +--- +sidebar_label: "Architecture" +title: "Palette Architecture" +description: "A deep dive into Palette's architecture and technical concepts" +hide_table_of_contents: false +sidebar_custom_props: + icon: "cubes" +tags: + - Architecture +--- + + +# Architecture + + +Palette supports three different architecture models; multi-tenant SaaS, dedicated SaaS, and self-hosted. To learn more about Palette's architecture and the various components that make up the model, visit the resources listed below. + +![Architecture image with on-prem and SaaS eager-load](/docs_architecture-overview_components-overview.png) + + +## Resources + +- [Architecture Overview](architecture-overview.md) + + +- [Provisioning Order of Operations](orchestration-spectrocloud.md) + + +- [Namespaces and Pods](palette-namespaces-podes.md) + + +- [Network Ports](networking-ports.md) + + +- [IP Addresses](palette-public-ips.md) + diff --git a/docs/docs-content/architecture/grps-proxy.md b/docs/docs-content/architecture/grps-proxy.md new file mode 100644 index 0000000000..3b27e782fc --- /dev/null +++ b/docs/docs-content/architecture/grps-proxy.md @@ -0,0 +1,70 @@ +--- +sidebar_label: "gRPC and Proxies" +title: "gRPC and Proxies" +description: "Learn about gRPC and how a proxy is used to communicate between the management platform and the workload cluster." +hide_table_of_contents: false +sidebar_position: 30 +sidebar_custom_props: + icon: "" +--- + + + +Palette uses [gRPC](https://grpc.io) to communicate between the management platform and the workload cluster. gRPC is a high-performance, open-source universal Remote Procedure Call (RPC) framework. It is used to build distributed applications and services. gRPC is based on HTTP/2 and uses protocol buffers ([protobuf](https://protobuf.dev/)) as the underlying data serialization framework. + + +:::info + +Refer to the [Network Ports](networking-ports.md) documentation for a detailed network architecture diagram with gRPC and to learn more about the ports used for communication. + +::: + + +When gRPC is used with network proxies, the proxy servers may or may not support gRPC or require additional configuration to allow gRPC traffic to pass through. The following table summarizes the different scenarios and whether or not the proxy server supports gRPC. + + +| **Scenario** | **Description** | **Proxy Supported** | +|:-------------|:----------------|:--------------------| +| gRCP with HTTP/HTTPS - No SSL bump| gRPC traffic is sent over HTTP/HTTPS, and the proxy does not perform a Secure Socket Layer (SSL) bump. This is universally supported. | ✅ | +| gRPC with HTTP/HTTPS - SSL bump | gRPC traffic is sent over HTTP/HTTPS, and the proxy performs an SSL bump. Support varies by vendor. | ⚠️ | +| gRPC with [Squid](https://wiki.squid-cache.org) Open Source Proxy | gRPC traffic is sent over HTTP/HTTPS, and the proxy performs an SSL bump. Supported in some scenarios but requires additional configuration. 
| ❌ or ⚠️ | + + +The following sections provide more information about gRPC and proxies. + + +## Proxy Without SSL Bump + +Because gRPC is based on HTTP/2, any proxy server that supports the [HTTP CONNECT](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/CONNECT) method can be used to forward gRPC traffic. No configuration is required for this scenario. The exception is when the proxy server performs an SSL bump, discussed in the [Proxy With SSL Bump](/architecture/grps-proxy#proxywithsslbump) section. + + +:::info + +SSL bump is a technique used to decrypt and inspect HTTPS traffic. When SSL bump is enabled, the proxy server terminates the Transport Layer Security (TLS) connection and establishes a new TLS connection to the destination server. In this scenario, the proxy server must support gRPC and may require additional configuration. + +::: + +## Proxy With SSL Bump + +Several vendors provide proxy servers that support gRPC. Some of the vendors may require additional configurations or the use of a specific version of the proxy server. We encourage you to review your proxy server documentation for more information. + +When you review the vendor documentation, search for information about gRPC and HTTP/2. We provide the following links to some vendors' documentation that addresses HTTP/2 and gRPC support. + + +- [F5](https://my.f5.com/manage/s/article/K47440400) + + +- [Palo Alto](https://knowledgebase.paloaltonetworks.com/KCSArticleDetail?id=kA10g000000CmdQCAS) + + +- [Fortinet](https://docs.fortinet.com/document/fortigate/7.4.0/administration-guide/710924/https-2-support-in-proxy-mode-ssl-inspection) + + +- [Check Point](https://support.checkpoint.com/results/sk/sk116022) + + +## Squid Proxy With SSL Bump + +A common open-source proxy server is [Squid](https://wiki.squid-cache.org). Squid is a caching proxy for the Web supporting HTTP, HTTPS, FTP, and more. Squid supports gRPC but requires additional configuration. gRPC with SSL bump does not work with all versions of Squid, such as versions 5 and 6. Review the [SSL Bump issue](https://bugs.squid-cache.org/show_bug.cgi?id=5245) to learn more about the issue and track the progress of the fix. + +If you are using a Squid version not affected by the issue, you can configure Squid with SSL bump to support gRPC. Use the [Configuring SSL Bumping in the Squid service](https://support.kaspersky.com/KWTS/6.1/en-US/166244.htm) guide to learn how to configure Squid with SSL bump. Additionally, you may have to configure exclusion rules when using SSL bumping with gRPC. Refer to the [Adding exclusions for SSL Bumping](https://support.kaspersky.com/KWTS/6.1/en-US/193664.htm) to learn more. diff --git a/docs/docs-content/architecture/networking-ports.md b/docs/docs-content/architecture/networking-ports.md new file mode 100644 index 0000000000..06e9080f46 --- /dev/null +++ b/docs/docs-content/architecture/networking-ports.md @@ -0,0 +1,193 @@ +--- +sidebar_label: "Network Ports" +title: "Network Communication and Ports" +description: "Port-Direction-Purpose Management Platform and Workload Clusters" +icon: "" +hide_table_of_contents: false +sidebar_position: 20 +--- + + +Depending on what version of Palette you are using, the internal architecture and network communication will be different. Before Palette 4.0.0 the management platform communicated with the workload cluster via NATS. Starting with Palette 4.0.0, the management platform communicates with the workload cluster via gRPC. 
Use the tabs below to view the network communication and ports for each architecture. + + + + + + +## SaaS Network Communications and Ports + + + + +The following ports must be reachable from a network perspective for Palette SaaS to function correctly. + +![SaaS Network Diagram with ports](/architecture_networking-ports_saas-network-diagram.png "title=SaaS Network Diagram with ports") + + + +#### SaaS Managed + + +![SaaS network diagram displaying the network paths for edge](/architecture_networking-ports_saas-network-diagram-edge.png) + + + + +:::caution + +NATS is deprecated and will be removed in a future release. Starting with Palette 4.0.0, gRPC is used for all communication between the management platform and the workload cluster. + +::: + + +The following ports must be reachable from a network perspective for Palette to operate properly. + +## Management Platform + +|Port |Direction|Purpose | +|:---------------|:---------|:-----------------------| +|HTTPS (tcp/443) |INBOUND |Browser/API access to management platform .| +|HTTPS (tcp/443) |INBOUND |gRPC communication between Palette and the workload cluster.| +|NATS (tcp/4222) |INBOUND |Agent running inside connecting to management platform [Deprecated]| + + +## Workload Cluster + + +|Port |Direction | Purpose| +|:---------------|:---------|:--------------| +|HTTPS (tcp/443) |OUTBOUND | API access to management platform and gRPC| +|HTTPS (tcp/443) |OUTBOUND | gRPC, Registry (packs, integrations), Pack containers, Application Updates| +|NATS (tcp/4222) |OUTBOUND |Registry (packs, integrations), Pack containers, Application Updates [Deprecated]| + +:::info + +You can expose inbound port 22 for SSH if you would like to access your cluster nodes for troubleshooting remotely. This is entirely optional and not required for Palette to operate appropriately. + +::: + + +## Self-Hosted Network Communications and Ports + +The following ports must be reachable from a network perspective for Palette self-hosted to function correctly. + + +![On-prem network diagram](/architecture_networking-ports_network-diagram.png "#title="network diagram") + + + + +:::caution + +NATS is deprecated and will be removed in a future release. Starting with Palette 4.0.0, gRPC is used for all communication between the management platform and the workload cluster. + +::: + +## Management Platform + +|Port |Direction|Purpose | +|:---------------|:---------|:-----------------------| +|HTTPS (tcp/443) |INBOUND |Browser/API access to management platform, gRPC| +|NATS (tcp/4222) |INBOUND |Message Bus for workload clusters [Deprecated]| +|HTTPS (tcp/443) |OUTBOUND |vSphere vCenter API, Registry (packs, integrations), Pack containers, app updates, gRPC| +|HTTPS (tcp/6443)|OUTBOUND |Workload K8s cluster API Server| + + +## Workload Cluster + + +|Port |Direction | Purpose| +|:---------------|:---------|:--------------| +|HTTPS (tcp/443) |OUTBOUND | API access to management platform| +|NATS (tcp/4222) |OUTBOUND |Agent communication via message bus. [Deprecated] | +|HTTPS (tcp/443) |OUTBOUND |vSphere vCenter API, gRPC, Registry (packs, integrations), Pack containers, Application updates| + +:::info + +You can expose inbound port 22 for SSH if you would like to access your cluster nodes for troubleshooting remotely. This is entirely optional and not required for Palette to operate appropriately. + +::: + + + + + + + +## SaaS Network Communications and Ports + +The following ports must be reachable from a network perspective for Palette SaaS to function correctly. 
+ + +![SaaS Network Diagram with ports](/architecture_networking-ports_network-diagram_nats.png "title=SaaS Network Diagram with ports") + + + +#### SaaS Managed + + +![SaaS network diagram displaying the network paths for edge](/architecture_networking-ports_saas-network-diagram-edge_nats.png) + + +The following ports must be reachable from a network perspective for Palette to operate properly. + +## Management Platform + +|Port |Direction|Purpose | +|:---------------|:---------|:-----------------------| +|HTTPS (tcp/443) |INBOUND |Browser/API access to management platform| +|NATS (tcp/4222) |INBOUND |Agent running inside connecting to management platform| + + +## Workload Cluster + + +|Port |Direction | Purpose| +|:---------------|:---------|:--------------| +|HTTPS (tcp/443) |OUTBOUND | API access to management platform| +|NATS (tcp/4222) |OUTBOUND |Registry (packs, integrations), Pack containers, Application Updates| +|NATS (tcp/4222) |OUTBOUND |Registry (packs, integrations), Pack containers, Application Updates| + +:::info + +You can expose inbound port 22 for SSH if you would like to access your cluster nodes for troubleshooting remotely. This is entirely optional and not required for Palette to operate appropriately. + +::: + + +## Self-Hosted Network Communications and Ports + +The following ports must be reachable from a network perspective for Palette self-hosted to function correctly. + + +![On-prem network diagram](/architecture_networking-ports_on_prem_network-diagram.png "#title="network diagram") + +## Management Platform + +|Port |Direction|Purpose | +|:---------------|:---------|:-----------------------| +|HTTPS (tcp/443) |INBOUND |Browser/API access to management platform| +|NATS (tcp/4222) |INBOUND |Message Bus for workload clusters| +|HTTPS (tcp/443) |OUTBOUND |vSphere vCenter API, Registry (packs, integrations), Pack containers, app updates.| +|HTTPS (tcp/6443)|OUTBOUND |Workload K8s cluster API Server| + + +## Workload Cluster + + +|Port |Direction | Purpose| +|:---------------|:---------|:--------------| +|HTTPS (tcp/443) |OUTBOUND | API access to management platform| +|NATS (tcp/4222) |OUTBOUND |Agent communication via message bus | +|HTTPS (tcp/443) |OUTBOUND |vSphere vCenter API, Registry (packs, integrations), Pack containers, Application updates. + +:::info + +You can expose inbound port 22 for SSH if you would like to access your cluster nodes for troubleshooting remotely. This is entirely optional and not required for Palette to operate appropriately. + +::: + + + + diff --git a/docs/docs-content/architecture/orchestration-spectrocloud.md b/docs/docs-content/architecture/orchestration-spectrocloud.md new file mode 100644 index 0000000000..c24b12a0ca --- /dev/null +++ b/docs/docs-content/architecture/orchestration-spectrocloud.md @@ -0,0 +1,67 @@ +--- +sidebar_label: "Order of Operations" +title: "Order of Operations" +description: "The methods of workload cluster provisioning for K8S clusters with Palette" +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +--- + + +Spectro Cloud Palette provisions standard, upstream Kubernetes clusters using [Cluster API](https://cluster-api.sigs.k8s.io/). + +Cluster API is a Kubernetes sub-project focused on providing declarative APIs and tooling to simplify provisioning, upgrading, and operating multiple Kubernetes clusters. 
+
+Cluster API declaratively manages the lifecycle of a cluster (creation, scaling, upgrade, and deletion) and helps automate cluster lifecycle management for platform operations. Cluster API also enables consistent, repeatable cluster deployments across multiple infrastructure environments.
+
+ +## Workload Cluster Provisioning + +![workload_cluster_provisioning.png](/architecture_orchestartion-spectrocloud_provision-flow.png) + + +1. New K8S cluster request from user submitted to the cluster management system. + + +2. Palette creates the Cluster-API (CAPI) custom-resource specifications for the target cloud, e.g in VMware this would translate to: Cluster, vSphereCluster, KubeadmControlPlane (with replica 1), and VSphereMachineTemplate. These resources are created in the management cluster. + + +3. Cluster API and corresponding cluster-api provider, e.g: cluster-api-provider-vsphere, provisions the first control-plane node CP-A on the target cloud. + + +4. When CP-A is operational, the management platform will install a Palette agent into the workload cluster and then perform a pivot of the Cluster API resources. + + +5. CP-A agent will retrieve the latest specifications for the cluster, e.g: 3 control-plane, and 3 workers. CP-A will generate and update the remaining CAPI resources, e.g: update replicas to 3 for KubeadmControlPlane, create the worker's MachineDeployment or VSphereMachineTemplate. Cluster API running in CP-A will provision the remaining control plane and worker nodes. + + +6. The Palette agent will install all the additional add-ons as specified by the cluster's cluster profile (e.g: logging, monitoring, security). + + :::info + + We do not hard code credentials. Palette uses the *cloud-init* process to inject the user-defined SSH keys into the clusters. + + ::: + + +## Why Palette Pivots? + + +Palette's decentralized model is based on a "decentralized management - local policy enforcement" scalable architecture. + + + ![distributed_orchestration.png](/architecture_orchestartion-spectrocloud_distributed-flow.png) + + +As part of the workload K8s cluster provisioning, only the first control-plane node is launched by Cluster API, running in the Palette management cluster. Once the control-plane node is operational, Cluster API resources are _pivoted_ from the management platform into the target workload cluster. + +The target workload cluster is then responsible for provisioning and maintaining the remaining control-plane and worker nodes. All Day-2 operations which result in node changes, including OS/K8s upgrades, scaling, and K8s certificate rotation, are triggered by changes to the Cluster API resources in the target workload cluster. + +Palette pivots these clusters for several reasons, related to scalability and availability: + +* **Scalability** - The management platform scales to meet the demand of all your workload clusters as the number of tenant clusters and nodes increases in size. + +* **Resiliency** - Even if the management platform were to experience an outage, the workload clusters would retain their resiliency capabilities, auto-recovery, launching of new nodes on failures, auto-scaling, and other policies still work! + +* **Intermittent network resiliency** - The design supports use cases where the workload clusters can still operate in intermittent and disconnected network availability situations. 
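+
+To make the pivot concrete, the Cluster API objects created in the management cluster and later moved into the workload cluster look roughly like the following. This is a minimal sketch for a vSphere cluster; the names, namespace, replica count, and Kubernetes version are illustrative only:
+
+```yaml
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: demo-cluster
+  namespace: cluster-demo
+spec:
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+    kind: KubeadmControlPlane
+    name: demo-cluster-control-plane
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: VSphereCluster
+    name: demo-cluster
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+  name: demo-cluster-control-plane
+  namespace: cluster-demo
+spec:
+  replicas: 3                 # starts at 1 and is scaled up after the pivot
+  version: v1.26.4
+  kubeadmConfigSpec: {}       # bootstrap configuration omitted for brevity
+  machineTemplate:
+    infrastructureRef:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+      kind: VSphereMachineTemplate
+      name: demo-cluster-control-plane-template
+```
+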
diff --git a/docs/docs-content/architecture/palette-namespaces-podes.md b/docs/docs-content/architecture/palette-namespaces-podes.md new file mode 100644 index 0000000000..b8aa1a445f --- /dev/null +++ b/docs/docs-content/architecture/palette-namespaces-podes.md @@ -0,0 +1,144 @@ +--- +sidebar_label: "Namespaces and Pods" +title: "Palette Specific Namespaces and Pods" +description: "Palette specific namespaces and pods mapping" +icon: "" +hide_table_of_contents: false +sidebar_position: 50 +--- + + +The page encompasses the set of Palette specific namespaces and pods belonging to each of these individual namespaces. +The information is organised as follows: + +* Namespace-Pod mapping for Palette Tenant Cluster +* Namespace-Pod mapping for Palette Gateways (PCG) +* Namespace-Pod mapping for Palette On-Prem Enterprise Cluster + +## Palette Tenant Cluster NameSpaces with Pods + +The following table gives the namespace to pod mapping for Palette Tenant Cluster. + +|PALETTE NAMESPACES | PODS | +|------|-------| +|capi-webhook-system |capi-controller-manager-< UUID>| +||capi-kubeadm-bootstrap-controller-manager-< UUID >| +||capi-kubeadm-control-plane-controller-manager-< UUID >| +|cert-manager|cert-manager-< UUID >| +||cert-manager-cainjector-< UUID> | +||cert-manager-webhook-< UUID > | +|cluster-< UUID > | capi-controller-manager-< UUID > +||capi-kubeadm-bootstrap-controller-manager-< UUID > +||capi-kubeadm-control-plane-controller-manager-< UUID > +||capv-controller-manager-< UUID > +||cluster-management-agent-< UUID > +|cluster-< UUID > |metrics-server-< UUID > | +| |palette-controller-manager-< UUID >| +|kube-system |calico-kube-controllers-< UUID > +| |calico-node-< UUID > +| |coredns-< UUID > +| |etcd-vmdynamictest-cp-< UUID >| +| |kube-apiserver-vmdynamictest-cp-< UUID > | +| |kube-controller-manager-vmdynamictest-cp-< UUID > | +| |kube-proxy-< UUID > | +| |kube-scheduler-vmdynamictest-cp-< UUID > | +| |kube-vip-vmdynamictest-cp-< UUID > | +|reach-system |reach-controller-manager-< UUID > | + + + +## Palette PCG NameSpaces with Pods + +The following table gives the namespace to pod mapping for Palette vSphere Gateway. 
+ +|PALETTE NAMESPACES|PODS | +|--|--| +|capi-webhook-system|capi-controller-manager-< UUID > | +| |capi-kubeadm-bootstrap-controller-manager-< UUID > | +| |capi-kubeadm-control-plane-controller-manager-< UUID > | +| |capv-controller-manager-< UUID > | +|cert-manager | cert-manager-< UUID > | +| | cert-manager-cainjector-< UUID > | +| | cert-manager-webhook-< UUID > | | +|cluster-< UUID > |capi-controller-manager-< UUID >| +| |capi-kubeadm-bootstrap-controller-manager-< UUID > | +| |capi-kubeadm-control-plane-controller-manager-< UUID > | +| | capv-controller-manager-< UUID > | +| | capv-static-ip-controller-manager-< UUID > | +| |cluster-management-agent-< UUID > | +| | ipam-controller-manager-< UUID > | metrics-server-< UUID > | +| | palette-controller-manager-< UUID > | +|jet-system | jet-< UUID > | +|| spectro-cloud-driver-< UUID > | +|kube-system |calico-kube-controllers-< UUID > | +| |calico-node-< UUID > | |coredns-< UUID > | | || | coredns-< UUID > | +| | etcd-gateway1-cp-< UUID > | +| | kube-apiserver-gateway1-cp-< UUID > | +| | kube-controller-manager-gateway1-cp-< UUID > | | +| | kube-proxy-< UUID > || +| |kube-scheduler-gateway1-cp-< UUID > || +| | kube-vip-gateway1-cp-< UUID |> +| | vsphere-cloud-controller-manager-< UUID > | | +| | vsphere-csi-controller-< UUID > || +| | vsphere-csi-node-< UUID > || +|reach-system | reach-controller-manager-< UUID > | + +## Enterprise NameSpaces with Pods + +The following table gives the namespace to pod mapping for Palette On-Prem Enterprise Clusters. + +|PALETTE NAMESPACES|PODES| +|--|--| +| capi-webhook-system |capi-controller-manager-< UUID > +| |capi-kubeadm-bootstrap-controller-manager-< UUID > +| |capi-kubeadm-control-plane-controller-manager-< UUID > +| |capv-controller-manager-< UUID > +| |ipam-controller-manager-< UUID > +|cert-manager |cert-manager-< UUID > +| |cert-manager-cainjector-< UUID > +| |cert-manager-webhook-< UUID > +|cluster-mgmt-< UUID >|capi-kubeadm-bootstrap-controller-manager-< UUID > +| |capi-kubeadm-control-plane-controller-manager-< UUID > +| |capv-controller-manager-< UUID > +| |capv-static-ip-controller-manager-9< UUID > +| |cluster-management-agent-< UUID > +| |ipam-controller-manager-< UUID > +| |metrics-server-< UUID > +| |palette-controller-manager-< UUID > +|cp-system| spectro-cp-ui-< UUID > +|hubble-system |auth-< UUID >|auth-< UUID > +| | cloud-fb8< UUID > +| | configserver-< UUID > +| | event-< UUID > +| | hashboard-< UUID > +| | hutil-< UUID > +| | mgmt-< UUID > +| | mongo-0 +| | mongo-1 +| | mongo-2 +| | packsync-1< UUID > +| | spectrocluster-< UUID > +| | system-< UUID > +| | timeseries-< UUID > +| | user-< UUID > +|ingress-nginx |ingress-nginx-admission-create-spwch +| |ingress-nginx-admission-patch-< UUID > +| |ingress-nginx-controller-< UUID > +|jet-system| jet-< UUID > +|kube-system|calico-kube-controllers-< UUID > +| | calico-node-< UUID > +| | calico-node-w< UUID > +| | coredns-< UUID > +| | etcd-vsphere-spectro-mgmt-cp-< UUID > +| | kube-apiserver-vsphere-spectro-mgmt-cp-< UUID > +| | kube-controller-manager-vsphere-spectro-mgmt-cp-< UUID > +| | kube-proxy-bl< UUID > +| | kube-proxy-l< UUID > +| | kube-scheduler-vsphere-spectro-mgmt-cp-< UUID > +| | kube-vip-vsphere-spectro-mgmt-cp-< UUID > +| | vsphere-cloud-controller-manager-< UUID > +| | vsphere-csi-controller-df< UUID > +| | vsphere-csi-node-< UUID > +| | vsphere-csi-node-rhm< UUID > +|nats-system| nas-< UUID > +|ui-system |spectro-ui-< UUID > diff --git a/docs/docs-content/architecture/palette-public-ips.md 
b/docs/docs-content/architecture/palette-public-ips.md new file mode 100644 index 0000000000..19017fb5fa --- /dev/null +++ b/docs/docs-content/architecture/palette-public-ips.md @@ -0,0 +1,75 @@ +--- +sidebar_label: "IP Addresses" +title: "IP Addresses" +description: "Palette's public IP Addresses." +icon: "" +hide_table_of_contents: false +sidebar_position: 40 +--- + + +In this section, you can find the public IP addresses that support Spectro Cloud SaaS operations. These IP addresses are essential to ensure seamless communication between your infrastructure and our platform. + + +## IP Address Ranges + +Allow the following IP address ranges in your network configuration to enable traffic to and from the Spectro Cloud SaaS platform. + +| **IP Address**| **Region** | +|---|---| +|44.232.106.120 | North West U.S. | +|44.233.247.65 | North West U.S. | +|52.35.163.177 | North West U.S. | +|13.52.68.206 | South West U.S. | +|18.144.153.171 | South West U.S. | +|52.6.49.233 | North East U.S. | +|54.158.209.13 | North East U.S. | +|54.80.29.137 | North East U.S. | + + +## Palette Domains + +Palette uses the following domains for communication between the management platform and the workload cluster. + +
+ +:::caution + +NATS and the associated port, 4222, are deprecated and will be removed in a future release. Starting with Palette 4.0.0, gRPC is used for all communication between the management platform and the workload cluster. + +::: + + + +|Domain |Ports | +|:---------------|:---------| +|api.spectrocloud.com |443 | +|api1.spectrocloud.com |443 | +|api2.spectrocloud.com |443 | +|api3.spectrocloud.com |443 | +|message.spectrocloud.com |443, 4222 | +|message1.spectrocloud.com |443, 4222 | +|message2.spectrocloud.com |443, 4222 | +|message3.spectrocloud.com |443, 4222 | +|msg.spectrocloud.com |443 | +|msg1.spectrocloud.com |443 | +|msg2.spectrocloud.com |443 | +|msg3.spectrocloud.com |443 | +|console.spectrocloud.com |443 | +|proxy.console.spectrocloud.com |443 | +|registry.spectrocloud.com |443 | +|saas-repo.console.spectrocloud.com |443 | +|registry.spectrocloud.com |443 | +|maasgoldenimage-console.s3.amazonaws.com |443 | +|openstackgoldenimage-console.s3.amazonaws.com |443 | +|edgegoldenimage-console.s3.amazonaws.com |443 | +|vmwaregoldenimage-console.s3.amazonaws.com |443 | +| registry1.spectrocloud.com |443 | +| registry2.spectrocloud.com |443 | +| registry3.spectrocloud.com |443 | +| 415789037893.dkr.ecr.us-east-1.amazonaws.com |443 | +| 415789037893.dkr.ecr.us-west-2.amazonaws.com |443 | + + +
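+
+A quick way to confirm that a node or gateway can reach these endpoints is to test the ports directly. The commands below are a sketch; substitute whichever domain and port applies to your Palette version:
+
+```shell
+# Confirm HTTPS (443) reachability to the Palette API endpoint.
+curl --silent --head https://api.spectrocloud.com
+
+# Confirm NATS (4222) reachability. Only required for Palette versions earlier than 4.0.0.
+nc -vz message.spectrocloud.com 4222
+```
+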
+ diff --git a/docs/docs-content/audit-logs/_category_.json b/docs/docs-content/audit-logs/_category_.json new file mode 100644 index 0000000000..0f4d23305c --- /dev/null +++ b/docs/docs-content/audit-logs/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 150 +} diff --git a/docs/docs-content/audit-logs/audit-logs.md b/docs/docs-content/audit-logs/audit-logs.md new file mode 100644 index 0000000000..03393c20a7 --- /dev/null +++ b/docs/docs-content/audit-logs/audit-logs.md @@ -0,0 +1,94 @@ +--- +sidebar_label: "Audit Logs" +title: "Audit Logs" +description: "Learn about auditing in Palette and how to access audit logs." +hide_table_of_contents: false +sidebar_custom_props: + icon: "admin" +--- + + + +The Spectro Cloud management platform application captures audit logs to track the user interaction with the application resources along with the timeline. For certain resources, the system-level modifications are also captured in the audit logs. + +The audit log contains information about the resource and the user who performed the action. The user or the system action on the resource is classified as *Create*, *Update*, and *Delete*. Every resource is categorized as a type that helps the user to scope down the audit logs. + +Audit logs are retained for the last one year. + +## Accessing Audit Logs + +Audits can be accessed for the tenant scope and the project scope. The tenant scope audits show all the activity logs across all projects and tenant actions. The project scope audits show the activity logs for the specific project. + +* The tenant scope audit logs can be accessed in the Spectro Cloud console under the **Admin > Audit Logs**. The user should have the *Tenant Admin* role or at least the `audit.get` and `audit.list` permissions at the tenant scope to access the audit logs. +* The project scope audit logs can be accessed under the **Project** *selection* > **Audit Logs**. The user should have at least the *Project Viewer* role with `audit.get` and `audit.list` permissions for the selected project to access the audit logs. +* Tenant admins (or users with appropriate permissions) can download the audit logs as a *.csv file. + +## Filtering Audit Logs + +The audit logs can be filtered based on user and resource attributes. The following attributes can be used to filter the audit logs: + +* Type - The action type on the resource. +* Resource Type - The resource type. (The resources are grouped based on the type). +* Start Date and End Date - Period range for the audit logs. + +## Adding Update Note + +For certain resources like the Cluster Profile, users can associate a custom update note in addition to the generic audit event log. On a successful save of the Cluster Profile, the user will be prompted to provide an update note about the changes made on the profile. This message will be shown when the user selects an audit log from the list. + +## Pushing the Audit Log to the AWS Cloud Trail + +Spectro Cloud users can now push the compliance, management, operational, and risk audit logs to the AWS CloudTrail. This enables continuous monitoring, security analysis, resource tracking, and troubleshooting of the workload cluster using the event history. + +
+ +:::caution +An AWS account with cloud trail created is the prerequisite. + +The permissions listed need to be enabled for CloudWatch. +::: + +### Permission List + +Ensure that the IAM user or the ROOT user role created should have the following IAM policy included for Amazon CloudWatch: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "logs:DescribeLogGroups", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DeleteLogStream", + "logs:DescribeLogStreams" + ], + "Resource": [ + "" + ] + } + ] +} +``` +### Instructions to Push Cluster Audit Logs to AWS Trails + +* Go to Admin Settings and select Audit Trails. +* Select the wizard ‘Add new Audit Trail’ and fill in the following details: + + * Audit Name: Custom name to identify the logs + * Type: Choice of monitoring service (currently set to AWS Cloud Watch) + * Group: The log group name obtained from cloud watch logs of AWS cloud trail creation + * Region: The region of the AWS account + * Method of verification: + * Credentials: +Use the AWS Access Key and Secret Access Key to validate the AWS account for pushing the Audit log trails from Spectro Cloud console. + * STS: +Use Amazon’s unique resource identifier- ARN, to validate the AWS account for pushing the Audit log trails from Spectro Cloud console. + +* Stream Optional. +* Confirm the information to complete the audit trail creation wizard. +* The audit trail could be edited and deleted using the **three-dot Menu**. + + diff --git a/docs/docs-content/audit-logs/kube-api-audit-logging.md b/docs/docs-content/audit-logs/kube-api-audit-logging.md new file mode 100644 index 0000000000..5455b836d4 --- /dev/null +++ b/docs/docs-content/audit-logs/kube-api-audit-logging.md @@ -0,0 +1,202 @@ +--- +sidebar_label: "Enable Audit Logging" +title: "Enable API Audit Logging" +description: "Learn how to configure the kube-apiserver audit logging feature for Palette." +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +--- + + +Kubernetes auditing is a feature of the Kubernetes cluster management system that allows administrators to track and log events within the cluster. Administrators can review actions taken by users and applications and changes to the cluster's configuration or state. By enabling auditing, organizations and system administrators can better understand their users' actions and behaviors. The audit log answers common questions about what, where, when, and who. + +You can also meet internal security control requirements by enabling audit logging. Many security controls require the following capabilities. + +- ensuring administrators can trace the actions of individual users back to a specific person. + + +- to debug an issue where an unknown application is modifying resources + +The guidance on this page is based on the upstream Kubernetes documentation and `kube-apiserver` source code. Follow the steps below to enable audit logging for the Kubernetes API server. + +
+
+:::caution
+
+Enabling audit logging causes the API server to consume more memory, as it needs to store additional context for each request to facilitate auditing. Memory consumption depends on the audit logging configuration.
+
+:::
+
+## Prerequisites
+
+- Access to a Kubernetes cluster node.
+
+
+- Write access to the file system.
+
+
+- Remote access to the Kubernetes cluster master nodes.
+
+
+
+## Enable Auditing
+
+The Kubernetes API Audit policies define the rules for capturing events and specifying the level of detail to include.
+The audit policy you create will capture all requests at the *metadata* level. To learn more about the various audit levels, visit the Kubernetes API [Audit Policy](https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/#audit-policy) documentation.
+
+
+1. Log in to [Palette](https://console.spectrocloud.com).
+
+
+2. Identify one of your cluster control-plane nodes. You can find a cluster node by navigating to the left **Main Menu** and selecting **Clusters**. Click on your cluster to access the details page and click on the **Nodes** tab. The tab contains information about each pool. Select a node from the **Master Pool** to view its IP address.
+
+
+3. SSH into one of your control-plane nodes using its IP address and the SSH key you specified during the cluster creation process.
+
+
+
+4. From a control-plane node in the target cluster, issue the following command to create your audit policy file.
+
+ + ```bash + cat << EOF > /etc/kubernetes/audit-policy.yaml + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + - level: Metadata + EOF + ``` + The audit log output will be written to a file located at **/var/log/kubernetes/audit/audit.log**. In production environments, you should ensure this log file is ingested by a logging and monitoring application. + + The **/var/log/kubernetes/audit/** directory should be backed by persistent storage to ensure that any unshipped audit logs will not be lost during an unexpected outage of the node. + +
+ +5. Next, you will update the Kubernetes API server manifest file. The manifest file is located in the **/etc/kubernetes/manifests** folder. +Before you modify the manifest file, create a backup copy. + +
+ + ```shell + cp /etc/kubernetes/manifests/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.backup + ``` + +6. Now that you have a backup copy of the manifest file go ahead and open up the file **/etc/kubernetes/manifests/kube-apiserver.yaml** in a text editor such as Vi or Nano. + +
+ + ```shell + vi /etc/kubernetes/manifests/kube-apiserver.yaml + ``` + + Append the following YAML configuration to your kube-apiserver manifest. + +
+ + ```yaml + volumeMounts: + - mountPath: /etc/kubernetes/audit-policy.yaml + name: audit + readOnly: true + - mountPath: /var/log/kubernetes/audit/ + name: audit-log + readOnly: false + volumes: + - name: audit + hostPath: + path: /etc/kubernetes/audit-policy.yaml + type: File + - name: audit-log + hostPath: + path: /var/log/kubernetes/audit/ + type: DirectoryOrCreate + ``` + +7. The next step is to update the Kubernetes API parameters with audit settings. +The top of the file contains the Kubernetes API parameters. Refer to the code snippet below to determine where to place these parameters. + +
+ + ```yaml + spec: + containers: + - command: + - kube-apiserver + - --advertise-address=172.18.0.2 + - --allow-privileged=true + - --authorization-mode=Node,RBAC + ``` + +8. Go ahead and add the following audit parameters under the `- kube-apiserver` line. + +
+ + ```shell + - --audit-policy-file=/etc/kubernetes/audit-policy.yaml + - --audit-log-path=/var/log/kubernetes/audit/audit.log + - --audit-log-batch-max-size=5 + - --audit-log-compress + - --audit-log-format=json + - --audit-log-maxage=30 + - --audit-log-maxbackup=100 + - --audit-log-maxsize=50 + - --audit-log-mode=batch + - --audit-log-truncate-enabled + - --audit-log-truncate-max-batch-size=10485760 + - --audit-log-truncate-max-event-size=102400 + - --audit-log-version=audit.k8s.io/v1 + ``` + +9. Save your changes and exit the file. When you exit the file, the changes will automatically get picked up by the Kubelet process and applied. + +You can also add the following Kubernetes API parameters to fine-tune the audit logging. + +| Parameter | Type | Description | +|-----------------------------------|----------|-------------------------------------------------------------------------------------------------------------------------------| +| `--audit-log-batch-max-wait` | duration | The amount of time to wait before force writing the batch that hadn't reached the max size. Only used in batch mode. Ex: `"5s"` | +| `--audit-log-batch-throttle-enable` | boolean | Whether batching throttling is enabled. Only used in batch mode. | +| `--audit-log-batch-throttle-qps` | float | The maximum average number of batches per second. Only used in batch mode | + + +To learn more about each of the Kubernetes API server flags, visit the Kubernetes API parameter [documentation page](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/). + +## Validate + +You can validate that audit logs are captured by navigating to the specified audit folder in the `--audit-log-path` parameter. + + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Identify one of your cluster contro-plane nodes. You find a cluster node by navigating to the left **Main Menu** and selecting **Clusters**. Click on your cluster to access the details pages and click on the **Nodes** tab. The tab contains information about each pool, select a node from the **Master Pool** to view its IP address. + + +3. SSH into one of your control-plane nodes using its IP address and the SSH key you specified during the cluster creation process. + + + +4. From a control-plane node in the target cluster, you can validate that audit logs are captured by reviewing the audit log file in the specified audit folder you specified in the `--audit-log-path` parameter. + + +5. Display the audit file content by using the following command. Replace the file path with the audit folder you specified in the `--audit-log-path` parameter. + +
+ + ```shell + cat /var/log/kubernetes/audit/audit.log + ``` + + Example Output. + ```shell hideClipboard + {"kind":"Event","apiVersion":"audit.k8s.io/v1","level":"Metadata","auditID":"3cb20ec3-e944-4059-873c-078342b38fec","stage":"ResponseComplete","requestURI":"/apis/coordination.k8s.io/v1/namespaces/cluster-63a1ee9100663777ef2f75c8/leases/kubeadm-bootstrap-manager-leader-election-capi","verb":"update","user":{"username":"system:serviceaccount:cluster-63a1ee9100663777ef2f75c8:palette-manager","uid":"e728f219-d5e8-4a44-92c4-5ddcf22ce476","groups":["system:serviceaccounts","system:serviceaccounts:cluster-63a1ee9100663777ef2f75c8","system:authenticated"],"extra":{"authentication.kubernetes.io/pod-name":["capi-kubeadm-bootstrap-controller-manager-688596bc4b-pxmmh"],"authentication.kubernetes.io/pod-uid":["a0e9a0fd-0812-434e-a1a4-b8af9bb98a87"]}},"sourceIPs":["192.168.161.18"],"userAgent":"manager/v0.0.0 (linux/amd64) kubernetes/$Format/leader-election","objectRef":{"resource":"leases","namespace":"cluster-63a1ee9100663777ef2f75c8","name":"kubeadm-bootstrap-manager-leader-election-capi","uid":"8e70db1f-a26c-4af5-a558-78e860ae9903","apiGroup":"coordination.k8s.io","apiVersion":"v1","resourceVersion":"13660827"},"responseStatus":{"metadata":{},"code":200},"requestReceivedTimestamp":"2023-01-18T20:35:29.755649Z","stageTimestamp":"2023-01-18T20:35:29.760586Z","annotations":{"authorization.k8s.io/decision":"allow","authorization.k8s.io/reason":"RBAC: allowed by ClusterRoleBinding "palette-manager-admin-rolebinding" of ClusterRole "cluster-admin" to ServiceAccount "palette-manager/cluster-63a1ee9100663777ef2f75c8""}} + ``` + +## Resources + +- [Kubernetes API parameters](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/) + + +- [Kubernetes Auditing Documentation](https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/) + +
diff --git a/docs/docs-content/cluster-profiles/_category_.json b/docs/docs-content/cluster-profiles/_category_.json new file mode 100644 index 0000000000..e7e7c54966 --- /dev/null +++ b/docs/docs-content/cluster-profiles/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 40 +} diff --git a/docs/docs-content/cluster-profiles/byoos/_category.json b/docs/docs-content/cluster-profiles/byoos/_category.json new file mode 100644 index 0000000000..ae9ddb024d --- /dev/null +++ b/docs/docs-content/cluster-profiles/byoos/_category.json @@ -0,0 +1,3 @@ +{ + "position": 50 +} diff --git a/docs/docs-content/cluster-profiles/byoos/byoos.md b/docs/docs-content/cluster-profiles/byoos/byoos.md new file mode 100644 index 0000000000..26dee6e42e --- /dev/null +++ b/docs/docs-content/cluster-profiles/byoos/byoos.md @@ -0,0 +1,36 @@ +--- +sidebar_label: "Bring Your Own OS (BYOOS)" +title: "Bring Your Own OS (BYOOS)" +description: "Learn how to use your own OS images with a cluster profile" +icon: "" +hide_table_of_contents: false + +--- + + +With Palette, you can bring your own operating system and use it with your Kubernetes clusters using the [Bring Your Own Operating System (BYOOS)](../../glossary-all.md#bringyourownoperatingsystem(byoos)) feature. The BYOOS pack allows you to upload your own OS images, configure the necessary drivers, and customize the OS to meet your specific requirements. + + + +Bringing your own operating system provides several benefits, including the ability to control your own dependencies, improve performance, and ensure compatibility with your existing applications. With BYOOS, you can choose the OS that best fits your needs, whether it's a commercial or open-source distribution, and integrate it with your Kubernetes clusters. + + + +The BYOOS feature is especially useful for enterprises and organizations that have strict requirements around security, compliance, or specific hardware configurations. With the ability to bring your own OS, you can ensure that your Kubernetes clusters meet these requirements, without compromising on performance or functionality. + + + +BYOOS in Palette gives you greater flexibility, control, and customization options when managing your Kubernetes clusters. You can tailor your OS to your specific needs, ensuring your clusters perform optimally and meet your organization's unique requirements. + + + +To learn more about BYOOS, use the following resources to learn more. + +## Resources + +- [Create Images with Image Builder](image-builder.md) + + +- [BYOOS Pack](../../integrations/byoos.md) + +
\ No newline at end of file diff --git a/docs/docs-content/cluster-profiles/byoos/image-builder.md b/docs/docs-content/cluster-profiles/byoos/image-builder.md new file mode 100644 index 0000000000..c4914bd3f3 --- /dev/null +++ b/docs/docs-content/cluster-profiles/byoos/image-builder.md @@ -0,0 +1,312 @@ +--- +sidebar_label: "Create Images with Image Builder" +title: "Create Images with Image Builder" +description: "Learn how to use the Image Builder project to create images for Palette" +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +--- + + +You can create and deploy custom images to most infrastructure providers using various tools. Many infrastructure providers have tools that you can use to create custom images for the platform, such as [AWS EC2 Image Builder](https://aws.amazon.com/image-builder/) for AWS or [Azure VM Image Builder](https://azure.microsoft.com/en-us/products/image-builder) for Azure. You can also use platform-agnostic tools, such as [HashiCorp Packer](https://developer.hashicorp.com/packer), or something more tailored to Kubernetes, such as the [Kubernetes Image Builder](https://image-builder.sigs.k8s.io/introduction.html) (KIB) project. + + +## Kubernetes Image Builder + +KIB is a project designed to help users create images for various platforms. The project is a consolidation of multiple tools that work together to create an artifact, or in simpler terms, a custom image. + +You can use the custom images created by KIB with Palette, assuming the infrastructure provider is supported in Palette. Use the following diagram to understand how you can use KIB to create custom images that you can use with Palette. + +![A diagram displaying the steps for creating a custom image](/cluster-profiles_byoos_image-builder_workflow-diagram.png)
+ +1. You will download the KIB project and configure the image builder's **packer.json** file. + + +2. Use the `make` command to create a custom image containing a specific Operating System (OS) version and flavor. + + +3. The custom image is created and distributed to the target regions you specified in the **packer.json** file. + + +4. Create a cluster profile pointing to your custom image. + + +5. Deploy a host cluster using your cluster profile containing the custom image. + + +This guide will teach you how to use the Kubernetes Image Builder to create images for your infrastructure provider so that you can use the custom image in a cluster profile. + +### Prerequisites + + +* Palette v3.4.0 or greater. + + +* [Git](https://git-scm.com/downloads) v2.39.1 or greater. + + +* Access credentials to the target infrastructure provider. KIB, with the help of Packer, deploys a compute instance to the target environment during the image creation process. + + +* The cloud provider you choose may have different requirements. Review the KIB [documentation](https://image-builder.sigs.k8s.io/capi/capi.html) for your provider to learn more about the provider prerequisites. + + +* [HashiCorp Packer](https://developer.hashicorp.com/packer/tutorials/docker-get-started/get-started-install-cli) v1.8.6 or greater installed. You can verify the tool versions with the commands shown after this list. + +<br />
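+ +If you want to quickly confirm that the Git and Packer prerequisites are met, you can check the installed tool versions from your terminal. This is an optional sanity check and is not part of the official KIB workflow. + + ```shell + # Confirm the installed Git version meets the v2.39.1 requirement. + git --version + + # Confirm the installed Packer version meets the v1.8.6 requirement. + packer version + ```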
+ +:::caution + +To use a commercial OS, you must provide the license before starting the image creation process. + +::: + +### Create an Image + +The following steps will guide you through creating your image. You will create a custom Red Hat Enterprise Linux (RHEL) image for Amazon Web Services (AWS). RHEL is a commercial product, so you will need license subscription credentials, but you can use the same steps for a non-RHEL image. The key takeaway in this guide is how to use KIB to create the image. + +<br />
+ +1. Clone the KIB repository. + +
+ + + + + ```shell + git clone https://github.com/kubernetes-sigs/image-builder.git + ``` + + + + + ```shell + git clone git@github.com:kubernetes-sigs/image-builder.git + ``` + + + + + +2. Change into the image builder directory. + +<br />
+ + ```shell + cd image-builder/images/capi + ``` + +3. Open up the image builder [documentation site](https://image-builder.sigs.k8s.io/introduction.html) in your web browser and review the steps for the infrastructure provider you want to build an image for. + + + +4. If you are using a commercial OS such as RHEL, set the required environment variables per the KIB documentation. For RHEL, the following environment variables are required. Replace the placeholder values with your actual credentials. + +
+ + ```shell + export RHSM_USER=REPLACE_ME + export RHSM_PASS=REPLACE_ME + ``` + + If you want to debug the Packer compute instance in case of an error, set the following environment variable. The Packer flag allows you to connect remotely to the instance instead of Packer's default behavior of terminating the instance. + +<br />
+ + ```shell + export PACKER_FLAGS=-on-error=ask + ``` + +5. Navigate to the **packer** folder and open up the folder for the target infrastructure provider. Review the file **packer.json**. Make any configuration changes you desire, such as the Kubernetes version, cloud credentials, network settings, instance size, image regions, and so on. You must make changes in the file's `variables` section. The `variables` object below is condensed for illustrative purposes. + +<br />
+ + ```json hideClipboard + "variables": { + ... + "ami_groups": "", + "ami_regions": "us-east-1, us-west-2", + "ami_users": "", + "ansible_common_vars": "", + "ansible_extra_vars": "", + "ansible_scp_extra_args": "", + "ansible_user_vars": "", + "aws_access_key": "", + "aws_profile": "", + "aws_region": "us-east-1", + "aws_secret_key": "", + "aws_security_group_ids": "", + "aws_session_token": "", + "build_timestamp": "{{timestamp}}", + "builder_instance_type": "m5.xlarge", + .... + }, + ``` + +
+ + :::info + + The file **packer.json** contains many variables you can use to customize the image. We recommend you review the KIB [documentation](https://image-builder.sigs.k8s.io/capi/capi.html) for your provider as it explains each variable. + + ::: + + + +6. Set the credentials for your infrastructure provider. Each infrastructure provider supports different methods for providing credentials to Packer. You can review each infrastructure provider's authentication section by visiting the [Packer plugins site](https://developer.hashicorp.com/packer/plugins) and selecting your provider on the left **Main Menu**. + + + +7. Next, find the `make` command for your provider. You can use the following command to get a list of all available RHEL options. Replace the `grep` filter with the provider you are creating an image for. + +
+ + ```shell + make | grep rhel + ``` + + Output: + ```shell hideClipboard + build-ami-rhel-8 Builds RHEL-8 AMI + build-azure-sig-rhel-8 Builds RHEL 8 Azure managed image in Shared Image Gallery + build-azure-vhd-rhel-8 Builds RHEL 8 VHD image for Azure + build-node-ova-local-rhel-7 Builds RHEL 7 Node OVA w local hypervisor + build-node-ova-local-rhel-8 Builds RHEL 8 Node OVA w local hypervisor + ... + ``` + +8. Issue the `make` command that aligns with your target provider. In this example, `build-ami-rhel-8 ` is the correct command for an RHEL AWS AMI creation. + +
+ + ```shell + make build-ami-rhel-8 + ``` + + Output: + ```shell hideClipboard + amazon-ebs.{{user `build_name`}}: output will be in this color. + + ==> amazon-ebs.{{user `build_name`}}: Prevalidating any provided VPC information + ==> amazon-ebs.{{user `build_name`}}: Prevalidating AMI Name: capa-ami-rhel-8-v1.24.11-1683320234 + amazon-ebs.{{user `build_name`}}: Found Image ID: ami-0186f9012927dfa39 + ==> amazon-ebs.{{user `build_name`}}: Creating temporary keypair: packer_64556dab-95d3-33e6-bede-f49b6ae430cb + ==> amazon-ebs.{{user `build_name`}}: Creating temporary security group for this instance: packer_64556dae-fb5a-3c7d-2106-1c8960c6d60e + ==> amazon-ebs.{{user `build_name`}}: Authorizing access to port 22 from [0.0.0.0/0] in the temporary security groups... + ==> amazon-ebs.{{user `build_name`}}: Launching a source AWS instance... + amazon-ebs.{{user `build_name`}}: Instance ID: i-06a8bf22b66abc698 + .... + ``` + +9. Once the build process is complete, note the image ID. + +
+ + ```shell hideClipboard + Build 'amazon-ebs.{{user `build_name`}}' finished after 22 minutes 29 seconds. + + ==> Wait completed after 22 minutes 29 seconds + + ==> Builds finished. The artifacts of successful builds are: + --> amazon-ebs.{{user `build_name`}}: AMIs were created: + us-east-1: ami-0f4804aff4cf9c5a2 + + --> amazon-ebs.{{user `build_name`}}: AMIs were created: + us-east-1: ami-0f4804aff4cf9c5a2 + ``` + + +10. Log in to [Palette](https://console.spectrocloud.com). + + + +11. Navigate to the left **Main Menu** and select **Profiles**. + + + +12. Click on **Add Cluster Profile** to create a new cluster profile that uses your new custom image. + + + +13. Fill out the input fields for **Name**, **Description**, **Type**, and **Tags**. Select the type **Full** and click on **Next**. + + +14. Select your infrastructure provider. In this example, **AWS** is selected. + + + +15. Select the **BYOOS** pack. Use the following information to find the BYOOS pack. + +* Pack Type: OS +* Registry: Public Repo +* Pack Name: Bring Your Own OS (BYO-OS) +* Pack Version: 1.0.x or higher + +16. Update the pack YAML to point to your custom image. You can use the tag values Packer assigns to the image to help you identify the correct value to provide in the pack YAML. In the example output below, the tag values `distribution_version` and `distribution` are used to determine the correct values for the YAML. + +<br />
+ + ```shell hideClipboard + ==> amazon-ebs.{{user `build_name`}}: Creating AMI tags + amazon-ebs.{{user `build_name`}}: Adding tag: "build_date": "2023-05-10T17:19:37Z" + amazon-ebs.{{user `build_name`}}: Adding tag: "build_timestamp": "1683739177" + amazon-ebs.{{user `build_name`}}: Adding tag: "kubernetes_cni_version": "v1.2.0" + amazon-ebs.{{user `build_name`}}: Adding tag: "source_ami": "" + amazon-ebs.{{user `build_name`}}: Adding tag: "containerd_version": "1.6.20" + amazon-ebs.{{user `build_name`}}: Adding tag: "distribution_release": "Enterprise" + + amazon-ebs.{{user `build_name`}}: Adding tag: "distribution": "rhel" + amazon-ebs.{{user `build_name`}}: Adding tag: "image_builder_version": "" + amazon-ebs.{{user `build_name`}}: Adding tag: "kubernetes_version": "v1.24.11" + + amazon-ebs.{{user `build_name`}}: Adding tag: "distribution_version": "8 + ``` + + In this example, the YAML is updated to point to the RHEL image created earlier. Use the table below to learn more about each variable. + +
+ + | **Parameter** | **Description** | **Type** | + |---|----|----| + | `osImageOverride` | The ID of the image to use as the base OS layer. This is the image ID as assigned in the infrastructure environment it belongs to. Example: `ami-0f4804aff4cf9c5a2`. | string| + | `osName` | The name of the OS distribution. Example: `rhel`. | string | + | `osVersion`| The version of the OS distribution. Example: `8` | string| + +
+ + ```yaml + pack: + osImageOverride: "ami-0f4804aff4cf9c5a2" + osName: "rhel" + osVersion: "8" + ``` + + + ![View of the cluster profile wizard](/clusters_byoos_image-builder_cluster-profile-byoos-yaml.png) + + +17. Click on **Next layer** to add the Kubernetes layer. + + +18. Select the desired Kubernetes distribution and version. Click on the **** button to reveal the YAML editor. + + +19. Complete the remainder of the cluster profile creation wizard by selecting the next cluster profile layers. + +You now have a cluster profile that uses the custom image you created using the [Kubernetes Image Builder](https://image-builder.sigs.k8s.io/introduction.html) project. + +
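+ +Optionally, you can confirm that the custom image is available in the region you plan to deploy to. The following is a minimal sketch that assumes the AWS CLI is installed and configured, and it reuses the example AMI ID and region from the earlier output. Replace both with your own values. + + ```shell + # Confirm the custom AMI exists in the target region and is in the available state. + aws ec2 describe-images --image-ids ami-0f4804aff4cf9c5a2 --region us-east-1 --query "Images[].{ID:ImageId,Name:Name,State:State}" + ```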
+ +:::caution + +When deploying a host cluster, choosing the appropriate cloud provider and region where the image was distributed is critical to successfully launching a cluster using a custom image in the cluster profile. Failure to do so may result in Palette's inability to launch a cluster. + +::: + +### Validate + +Use the following steps to validate your custom image. + +1. You can validate that the custom image works correctly by using it to deploy a compute instance in the infrastructure provider where you created the image. If you encounter any issues, review the compute instance logs to learn more about the problems. + + +2. Next, deploy a host cluster that uses the cluster profile you created containing the custom image. Verify the cluster is deployed correctly and without any issues. If you encounter any problems, review the event logs of the cluster to gain more details about the issue. Check out the [Deploy a Cluster](../../clusters/public-cloud/deploy-k8s-cluster.md) tutorial for additional guidance on deploying a host cluster. \ No newline at end of file diff --git a/docs/docs-content/cluster-profiles/cluster-profile-import-export.md b/docs/docs-content/cluster-profiles/cluster-profile-import-export.md new file mode 100644 index 0000000000..524c6f42d9 --- /dev/null +++ b/docs/docs-content/cluster-profiles/cluster-profile-import-export.md @@ -0,0 +1,92 @@ +--- +sidebar_label: "Import Export Cluster Profiles" +title: "Import Export Cluster Profiles" +description: "The method for importing and exporting Cluster Profile on Spectro Cloud" +icon: "" +hide_table_of_contents: false +sidebar_position: 30 +--- + + +Palette enables cluster profiles to be exported and then imported across multiple environments, projects, and tenants. This simplifies reusing and sharing large profiles that contain many add-ons and integrations. + +## Prerequisites + +* [Export](#export-cluster-profile) the cluster profile file in JSON format from Palette. + + +* The packs in the exported profile should be available in the target environment during import. + + +* The `macros` used in the exported profile should be available in the target environment during import. If not, [create the macros](../clusters/cluster-management/macros.md#create-your-macro) in the target environment. + +## Use Cases + + +Common use cases for exporting and importing cluster profiles include: + +<br />
+ +* Moving cluster profiles between different environments, such as staging and development SaaS setups. + +## Export Cluster Profile + +To import a Palette cluster profile, the existing profile must first be exported from Palette as a JSON file. To export a profile, follow the steps below: + +<br />
+ +* As a `Tenant` or `Project` administrator, log in to Palette. + + +* Select the `Profiles` option from the left ribbon menu. + + +* Select the `Cluster Profiles` option from the top menu. + + +* From the listed cluster profiles, select the profile to be exported. + + +* From the profile details page, click `Export profile`. + + +* The profile is downloaded to your system as a JSON file. + + +* Save the downloaded file for import. + +:::info +While exporting the profile, the sensitive pack values are masked and must be updated during import. +::: + +## Import Cluster Profile + + +To import a cluster profile: + +<br />
+ +1. As a `Tenant` or `Project` administrator, log in to the Palette console. + + +2. Select the `Profiles` option from the left ribbon menu. + + +3. Select the `Cluster Profiles` option from the top menu. + + +4. To import an existing cluster profile, click on `Import Cluster Profile`. + + +5. In the import cluster profile wizard, + * Click the `Upload file` button to upload the exported profile JSON file. + * Validate the file contents to avoid duplicate profile names and versions. If a profile already exists with the same name and version combination, an error message is displayed. Customize the name or version number to avoid conflicts and ambiguities. + * Once the file contents are validated, the `Select Repositories` wizard opens if multiple repositories at the destination contain the imported profile packs. Select the repository from which the packs should be fetched from the UI drop-down menu and confirm. + * Once all the information is provided, confirm the profile creation process to have the profile created and listed. This profile can be used in the same way as any other cluster profile for cluster operations such as deployments, updates, and so on. + +:::info + +If there is only a single repository where the imported packs are present within the destination, the `Select Repositories` option will not appear. + +::: + diff --git a/docs/docs-content/cluster-profiles/cluster-profiles.md b/docs/docs-content/cluster-profiles/cluster-profiles.md new file mode 100644 index 0000000000..8acc8882b6 --- /dev/null +++ b/docs/docs-content/cluster-profiles/cluster-profiles.md @@ -0,0 +1,66 @@ +--- +sidebar_label: "Cluster Profiles" +title: "Understanding Cluster Profiles" +description: "Understanding the Cluster Profiles Concept and how they make Spectro Cloud powerful" +hide_table_of_contents: false +sidebar_custom_props: + icon: "bundles" +--- + + + +# Overview + + +[Cluster Profiles](../glossary-all.md#cluster-profile) are like templates that are created with preconfigured layers/components that are required for +workload cluster deployments. Cluster Profiles provide a way to drive consistency across workload cluster +deployments. You can create as many profiles as required. + +A Cluster Profile can contain an environment-specific configuration and can be created to meet specific types of Workload Cluster deployment needs. As an example, you may create a Development Cluster Profile with a very basic configuration or a Production Cluster Profile with additional security, monitoring, and logging layers. + +You may also build Special Purpose Profiles to deploy Workload Clusters for use cases such as AI/ML or High Performance Computing (HPC). Cluster Profiles can be created to be of the type Core Infra, Add-on, or Full. + +![Cluster Profile Types](/cluster_profiles.png) + +Cluster creation requires an Infrastructure or Full Cluster Profile to be selected, and optionally, one or more Add-on profile(s). The same Add-on Layer category may exist in more than one of the Add-on profiles. The Profile would then read, for example: OS, Kubernetes, Networking, Storage, Monitoring, Ingress. + +## Layers + +Cluster Profile Layers are built using content packages which contain integration-specific templates, charts, and manifests. These content packages can be one of two types: + + * **Palette Packs** - These content packages are built using Spectro Cloud's proprietary content format. Spectro Cloud maintains a public registry of Palette Packs that are available to all Tenants. <br />
+ + + * **Helm Charts** - These charts are a collection of Kubernetes resource files capable of deploying services ranging in varying complexities. Palette provides a few stable public Helm registries out of the box. Tenants can also add any public or private Helm registries to leverage charts from those registries. Palette promotes Container Storage Interface (CSI) and Container Network Interface (CNI) layers to be added as Helm Charts from customized Helm registries and linked to Spectro Registry packs. + +## Core Infrastructure Cluster Profile + +A **Core Infrastructure Cluster Profile** is constructed using the four Core Infrastructure layers: the OS, Kubernetes, the networking, and the storage layers. These profiles are environment specific and are constructed using cloud-specific layers. + +![Core Infra Profile - Azure](/cluster_profile_azure.png) + +## Add-On Cluster Profile + +An **Add-on Cluster Profile** consists of various integrations and can be constructed using layers such as: + +- System apps +- Authentication +- Security +- Monitoring +- Logging +- Ingress +- Load balancer +- Helm Charts + +![Add-On Profile](/addon_profile.png) + +## Full Cluster Profile + +A **Full Cluster Profile** consists of the Core Infrastructure layers and as many additional Add-on layers as required. The Core Infrastructure layers are cloud specific. + +![Full Cluster Profile](/full_profile.png) + +The next sections provide the details of creating and managing Cluster Profiles. + + +
diff --git a/docs/docs-content/cluster-profiles/create-add-on-profile.md b/docs/docs-content/cluster-profiles/create-add-on-profile.md new file mode 100644 index 0000000000..39e7c76083 --- /dev/null +++ b/docs/docs-content/cluster-profiles/create-add-on-profile.md @@ -0,0 +1,135 @@ +--- +sidebar_label: "Create an Add-on Profile" +title: "Create an Add-on Profile" +description: "Learn how to create an add-on cluster profile." +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +--- + + +Add-on cluster profiles offer a range of benefits for workload cluster deployments. These profiles provide enhanced functionality by allowing the addition of various layers such as system apps, authentication, security, monitoring, logging, ingress, and load balancers to the cluster. + +This capability allows you to customize and configure clusters based on specific requirements. Add-on cluster profiles follow a modular approach, making managing and maintaining cluster configurations more flexible. Add-on profiles also promote reusability, allowing profiles to be used across multiple environments, projects, and tenants. Additionally, add-on cluster profiles support integration-specific templates, charts, and manifests, providing flexibility and customization options for workload cluster deployments. + + +## Pack Labels and Annotations + +You can specify Namespace labels and annotations to Add-on packs, and packs that are for Container Storage Interfaces (CSI) and Container Network Interfaces (CNI) drivers. These labels and annotations are applied to the Namespace that the pack is deployed to, or to a specific Namespace if specified. You can apply labels and annotations to the pack's YAML file. + +The following parameters are available for specifying Namespace labels and annotations: + +| **Parameter** | **Description** | **Type** | +| --- | --- | --- | +| `namespace` | The Namespace that the pack is deployed to. If the Namespace does not exists, then Palette will create the Namespace. | string | +| `additionalNamespaces`| A list of additional Namespaces that Palette will create. | map | +| `namespaceLabels` | A list of key-value pairs for labels applied to the Namespace. | map | +| `namespaceAnnotations` | A list of key-value pairs for annotations applied to the Namespace. | map | + + + +The following example shows how to specify Namespace labels and annotations for an Add-on Pack, a CSI pack, and a CNI pack. In the example pack YAML configuration, the `wordpress` Namespace is created. An additional Namespace titled `wordpress-storage` is also created. In the parameters sections, `namespaceLabels` and `namespaceAnnotations`, each entry has a key and a value. The key is the name of the target Namespace, and the value is the value of the label or annotation. + + +
+ +```yaml +pack: + namespace: "wordpress" + additionalNamespaces: + "wordpress-storage" + + namespaceLabels: + "monitoring": "org=spectro,team=dev" + "wordpress-storage": "user=demo-user" + "default": "user=demo-user" + + namespaceAnnotations: + "monitoring": "monitoring.io/enable=true" + "wordpress-storage": "storage.metrics.io/format=json" +``` + + + + +## Create an Add-on Profile + +Use the following steps to learn how to create an add-on cluster profile. + + +### Prerequisites + +* Your Palette account role must have the `clusterProfile.create` permission to create an Add-on cluster profile. Refer to the [Cluster Profile](../user-management/palette-rbac/project-scope-roles-permissions.md#clusterprofile) permissions documentation for more information about roles and permissions. + + +### Create Steps + + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Profiles**. + + +3. Click on **Add Cluster Profile**. + + +4. Fill out the following input values and ensure you select **Add-on** for the type. Click on **Next** to continue. + +
+ + | Field | Description | + |----|----| + | **Name**| The name of the profile. | + |**Description**| Use the description to provide context about the profile. | + | **Version**| Assign a version to the profile. The default value is `1.0.0`. | + | **Type**| **Add-on** | + | **Tags**| Assign any desired profile tags you want. | + + +5. Select the type of layer to add to the cluster profile. + +
+ + | Type | Description | + |---|---| + | **Pack** | A pack is a collection of files and configurations that can be deployed to a cluster to add functionality or customize the cluster's behavior.| + | **Helm**| You can specify a Helm chart as a layer in an add-on profile.| + | **Manifest**| A manifest is a Kubernetes configuration file that describes the desired state of a Kubernetes resource, such as deployment, service, or pod and is used to create or modify that resource in a cluster.| + + + + +6. Depending on your selected type, fill out the required input fields and click on **Confirm & Create**. + + +
+ + ![A view of the manifest create process and the YAML code in the text editor](/clusters_imported-clusters_attach-add-on-profile_manfest-view.png) + +<br />
+ +7. If you want to add additional layers, repeat steps five and six. Otherwise, click on **Next** to review the profile. + + +8. Click on **Finish Configuration** to create the cluster profile. + + + +You now have an add-on cluster profile. You can reuse the profile and apply it to several clusters. You can also update a cluster profile and decide what clusters to apply the new version to. Refer to the [Update Cluster Profile](../cluster-profiles/task-update-profile.md) guide for more information about update operations. + + +### Validate + +1. Log in to [Palette](https://console.spectrocloud.com). + + + +2. Navigate to left **Main Menu** and select **Profiles**. + + + +3. Select your cluster profile to review its layers or make changes. + + +
diff --git a/docs/docs-content/cluster-profiles/examples.md b/docs/docs-content/cluster-profiles/examples.md new file mode 100644 index 0000000000..331ee4d8ba --- /dev/null +++ b/docs/docs-content/cluster-profiles/examples.md @@ -0,0 +1,43 @@ +--- +sidebar_label: "Cluster Profile Examples" +title: "Cluster Profile Examples" +description: "The method for creating a Cluster Profile for AWS on Spectro Cloud" +icon: "" +hide_table_of_contents: false +sidebar_position: 40 +--- + + +Cluster profiles can be built to launch clusters for specific use cases. Clusters launched for development purposes are typically minimal and do not require advanced integrations. Production clusters on the other hand tend to be more comprehensive with many more integrations. The following are examples of cluster profiles built for development and production purposes: + + + + + +## Development Cluster Profile + +![Development Profile](/development.png) + +* All layers are built with smart tags to enable automatic upgrades of clusters to the newest releases. +* Kubernetes dashboard is the only integration enabled. + + + + + +## Production Cluster Profile + +![Production Profile](/production.png) + +* All layers are pinned to specific versions +* Automatic upgrades are disabled +* Centralized logging enabled - Elastic Search, Fluentd, Kibana +* Centralized monitoring enabled - Prometheus, Grafana +* Runtime-security enabled - Sysdig Falco +* Service observability enabled - Istio +* Role-based access control enabled - Permissions Manager +* Load balancer to expose services externally - MetalLB + + + + diff --git a/docs/docs-content/cluster-profiles/task-define-profile.md b/docs/docs-content/cluster-profiles/task-define-profile.md new file mode 100644 index 0000000000..a90818f451 --- /dev/null +++ b/docs/docs-content/cluster-profiles/task-define-profile.md @@ -0,0 +1,128 @@ +--- +sidebar_label: "Create a Cluster Profile" +title: "Create a Cluster Profile" +description: "Learn how to create a cluster profile in Palette." +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +--- + + + + +## Basic Information and Core Layers + +Cluster profiles are created by configuring various layers of the Kubernetes infrastructure stack. To create a **New Cluster Profile**, follow these steps: + +1. Provide the **Basic Information** such as: + + |**Parameter** |**Description** | + |---------|---------| + |**Name** | Give a name for the new cluster. | + |**Version** | Include the [Cluster Profile Version](#clusterprofileversioning) number for the cluster under which the cluster profile needs to be created. See below for more information. | + |**Description** | Provide quick description of your cluster. This is optional. | + |**Profile Type (Full, Infrastructure, Add-on)**| Dictates the layers that can be configured in the cluster profile. If the cluster profile type is Infrastructure or Full, you are able to select a Cloud Type or Data Center environments. For more information on Add-on types go to step four. | + |**Tags** | Tags on a cluster profile are propagated to the VMs deployed on the cloud/data center environments when clusters are created from the cluster profile. This is optional. | + + +2. In the **Cloud Type** section, select the **Environment** you are working with. This list displays the environments supported in Palette. + + +3. Configure the **Profile Layers** of the infrastructure stack. The following layers are considered **Core Infrastructure** layers. 
Configuring these layers is mandatory for Full or Infrastructure cluster profiles. + + **Note**: These layers are not configurable for **Add-On** cluster profiles: + + - OS + - Kubernetes + - Network + - Storage + + Select the **Registry**, **Pack Name**, **Pack Version**, and **Pack Values** and click on **Next Layer** to go through each profile layer to completely build the core infrastructure. + + **Note**: Container Storage Interface (CSI) and Container Network Interface (CNI) layers can be added as Helm Charts from customized Helm registries and linked to Spectro Registry packs. + + +4. **Add-on Layers** are additional layers, such as **Monitoring**, **Security**, **Load Balancers**, **Ingress**, **Logging**, **Authentication**, and **Service Mesh**, that may be added and configured as desired. These layers may be configured for profiles of the type **Full** or **Add-On**. These add-on layers can be added in one of the following ways: + + + + Add New Pack - Add a Palette Pack from a pack registry or a Helm Chart from a chart registry. The public Palette Pack registry and a few popular Helm chart repositories are already available out of the box. Additional pack registries or public/private chart registries can be added to Palette. + + + + + + Import from cluster - Charts can be discovered from an existing Kubernetes Cluster. One or more of these discovered charts can be added to the cluster profile. During discovery, charts discovered from a cluster may not be available in any of the chart repositories available with Palette. Users can provide the registry information hosting these charts during the import process to complete the addition of such charts. + + + + + Add Manifest - Layers can be constructed using raw manifests to provision Kubernetes resources that are not available via Palette or Charts. Pack Manifests provide a pass-through mechanism wherein additional Kubernetes resources can be orchestrated onto a cluster along with the rest of the stack. + + + + +<br />
+ Configure each layer as follows: + + + + + Versions- Choose the desired version. Choices include pinning to a specific version (e.g. 1.1.1) or picking a major or minor train such as 1.x or 1.1.x. Picking a major/minor train results in a dynamic version association. The latest release from that train is linked to the pack at any given point. Future release updates on the train will result in the pack being relinked to the newest version. This allows clusters to always be at the latest released version, without having to make subsequent updates to the profile. + + + + + Configuration Parameters - The configuration option and version selected might provide configuration parameters to provide granular control or fine-tune certain aspects of the functionality. For the packs provided out of the box, the configuration parameters are set to values based on common best practices. Users may override these parameters as desired. Additionally, for certain layers, Palette provides a bunch of presets to quickly enable or configure a feature within the add-on. These presets are a group of properties presets with defaults to provide a quick and easy way to modify a set of relevant properties. If available, users can also enable one or more presets as appropriate. + + + + + Manifest - Attach additional manifests to the layer if desired. Attached manifests provide a way for provisioning additional Kubernetes resources that support an integration or an add-on. Certain integrations offered through packs or charts, may require creation of resources like Secrets or CustomResourceDefinition (CRDs) in order to complete the installation end to end. This can be achieved by adding one or more Attach Manifests to the layer. + + + + + +
+ +---- + +Palette allows users to deploy the same pack to multiple layers, which can be required in certain scenarios where an integration needs to be installed multiple times with different configurations. As an example, you may have two or more applications in the profile that need to use the Postgres database. In this case, you will be required to launch the Postgres database twice with different configurations. + +To allow packs to be added multiple times in a profile, add the `spectrocloud.com/display-name: <name>` key to the pack values in the YAML editor. The value `<name>` is a name that is unique across a cluster profile and the cluster. + + **Example:** + +<br />
+ + ```yaml hideClipboard + pack: + namespace: "external-dns" + spectrocloud.com/display-name: "dns-1" + ``` + + + If the same pack is needed at another layer, repeat the above block with the same namespace but a different name such as `dns-2`. Display names used for a pack across layers should be unique. + +
+ +By default, Palette uses the Helm chart release name in the format packName-chartName. In cases where a lengthy release name causes complications, you can customize the Helm chart `releaseNames` using the format below. + +<br />
+ +```yaml hideClipboard +pack: + namespace: kube-system + releaseNameOverride: + actual_chart_name1: custom_name1 + actual_chart_name2: custom_name2 +``` + + +
+ + + + + diff --git a/docs/docs-content/cluster-profiles/task-update-profile.md b/docs/docs-content/cluster-profiles/task-update-profile.md new file mode 100644 index 0000000000..3c25c6e775 --- /dev/null +++ b/docs/docs-content/cluster-profiles/task-update-profile.md @@ -0,0 +1,281 @@ +--- +sidebar_label: 'Update Cluster Profiles' +title: 'Update Cluster Profiles' +description: 'Learn how to update cluster profiles in Palette.' +icon: '' +hide_table_of_contents: false +sidebar_position: 20 +--- + + +You update a cluster profile to change the configuration of one or more layers in the profile stack. You can also update basic profile information such as the name, description, and tags. + +## Update a Cluster Profile + +Use the following steps to learn how to update a cluster profile. + +
+ +### Prerequisites + +- An existing cluster profile. + + +- Permission to update the profile. + + + +### Enablement + +The following steps will guide you in updating basic profile information. + + + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. From the left **Main Menu**, select **Profiles**. + + +3. Click the profile you want to update. Palette displays the profile stack. + + +4. Click the **Settings drop-down Menu** and choose **Edit Info**. + +
+ + You can modify the name, version, description, and tags. Updated tags are not propagated to previously created clusters. However, tag changes will apply to new clusters you create that use the updated profile. + +
+ + +5. Save your changes. + + +### Validate + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. From the left **Main Menu**, select **Profiles**. + + +3. Click the profile you updated. Palette displays the profile details and profile stack. + + +4. Check that profile details displays your changes. + + + +## Update a Pack Layer + +The following steps will guide you in making updates to a layer in the profile. + +### Enablement + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. From the left **Main Menu**, select **Profiles**. + + +3. Click the profile you want to update. Palette displays the profile details and profile stack + + +4. Click the layer to update. Palette displays the profile stack. To add a pack layer, select one of the following options: + +
+ + - **Add New Pack** + - **Import from cluster** + - **Add Manifest** + + +5. You can do the following: + + - Choose a new pack to add, or import one from another cluster. + + - Edit pack settings in the YAML file. + + - Add, edit, or remove a manifest. + + - Remove non-core pack layers from the profile. Click the layer to display its details and click the **trash can** icon next to **Edit Pack**. + +
+ + :::info + + Operating System (OS) Kubernetes, Networking, and Storage are considered core layers and cannot be removed. + + ::: + + + - Delete the profile by navigating to the **Settings drop-down Menu** and choosing **Delete**. + + + +6. Confirm your updates. + +Clusters that use the updated profile are notified of the changes. You can update clusters to use the latest profile definition at any time. + + +### Validate + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. From the left **Main Menu**, select **Profiles**. + + +3. If you deleted the profile, verify it is no longer displayed on the **Cluster Profiles** page. + + +4. If you made changes, click the profile you updated. Palette displays the profile details and profile stack. + + +5. Check that layers are added to or removed from the stack. + + +6. If you added, removed, or modified a manifest, click the layer in the stack that you updated and verify the manifest changes. + + + +## Update the Pack Version + +Packs typically contain changes between versions, such as the addition or removal of parameters and policies. The following steps guide you in updating configurations. + +
+ +:::caution + +When updating to a new pack version, these rules apply: + +
+ +- You should not copy the pack configuration from one version to another, as the newer version often contains an adjusted configuration that is tailored to that version. Instead, you should integrate your changes manually in the new version. + + +- Updating to a newer Kubernetes version must be done incrementally, one minor version at a time. + + +- Select a specific target version instead of a group that ends in ``.x``. + + +- We do not recommend downgrading packs to the previous version. + +::: + +<br />
+ +### Enablement + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. From the left **Main Menu**, select **Profiles**. + + +3. Click the profile you want to update. Palette displays the profile stack. + + +4. Click on the pack layer to update. + + +5. In the **Edit Pack** page, select a specific target version, not a group that ends in ``.x``. Palette displays the difference between the current version at left and the new version at right. The target version is displayed in the header.

+ + Differences between the displayed configurations are as follows: + +
+ + + - **Red highlighting**: indicates text that is not present in the new configuration. + +
+ + Red highlighting indicates lines you may have added in the current configuration. You can use the arrow icon that displays between the two configurations to transfer the lines to the new version. + +
+ +
+ + + These lines may also have been removed because they are no longer valid in the new configuration. If you need them, you should copy the lines to the new version. Similarly, you should copy any settings from the current configuration. + +
+
+ + - **Green highlighting**: indicates additions in the new configuration that are not present in the current version. + +
+ + #### Example of Difference Between Current and New Configurations + + + ![Screenshot that shows Palette's pack diff user interface with red highlight at left and green highlight at right](/integrations_pack_diffs.png) + +
+ +
+ + - **Contrasting shades** of red and green highlighting in the same line indicate that the differences occur in only part of the line. + +<br />
+ + #### Example of Line Changes in Current and New Configurations + + ![Screenshot that shows Palette's pack diff user interface with contrasting shades of red and green highlight in the same line](/integrations_pack_line_diffs.png) + + +
+ + +6. Check for red-highlighted lines in the current configuration that are missing from the new configuration. + +<br />
+ + - If there are any lines you added, use the arrow to transfer the lines to the new version. + +
+ + - If there are lines you did not add that are red highlighted, they have been removed in the new version, and you should **not** copy them over. + + +7. Check for changed settings in the new configuration and copy settings from the current configuration to the new version. + + +8. Review new sections in the new configuration. You should adopt them, as they are typically needed to support the new version. + + +9. Check for changes in the same line that have a different value. If it is not a customization you made, you should adopt the new value, as it is known to be compatible with the new version. + + +10. Confirm your updates. + + +## Validate + + + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. From the left **Main Menu**, select **Profiles**. + + +3. Click the profile you updated. Palette displays the profile stack. + + +4. Check that the updated layer displays the new pack version. + +
+ + Palette indicates any misconfigurations with a dot displayed on the problematic layer in the stack and a message letting you know there is an issue. + + +5. Click on the pack layer and review its configuration. Apply fixes and confirm your updates. + + +6. Repeat the process until Palette indicates the configuration works. + + + +
+ diff --git a/docs/docs-content/clusters/_category_.json b/docs/docs-content/clusters/_category_.json new file mode 100644 index 0000000000..c82af61e53 --- /dev/null +++ b/docs/docs-content/clusters/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 60 +} diff --git a/docs/docs-content/clusters/cluster-groups/_category_.json b/docs/docs-content/clusters/cluster-groups/_category_.json new file mode 100644 index 0000000000..c82af61e53 --- /dev/null +++ b/docs/docs-content/clusters/cluster-groups/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 60 +} diff --git a/docs/docs-content/clusters/cluster-groups/cluster-group-backups.md b/docs/docs-content/clusters/cluster-groups/cluster-group-backups.md new file mode 100644 index 0000000000..9b808aea5c --- /dev/null +++ b/docs/docs-content/clusters/cluster-groups/cluster-group-backups.md @@ -0,0 +1,89 @@ +--- +sidebar_label: "Enable Disk Backup on Virtual Clusters" +title: "Enable Disk Backup on Virtual Clusters" +description: "Learn how to configure disk and volume backup for virtual clusters in a cluster group." +hide_table_of_contents: false +sidebar_position: 10 +tags: ["clusters", "cluster groups"] +--- + +Palette [Virtual Clusters](../palette-virtual-clusters/palette-virtual-clusters.md) are a capability that cluster groups support and that you can enable when creating a cluster group. By default, the virtual cluster settings in a cluster group disable disk backups. You can back up all the volumes within a virtual cluster using the following steps. + +## Prerequisites + +* A project or tenant backup location. Refer to the [cluster backup and restore](../cluster-management/backup-restore/backup-restore.md#clusterbackupandrestore) document to learn how to configure a backup location. + +* Cluster group modification [permissions](../../user-management/palette-rbac/palette-rbac.md). + +* A cluster group. Review the [create a cluster group](create-cluster-group.md) for additional guidance. + + +:::info + +You can also enable virtual cluster disk backup during the cluster group creation process. + +::: + + +## Enable Backup for Virtual Clusters + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Cluster Groups**. + + +3. Select a cluster group to enable virtual cluster disk backup. + + +4. Click **Settings** and expand the **Settings** Menu. + + +5. To enable disk backup you need to change the following configurations in the **Advanced Config** section. + + - Set `syncer.extraArgs.rewrite-host-paths` to `true` + ```yaml + syncer: + extraArgs: + - --rewrite-host-paths=true + ``` + - Set `hostpathMapper.enabled` to `true` + ```yaml + hostpathMapper: + enabled: true + ``` + - Set `podSecurityStandard` to `privileged` + ```yaml + isolation: + podSecurityStandard: privileged + ``` + +:::caution + +Setting the `podSecurityStandard` to `privileged` can introduce privilege escalations. We recommend you discuss this with your security system administrator. + +::: + +7. Save your changes. + + +All virtual clusters deployed in this cluster group will now include disk storage during backup operations. + +## Validate + + +You can validate that the disk backups are occurring by deploying a virtual cluster and taking a backup. + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Deploy a virtual cluster in your cluster group that has the disk backup settings enabled. 
Refer to the [Deploy a Virtual Cluster to a Cluster Group](../palette-virtual-clusters/deploy-virtual-cluster.md) guide to learn how to deploy Palette Virtual clusters. + + +3. Create a backup of your virtual cluster and include all disks. Use the [Create a Cluster Backup](../cluster-management/backup-restore/backup-restore.md#get-started) guide for additional guidance. + + +4. Access the backup location's blob storage and review the backup files. + +Example of a backup that includes the virtual cluster disks. +![Example image of a backup that includes disks](/clusters_cluster-groups_cluster-group-backups_backup-overview.png) \ No newline at end of file diff --git a/docs/docs-content/clusters/cluster-groups/cluster-groups.md b/docs/docs-content/clusters/cluster-groups/cluster-groups.md new file mode 100644 index 0000000000..22232433a6 --- /dev/null +++ b/docs/docs-content/clusters/cluster-groups/cluster-groups.md @@ -0,0 +1,41 @@ +--- +sidebar_label: "Cluster Groups" +title: "Cluster Groups" +description: "Explore Palette Devx as Free Developer" +hide_table_of_contents: false +sidebar_custom_props: + icon: "object-group" +tags: ["clusters", "cluster groups"] +--- + +A *Cluster Group* is a collection of one or more host clusters that together form a computing platform for you and your users to deploy Palette virtual clusters. Downstream consumers can use the cluster group when using Palette in [*App Mode*](../../introduction/palette-modes.md#what-is-app-mode). + +You can create a cluster group under the Palette [tenant](../../glossary-all.md#tenant) scope. Alternatively, you can create a cluster group at the [project](../../projects.md) scope. + +By default, Palette exposes a managed cluster group called *beehive* that is available for users in app mode. This cluster group is managed by Palette and falls under the free tier. The beehive cluster group is located in the eastern side of the U.S. + +You can create a cluster group that is made up of various types of host clusters. You could create a cluster group by similar cloud providers, Kubernetes versions, or by location. You have the flexibility to define the grouping criteria. The following image displays a cluster group comprised of various host clusters deployed in a public cloud, private cloud, and edge environment. + +
+ +:::caution + +Cluster groups support two network endpoints: load balancer and ingress. All host clusters added to a cluster group must support the endpoint type configured for the cluster. Example: A host cluster configured for ingress as the endpoint type cannot be added to a cluster group configured for the endpoint type load balancer and vice versa. + +::: + +![An example cluster group made up of various clusters](/clusters_cluster-groups_index-page.png) + +## Get Started + +Learn how to create a cluster group by reviewing the [Create and Manage Cluster Groups](create-cluster-group.md) guide. + +
+ +## Resources + +- [Create and Manage Cluster Groups](create-cluster-group.md) + +- [Enable Disk Backup on Virtual Clusters](cluster-group-backups.md) + +- [Set up Ingress for a Cluster Group](ingress-cluster-group.md) diff --git a/docs/docs-content/clusters/cluster-groups/create-cluster-group.md b/docs/docs-content/clusters/cluster-groups/create-cluster-group.md new file mode 100644 index 0000000000..2ed725a830 --- /dev/null +++ b/docs/docs-content/clusters/cluster-groups/create-cluster-group.md @@ -0,0 +1,190 @@ +--- +sidebar_label: "Create and Manage Cluster Groups" +title: "Create and Manage Cluster Groups" +description: "Learn how to create and manage Palette Cluster Groups" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["clusters", "cluster groups"] +--- + + +Use a cluster group to organize your host clusters into a single logical group. A cluster group is a collection of one or more host clusters that together form a computing platform for you and your users to deploy Palette Virtual Clusters. Downstream consumers can use the cluster group when using Palette in [App Mode](../../introduction/palette-modes.md#what-is-app-mode). + +:::info + +Palette does not offer support for host clusters of these types within a cluster group: +- Edge clusters +- Virtual clusters +- Private Cloud Gateway (PCG) cluster +- Imported clusters with read-only access +::: + +Use the instructions below to create a cluster group. + +## Prerequisites + +* To create a Palette Host Cluster Group, you need to deploy a healthy running [Palette host cluster](../clusters.md). + + +* The host clusters must match the network endpoint type of the cluster group. + + +## Enablement + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Cluster Groups**. + + +2. Click **+New Cluster Groups** to create a new cluster group and provide the following information to the creation wizard. + + + * **Basic Information: ** + + | Parameter | Description | + |-------------------------------|-----------------| + |Group Name | A name for the cluster group.| + |Description (optional) | Description of the group, if any. | + |Tag (optional) | Assign tags to the cluster group.| + + +3. Select **Next** to continue. + + + +4. Use the **Select clusters** drop-down menu to add available host clusters. + +:::info + + Only host clusters created under the current scope are available to add to a cluster group. You can add host clusters created under the current project or at the tenant scope. You cannot add host clusters that were created in another project scope. + + +::: + +5. Click **Next** once you have added all the host clusters you wish to include. + + +6. Review the configuration options for **Host Clusters Config** and **Virtual Clusters Config**. + + +#### Cluster Group Configurations + + +|**Host Cluster Config** | **Description** | +|--------------------------------------|-------------------------------------------| +|Oversubscription (%): | The allowed oversubscription for cluster in terms of resources. Default is 120%.| +|Cluster endpoint type: | Load balancer or Ingress.| +|Host DNS: | If the selected cluster endpoint is **Ingress**, then for each selected host cluster provide the host DNS pattern. Ensure that a wildcard DNS record exists that maps the provided host pattern to the ingress controller load balancer for this cluster. 
Check out the [Setup Ingress](ingress-cluster-group.md) guide for additional guidance.| + + +#### Palette Virtual Cluster Configuration + +This configuration is applied to all virtual clusters launched into the host clusters. Use the **Advanced Config** for further customization. The request sizing applies to the maximum amount of resources a virtual cluster is allowed to claim. + +<br />
+ +|**Palette Virtual Cluster Resource** | **Default** |**Minimum Limit**| +|------------------------------|-------------------|-----------------| +|CPU (per request) | 6 | 4 | +| Memory (per request) | 8 GiB | 4 GiB | +| Storage (per request) | 10 GiB | 2 GiB | + + +:::caution + +A virtual cluster requires a minimum of 4 CPU, 4 GiB of memory, and 2 GiB of storage to launch successfully. The default settings in the cluster group virtual cluster configuration YAML file have the following values: + +```yaml +vcluster: + resources: + limits: + cpu: 1000m + memory: 1Gi + ephemeral-storage: 1Gi + requests: + cpu: 200m + memory: 256Mi + ephemeral-storage: 128Mi +``` + +Increasing the limit and request values could result in a virtual cluster requiring more resources than the default values of 4 CPU, 4 GiB of memory, and 2 GiB of storage. + +::: + +To enable virtual clusters for OpenShift, review the OpenShift [instructions below](#enable-virtual-cluster-for-openshift). + + +7. Click **Next** to complete the cluster group creation process. + + +8. Click **Finish Configuration**. + +### Validate + +To review your cluster group, navigate to the left **Main Menu** and select **Cluster Groups**. Your newly created cluster group is now displayed and ready for use. + + +## Manage your Cluster Group + +Once the cluster group is created, day-two operations can be performed from the cluster group's **Settings** options. To access cluster group settings, navigate to the left **Main Menu** and select **Cluster Groups**. Select a cluster group, and click the **Settings** button. + + +### Add a Host Cluster to the Group + +You can add additional host clusters to a cluster group. Navigate to the left **Main Menu** and select **Cluster Groups**. Select the cluster group to which you want to add host clusters. Click on **+ Add Host Cluster**. Select the desired host clusters and verify the oversubscription and cluster endpoint type settings. + +### Delete your Cluster Group + +To delete a cluster group, navigate to the left **Main Menu** and select **Cluster Groups**. Select the cluster group you want to delete and click on the **Settings** button. Select **Delete Cluster**, enter the cluster name, and confirm the delete operation. + + +## Enable Virtual Clusters for OpenShift + +To deploy a virtual cluster on OpenShift: + + +1. Create a new Cluster Group or edit an existing one and click **Settings**. + + +2. Select **Settings** in the **Cluster Group Settings** pane. + + +3. In the **Advanced Config** file, locate the `securityContext` section. + + +4. Comment out these lines: + + * ``fsGroup`` + * ``runAsGroup`` + * ``runAsUser`` + +5. Set `openshift.enable:` to `true`. + + +6. Verify these default parameter values are set as follows: + + * ``allowPrivilegeEscalation: false`` + * ``capabilities.drop: [all]`` + * ``runAsNonRoot: true`` + +The following example shows the required configuration for OpenShift. <br />
+ +**Example** + + +```yaml +#fsGroup: 12345 +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + + #runAsGroup: 12345 + #runAsUser: 12345 + runAsNonRoot: true + +openshift: + enable: true +``` \ No newline at end of file diff --git a/docs/docs-content/clusters/cluster-groups/ingress-cluster-group.md b/docs/docs-content/clusters/cluster-groups/ingress-cluster-group.md new file mode 100644 index 0000000000..220f176062 --- /dev/null +++ b/docs/docs-content/clusters/cluster-groups/ingress-cluster-group.md @@ -0,0 +1,152 @@ +--- +sidebar_label: "Set Up Ingress" +title: "Set Up Ingress for Cluster Groups" +description: "Learn how to configure Ingress for a Palette Cluster Group" +hide_table_of_contents: false +sidebar_position: 20 +tags: ["clusters", "cluster groups"] +--- + + + +Cluster Groups may have a cluster endpoint type of either Load Balancer or Ingress. The cluster endpoint type determines how Palette Virtual Clusters deployed in a Cluster Group are exposed. You specify the cluster endpoint in Cluster Group Settings. + +Using **Ingress** as the cluster endpoint type is a more cost effective way to access your Kubernetes workloads than using type **Load Balancer**, which requires a new cloud Load Balancer to be provisioned for each virtual cluster. + +When you enable **Ingress** as the endpoint for a Cluster Group, you must deploy an [Ingress Controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers) add-on profile, such as NGINX, on each host cluster in the Cluster Group. The Ingress Controller provides the necessary routing functionality for external traffic to reach the Kubernetes API server of each virtual cluster, as well as any apps each virtual cluster contains. + +## Prerequisites + +- At least one infrastructure or cloud-based cluster you’ve created. + + +- The Ingress Controller must have Secure Socket Layer (SSL) passthrough enabled so that Transport Layer Security (TLS) is not terminated at the ingress controller. Palette provides the ```nginx-ingress``` add-on profile with SSL passthrough already enabled. The following example shows how SSL-passthrough is enabled for the NGINX Ingress Controller. You would add an equivalent configuration to the profile of the add-on you are using.

+ + ```yaml + charts: + ingress-nginx: + controller: + extraArgs: + enable-ssl-passthrough: true + ``` + + + - Palette's ```nginx-ingress``` add-on profile automatically reroutes inbound requests from port 6443 to port 443 using a TCP service configuration. This is so that TLS termination on port 443 for all Apps can occur at the cloud load balancer while simultaneously allowing connections to the API servers of your Virtual Clusters on port 6443. + + If you are using an ingress controller other than the NGINX Ingress Controller and would like to terminate TLS at your ingress controller's cloud load balancer, an equivalent TCP service configuration would be required. Alternatively, you may handle all TLS termination inside the cluster by configuring Cert Manager to issue a certificate for each App's Ingress.
+ + The following example shows how port rerouting is achieved for the NGINX Ingress Controller. You would add an equivalent Transmission Control Protocol (TCP) service configuration to the profile of the add-on you are using.

+ + ```yaml + tcp: + 6443: "nginx/nginx-ingress-controller:443" + ``` + +## Set Up Ingress + +The following steps describe how to enable an Ingress Controller for a Cluster Group. You will use the `nginx-ingress` add-on profile, but you may choose another ingress controller. + + +1. Log in to Palette as **Tenant Admin**. + + +2. Identify each host cluster that requires the addition of an NGINX Ingress Controller profile. + + This can be: + + - All the host clusters in an existing Cluster Group,
+ or + + - Existing host clusters that you will add to a new Cluster Group.

+
+3. Either add the `nginx-ingress` add-on profile to each host cluster, or manually configure your own ingress controller add-on profile with the customizations described in the [Prerequisites](#prerequisites) section.
+
+   a. From the **Main Menu**, choose **Clusters** and select a cluster.
+
+   b. In the **Profile** tab, click **Add add-on profile (+)** and select `nginx-ingress`.
+
+   c. Confirm and save your changes.
+
+
+4. For each host cluster with an ingress controller add-on profile deployed, follow these steps to open a web shell, identify the External-IP of the LoadBalancer Service, and copy the value you will need to create a Canonical Name (CNAME) Domain Name System (DNS) record:
+
+   a. From the **Main Menu**, select a cluster. The cluster **Overview** tab displays.
+
+   b. In the **Details** section beneath **Metrics**, click the **Connect** button next to the Kubernetes config file to open a web shell.
+
+   c. Invoke the following command to display the External-IP of the ```nginx-ingress``` LoadBalancer Service:

+ + ``` + kubectl -n nginx get service nginx-ingress-controller + ``` + + d. Copy the record to your clipboard or to a text file. You will use the External-IP address to create a CNAME DNS record. +
+ + e. Close the web shell. + + +5. Use your DNS provider to create a wildcard CNAME record that maps to the External-IP for the NGINX Ingress Controller. Paste the External-IP you copied from the web shell to create the CNAME record. + + :::info + + The CNAME record is also known as the host cluster DNS pattern. + + ::: + +
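+
+   For example, if your DNS zone is `example.com` and the External-IP you copied from the web shell resolves to a load balancer hostname, the wildcard CNAME record would look similar to the following. The domain and hostname shown here are illustrative placeholders, not values generated by Palette.
+
+   ```
+   *.cluster-group.example.com    CNAME    abc123.us-east-1.elb.amazonaws.com
+   ```
+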
+ +6. Copy the CNAME record to your clipboard. + + +7. Ensure you are in Palette's Cluster Mode, under the Tenant Admin scope. From the **Main Menu**, select **Cluster Groups**, then select the Cluster Group that requires ingress.

+ a. From the **Host Clusters** tab, select **Settings > Clusters**. + + b. Choose **Ingress** as the **Cluster endpoint type**. + + c. Paste the name of the wildcard CNAME record into the **Host DNS** field. + + :::info + If you haven’t yet created a Cluster Group, you can configure each host cluster as described and add them to a new Cluster Group later. + ::: + +## Validate + +To validate that ingress is functioning as expected, do the following: + +1. From the **User Menu**, switch to App Mode and deploy a new virtual cluster.
+ To learn how to deploy a virtual cluster, check out the [Add Virtual Clusters to a Cluster Group](../palette-virtual-clusters/deploy-virtual-cluster.md) guide. + + +2. Use a web shell and type the following command to verify you can connect to the newly deployed virtual cluster: + + ```shell + kubectl get namespaces + ``` +This should display a list of namespaces as shown in the example: + +
+ + ```shell + NAME STATUS AGE + default Active 4d11h + kube-system Active 4d11h + kube-public Active 4d11h + kube-node-lease Active 4d11h + cluster-63c91f359ae82b46c9bad615 Active 4d11h + app-gamebox-lb-spectro-gamebox Active 4d11h + ``` + +If an error message displays, it indicates something is wrong with the configuration. Verify the following: + +- Each host cluster is deployed with NGINX Ingress Controller. + +- The CNAME record correctly maps to the External-IP of the NGINX Ingress Controller’s LoadBalancer Service. + +- Cluster Group Settings specify the Cluster endpoint type as **Ingress**, and **Host DNS** specifies the CNAME record you created. + + + + + + diff --git a/docs/docs-content/clusters/cluster-management/_category_.json b/docs/docs-content/clusters/cluster-management/_category_.json new file mode 100644 index 0000000000..ae9ddb024d --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 50 +} diff --git a/docs/docs-content/clusters/cluster-management/backup-restore/_category_.json b/docs/docs-content/clusters/cluster-management/backup-restore/_category_.json new file mode 100644 index 0000000000..b843e17c90 --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/backup-restore/_category_.json @@ -0,0 +1,4 @@ +{ + "position": 50 + } + \ No newline at end of file diff --git a/docs/docs-content/clusters/cluster-management/backup-restore/add-backup-location-dynamic.md b/docs/docs-content/clusters/cluster-management/backup-restore/add-backup-location-dynamic.md new file mode 100644 index 0000000000..5218257d46 --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/backup-restore/add-backup-location-dynamic.md @@ -0,0 +1,439 @@ +--- +sidebar_label: 'Add Backup Location using Dynamic Credentials' +title: 'Add Backup Location using Dynamic Credentials' +description: 'Learn how to add a backup location in Palette using dynamic access credentials.' +hide_table_of_contents: false +sidebar_position: 20 +tags: ["clusters", "cluster management", "backup"] +--- + + +This guide provides instructions for how to add a backup location in Palette using dynamic access credentials. You use the dynamic access credentials to authenticate Palette with the backup location service provider. Refer to the [Backup Location](backup-restore.md#backuplocation) section to learn more about the supported service providers. + + +Depending on the infrastructure provider, there may be limitations or different prerequisites. + + +## Dynamic Credentials with AWS STS + +To support dynamic credentials with AWS, Palette uses the AWS Security Token Service (STS) authentication method. You can use AWS STS when adding an S3 bucket as the backup location. The following sections outline the prerequisites and provide detailed steps to add an S3 bucket as the backup location using the STS authentication method. + +
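+
+Behind the scenes, STS-based access means Palette assumes an IAM role that you create, scoped by an external ID. If you want to verify the role yourself from the AWS CLI before adding it to Palette, the following call is a minimal sketch of that exchange. The role ARN and external ID are placeholders; Palette performs this call for you during normal operation.
+
+```shell
+aws sts assume-role \
+  --role-arn arn:aws:iam::123456789012:role/palette-backup-role \
+  --role-session-name palette-backup-test \
+  --external-id <EXTERNAL-ID-DISPLAYED-IN-PALETTE>
+```
+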
+ +:::caution + +Palette supports AWS STS only when your Palette’s hosting environment and the backup location service provider are the same. Palette SaaS is hosted on AWS, so you can use AWS STS to add an S3 bucket as the backup location. Similarly, if you have a self-hosted Palette or Palette VerteX deployed in AWS, you can use AWS STS to add an S3 bucket as the backup location. Otherwise, you cannot use AWS STS to add an S3 bucket as the backup location. + +::: + + +You can use the same AWS account in which you deploy your Kubernetes cluster to add an S3 bucket as the backup location. You can also use a different AWS account to add an S3 bucket as the backup location. Select the tab below that best matches your use case. + + +- [Single Cloud Account with AWS STS](#single-cloud-account-with-aws-sts) + +- [Multiple Cloud Accounts with AWS STS](#multiple-cloud-accounts-with-aws-sts) + + +## Single Cloud Account with AWS STS + + +Use the following steps to add an S3 bucket as the backup location using the STS authentication method when you have one cloud account. + + +### Prerequisites + +* Both your Palette environment instance and the S3 bucket are hosted on AWS. This prerequisite is more applicable to self-hosted Palette and Palette VerteX customers. Palette SaaS in hosted in an AWS environment. + + +* An AWS account. This account is assumed to be the same account where you deploy Kubernetes clusters. Refer to the [Multiple Cloud Accounts with AWS STS](add-backup-location-dynamic.md#multiple-cloud-accounts-with-aws-sts) section to learn how to add a backup location when the cluster deployment cloud account differs from the backup cloud account. + + + +* An S3 bucket in the AWS account. The bucket will store the backup of your clusters or workspaces. + + +* The following IAM policy must be created in your AWS Account. Replace the `BUCKET-NAME` placeholder in the policy below with your bucket name. Refer to the [Creating IAM policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create-console.html) for additional guidance. + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeVolumes", + "ec2:DescribeSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:CreateSnapshot", + "ec2:DeleteSnapshot" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:PutObject", + "s3:AbortMultipartUpload", + "s3:ListMultipartUploadParts" + ], + "Resource": [ + "arn:aws:s3:::BUCKET-NAME/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::BUCKET-NAME" + ] + } + ] + } + ``` + +* If the S3 bucket is using a customer managed AWS Key Management Service (KMS) key for server-side encryption, ensure the Palette IAM role has the necessary permissions to access the KMS key. Otherwise, Palette will be unable to put objects in the S3 bucket, resulting in backup or restore failure. Check out the [Troubleshooting key access](https://docs.aws.amazon.com/kms/latest/developerguide/policy-evaluation.html) guide to learn more about common KMS issues. + + :::tip + + Use the IAM Policy Simulator to verify the IAM role has the necessary permissions to access a customer managed KMS key. Refer to the [Testing IAM policies with the IAM policy simulator](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_testing-policies.html) guide to learn more. + + ::: + +
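+
+If you prefer the AWS CLI over the IAM Policy Simulator console, the following command is one way to check whether a role can use a customer managed KMS key. The role name, actions, and key ARN are illustrative placeholders.
+
+```shell
+aws iam simulate-principal-policy \
+  --policy-source-arn arn:aws:iam::123456789012:role/palette-backup-role \
+  --action-names kms:GenerateDataKey kms:Decrypt \
+  --resource-arns arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab
+```
+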
+ + +### Instructions + +1. Log in to [Palette](https://console.spectrocloud.com/). + + +2. Navigate to **Project Settings** and click on **Backup Locations**. + + +3. Click on the **Add New Backup Location** button. Palette will open a wizard to configure the new backup location, as highlighted in the screenshot below. + + ![A screenshot highlighting the wizard and configuration fields to add a backup location in Palette.](/clusters_cluster-management_backup-restore_add_aws_account.png) + + +4. Fill out the input fields listed in the table. + + |**Configuration Field**|**Value**| + |---|---| + |**Location Name**|Provide a name of your choice.| + |**Location Provider**|Select AWS from the **drop-down** Menu. | + |**Certificate**| Optional service provider x509 certificate. | + |**S3 Bucket**|The name of the S3 bucket you created in the object store. The bucket name must be DNS-compliant. For more information, refer to the [Bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) defined by AWS.| + |**Region**|Region where the S3 bucket is hosted. You can check region codes in the [Service endpoints](https://docs.aws.amazon.com/general/latest/gr/s3.html#s3_region) section in the AWS documentation.| + |**S3 URL**|Optional S3 URL. If you choose to provide a value, refer to the [Methods for accessing a bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html#virtual-host-style-url-ex) guide to determine the bucket URL and enable the **Force S3 path style** checkbox.| + + +5. Next, choose the **STS** authentication method. When you choose the STS authentication method, you must create a new IAM role and provide its Amazon Resource Name (ARN) to Palette. Check out the [Creating a role using custom trust policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-custom.html) guide from Amazon for additional guidance. + + +6. Log in to your AWS Account and create a new IAM role. Attach the IAM policy specified in the [Prerequisites](#prerequisites) section. Use the following configuration while creating the IAM role. + + |**AWS Console Field**|**Value**| + |---|---| + |Trusted entity type| Select *AWS account*| + |AWS account|Select the **Another AWS account** radio button.| + |AWS Account ID|Use the one displayed in Palette, which is Palette's account ID.| + |Options|Select the **Require external ID** checkbox.| + |External ID|Use the one displayed in Palette. Palette generates the external ID.| + |Permissions policies| Attach the IAM policy defined in the [Prerequisites](#prerequisites) section above.| + |Role name|Provide a name of your choice.| + |Role description|Provide an optional description.| + +
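+
+   If you script your AWS setup instead of using the console, the role creation can be sketched with the AWS CLI as shown below. The role name, policy name, and file names are placeholders you would replace with your own values; the trust policy file must contain the account ID and external ID displayed in Palette.
+
+   ```shell
+   # Create the role with a trust policy that allows Palette's AWS account
+   # to assume it, conditioned on the external ID shown in the wizard.
+   aws iam create-role \
+     --role-name palette-backup-role \
+     --assume-role-policy-document file://trust-policy.json
+
+   # Attach the bucket access policy defined in the Prerequisites section.
+   aws iam put-role-policy \
+     --role-name palette-backup-role \
+     --policy-name palette-backup-bucket-access \
+     --policy-document file://bucket-policy.json
+   ```
+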
+ + + ![A view of the IAM Role creation screen](/clusters_cluster-management_backup_restore_add-backup-location-dynamic_aws_create_role.png) + + +7. Review the details of the newly created IAM role. + +
+ + ![A view of the IAM Role creation summary screen](/clusters_cluster-management_backup_restore_add-backup-location-dynamic_aws_create_role_summary.png) + + + +8. Copy the IAM role Amazon Resource Name (ARN) + + +9. Switch back to Palette, and resume the backup location creation wizard. Paste the copied IAM role ARN into the **ARN** input field. + + +12. Click on **Validate**. Palette will display a validation status message. If the validation status message indicates a success, proceed to the next step. If the validation status message indicates an error, review the error message and verify the IAM configurations you provided. Ensure you have provided the correct IAM role ARN, Palette external ID, and that the IAM role has the required IAM policy permissions mentioned in the [Prerequisites](#prerequisites) section. + + +13. Click on the **Create** button. + + +You now have a backup location for Palette to store the backup of your clusters or workspaces. This backup location uses AWS STS to authenticate Palette with the S3 bucket in the same AWS account you deploy your Kubernetes cluster. + + +### Validate + + +1. Log in to [Palette](https://console.spectrocloud.com/). + + +2. Navigate to **Project Settings** and click on **Backup Locations**. + + +3. The **Backup Locations** page will display a list of all backup locations configured for the current project. + + +4. Search for the newly added backup location in the list. The presence of the backup location validates that you successfully added a new backup location. + + +
+ + +## Multiple Cloud Accounts with AWS STS + +Suppose your Kubernetes cluster is deployed in *AWS Account A*, and you want to create the backup in *AWS Account B*, but the Palette instance is hosted in *AWS Account C*. In this scenario, Palette will allow you to use the STS authentication method to add a backup location. The diagram below presents this scenario and shows the order of authentication you must follow. + +![A diagram highlighting the order of authentication required when the backup cloud account differs from the cluster deployment cloud account.](/clusters_cluster-management_backup-restore_separate-cloud-accounts.png) + + + +A multi-cloud account scenario requires you to perform the following authentication steps. + + +1. Grant Palette access to the cluster in AWS Account A. When you register a primary cloud account in Palette, you authenticate and authorize Palette to deploy clusters in the cloud account. Check out the [Add AWS Account](../../public-cloud/aws/add-aws-accounts.md) to guidance on how to add an AWS account in Palette. + + +2. Give Palette permission to use the S3 buckets in AWS Account B. Set the bucket permissions and link them to an IAM role. Then, update the IAM role to let Palette assume it. + + +3. Authorize the cluster with AWS Account B for S3 bucket access. Update the IAM role to allow Palette clusters to assume it. + + +Use the following steps to add an S3 bucket as the backup location using the STS authentication method when you have multiple cloud accounts. + +
+
+### Prerequisites
+
+* Both your Palette environment instance and the S3 bucket are hosted on AWS. This prerequisite is more applicable to self-hosted Palette and Palette VerteX customers. Palette SaaS is hosted in an AWS environment.
+
+
+* An AWS account where you deploy Kubernetes clusters. This account will be referred to as *AWS Account A*.
+
+
+* Another AWS account where you want to create the backup location. This account will be referred to as *AWS Account B*.
+
+
+* An S3 bucket in AWS Account B. The bucket will store the backup of your clusters or workspaces.
+
+
+* The following IAM policy must be created in your AWS Account B. Replace the `BUCKET-NAME` placeholder in the policy below with your bucket name. Refer to the [Creating IAM policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create-console.html) guide for additional guidance.
+
+ +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeVolumes", + "ec2:DescribeSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:CreateSnapshot", + "ec2:DeleteSnapshot" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:PutObject", + "s3:AbortMultipartUpload", + "s3:ListMultipartUploadParts" + ], + "Resource": [ + "arn:aws:s3:::BUCKET-NAME/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::BUCKET-NAME" + ] + } + ] +} +``` + +
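+
+One way to create this policy in AWS Account B with the AWS CLI is sketched below. The policy and file names are placeholders, and `policy.json` is assumed to contain the document above with `BUCKET-NAME` replaced.
+
+```shell
+aws iam create-policy \
+  --policy-name palette-backup-bucket-access \
+  --policy-document file://policy.json
+```
+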
+ + +### Instructions + +1. Log in to [Palette](https://console.spectrocloud.com/). + + +2. Navigate to **Project Settings** and click on **Backup Locations**. + + +3. Click on the **Add New Backup Location** button. Palette will open a wizard to configure the new backup location, as shown in the screenshot below. + + ![A screenshot highlighting the wizard and configuration fields to add a backup location in Palette.](/clusters_cluster-management_backup-restore_add_aws_account.png) + + +4. Fill out the input fields listed in the table below. + + |**Configuration Field**|**Value**| + |---|---| + |**Location Name**|Provide a name of your choice.| + |**Location Provider**|Select AWS from the **drop-down** Menu. | + |**Certificate**| Optional service provider x509 certificate.| + |**S3 Bucket**|The S3 bucket name you created in the object store. The bucket name must be DNS-compliant. For more information, refer to the [Bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) defined by AWS.| + |**Region**|Region where the S3 bucket is hosted. You can check the region code from the [Service endpoints](https://docs.aws.amazon.com/general/latest/gr/s3.html#s3_region) section in the AWS documentation.| + |**S3 URL**|Optional S3 bucket URL. If you provide a value, refer to the [Methods for accessing a bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html#virtual-host-style-url-ex) guide to determine the bucket URL, and select the **Force S3 path style** checkbox.| + + +5. Next, choose the **STS** authentication method. When you choose the STS authentication method, you must create a new IAM role and provide its Amazon Resource Name (ARN) to Palette. Check out the [Creating a role using custom trust policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-custom.html) guide from Amazon for additional guidance. + + +6. Switch to AWS Account B to create a new IAM role. The IAM role must have the necessary IAM policy attached, which you defined in the prerequisites section above. Refer to the [Creating a role to delegate permissions to an IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html) guide to learn how to create an IAM role. Use the following configuration while creating the IAM role. + + |**AWS Console Field**|**Value**| + |---|---| + |**Trusted entity type**| Select the **AWS account** option.| + |**AWS account**|Select the **Another AWS account** radio button.| + |**AWS Account ID**|Use the one displayed in Palette, which is Palette's account ID.| + |**Options**|Select the **Require external ID** checkbox.| + |External ID|Use the one displayed in Palette. Palette generates the external ID.| + |Permissions policies| Attach the IAM policy defined in the [Prerequisites section](#prerequisites) above.| + |Role name|Provide a name of your choice.| + |Role description|Provide an optional description.| + +
+ + + ![A view of the IAM Role creation screen](/clusters_cluster-management_backup_restore_add-backup-location-dynamic_aws_create_role.png) + + +7. Review the details of the newly created IAM role in AWS Account B. + +
+ + ![A view of the IAM Role creation summary screen](/clusters_cluster-management_backup_restore_add-backup-location-dynamic_aws_create_role_summary.png) + + +8. In the IAM role's **Trust relationships** section, a relationship will already be defined for Palette so that Palette can assume this role under specified conditions. + + + +9. Edit the existing trust policy of the newly created IAM role in AWS Account B. Append the following permission to the existing trust policy. This step will authorize the cluster in AWS Account A to assume the current IAM role. Replace the `[ACCOUNT-ID-FOR-AWS-ACCOUNT-A]` placeholder with the AWS account ID for AWS Account A.

+ +
+ + ```json + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::[ACCOUNT-ID-FOR-AWS-ACCOUNT-A]:root" + }, + "Action": "sts:AssumeRole" + } + ``` + + If you want to establish a trust relationship with a specific IAM role in AWS Account A, say *SpectroCloudRole*, you can use the `"arn:aws:iam::[ACCOUNT-ID-FOR-AWS-ACCOUNT-A]:role/SpectroCloudRole"` ARN instead. + + Your IAM trust policy should be similar to the policy defined below. The IAM policy has two trust relationships, one for Palette and another for the AWS Account A.
+ +
+ + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::[AWS-ACCOUNT-ID-OF-PALETTE]:root" + }, + "Action": "sts:AssumeRole", + "Condition": { + "StringEquals": { + "sts:ExternalId": "[YOUR-EXTERNAL-ID]" + } + } + }, + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::[ACCOUNT-ID-FOR-AWS-ACCOUNT-A]:root" + }, + "Action": "sts:AssumeRole" + } + ] + } + ``` + + In your case, the `[AWS-ACCOUNT-ID-OF-PALETTE]` and `[YOUR-EXTERNAL-ID]` placeholders will contain the values you used while creating the IAM role. + +
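+
+   If you maintain the trust policy as a local file, one way to apply the updated document to the role in AWS Account B is the following AWS CLI call. The role name and file name are placeholders.
+
+   ```shell
+   aws iam update-assume-role-policy \
+     --role-name palette-backup-role \
+     --policy-document file://trust-policy.json
+   ```
+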
+ + :::info + + Check out [How to use trust policies with IAM roles](https://aws.amazon.com/blogs/security/how-to-use-trust-policies-with-iam-roles/) for a deep dive into the IAM trust policies. + + ::: + + +10. Copy the IAM role ARN from AWS Account B. + + +11. Switch back to Palette, and resume the backup location creation wizard. Paste the copied IAM role ARN into the **ARN** field. + + +12. Click on **Validate**. Palette will display a validation status message. If the validation status message indicates a success, proceed to the next step. If the validation status message indicates an error, review the error message and verify the IAM configurations you provided. Ensure you have provided the correct IAM role ARN, Palette external ID, and that the IAM role has the required IAM policy permissions mentioned in the [Prerequisites section](#prerequisites). + + +13. Click on the **Create** button. + + +You now have a backup location for Palette to use to store the backup of your clusters or workspaces. This backup location is using AWS STS to authenticate Palette with the S3 bucket in AWS Account B. + + +### Validate + +Use the following steps to validate adding the new backup location. + + +1. Log in to [Palette](https://console.spectrocloud.com/). + + +2. Navigate to **Project Settings** and click on **Backup Locations**. + + +3. The **Backup Locations** page will display a list of all backup locations configured for the current project. + + +4. Search for the newly added backup location in the list. The presence of the backup location validates that you have successfully added a new backup location. + + + +## Next Steps + +You can now use the newly added backup location to create a backup of your clusters or workspaces. Refer to the [Create a Backup](create-cluster-backup.md) guide to learn how to create a backup of your clusters or workspaces. + + diff --git a/docs/docs-content/clusters/cluster-management/backup-restore/add-backup-location-static.md b/docs/docs-content/clusters/cluster-management/backup-restore/add-backup-location-static.md new file mode 100644 index 0000000000..3004ab08bf --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/backup-restore/add-backup-location-static.md @@ -0,0 +1,403 @@ +--- +sidebar_label: 'Add Backup Location using Static Credentials' +title: 'Add Backup Location using Static Credentials' +description: 'Learn how to add a backup location in Palette using static access credentials.' +hide_table_of_contents: false +sidebar_position: 10 +tags: ["clusters", "cluster management", "backup"] +--- + + +This guide provides instructions to add a backup location in Palette using static credentials. Below is an overview of the steps involved: + + +1. Create a storage bucket to store the backup files. For example, if you configure the backup location in AWS, you will need an S3 bucket. + + +2. Define the access permissions for the storage bucket and associate the permissions with an IAM entity. The IAM entity may be a user, service principal, or role depending on your infrastructure provider. + + +3. Generate the static credentials for the IAM entity with sufficient permissions to perform bucket-related operations. + + +4. Share the static credentials with Palette so that it can assume the IAM entity's role to perform bucket-related operations. + + +The following sections provide detailed instructions. Select the environment where you want to create a backup. 
+ +- [AWS](#aws) + +- [GCP](#gcp) + +- [MinIO](#minio) + +- [Azure](#azure) + + + +## AWS + +### Prerequisites + +* An AWS account. + + +* An S3 bucket in the AWS account. The bucket will store the backup of your clusters or workspaces. + + +* Add the following IAM policy to your AWS account. Replace the `BUCKET-NAME` placeholder in the policy below with your bucket name. Refer to the [Creating IAM policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create-console.html) for additional guidance.

+ + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeVolumes", + "ec2:DescribeSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:CreateSnapshot", + "ec2:DeleteSnapshot" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:PutObject", + "s3:AbortMultipartUpload", + "s3:ListMultipartUploadParts" + ], + "Resource": [ + "arn:aws:s3:::BUCKET-NAME/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::BUCKET-NAME" + ] + } + ] + } + ``` + +* Create an IAM user in your AWS account. While creating the IAM user, attach the IAM policy you defined in the previous prerequisite item. Refer to the [Creating an IAM user in your AWS account](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html) guide to learn how to create an IAM user. + + +* AWS will generate and display an access key for the newly created IAM user. An access key is made of up two parts - an *access key ID* and a *secret access key*. Copy both parts of the access key to a clipboard to use later in this guide. AWS will not display the secret access key again. + + If you skip copying the secret access key, refer to the [Managing access keys for IAM users](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) guide to learn how to create a new access key. + + +* If the S3 bucket is using a customer managed AWS Key Management Service (KMS) key for server-side encryption, ensure the Palette IAM user has the necessary permissions to access the KMS key. Otherwise, Palette will be unable to put objects in the S3 bucket and result in backup or restore failure. Check out the [Troubleshooting key access](https://docs.aws.amazon.com/kms/latest/developerguide/policy-evaluation.html) guide to learn more about common KMS issues. + + :::tip + + Use the IAM Policy Simulator to verify the IAM role has the necessary permissions to access a customer managed KMS key. Refer to the [Testing IAM policies with the IAM policy simulator](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_testing-policies.html) guide to learn more. + + ::: + + + +### Add an AWS S3 Bucket + +1. Log in to [Palette](https://console.spectrocloud.com/). + + +2. Navigate to **Project Settings** and click on **Backup Locations**. + + +3. Click on the **Add New Backup Location** button. + + +4. Fill out the input fields listed in the table below. + + |**Configuration Field**|**Value**| + |---|---| + |**Location Name**|Provide a name of your choice. | + |**Location Provider**|Select AWS from the **drop-down** Menu. | + |**Certificate**|Optional Service provider certificate.| + |**S3 Bucket**|Name of the S3 bucket you created in the object store. The bucket name must be DNS-compliant. For more information, refer to the [Bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) defined by AWS.| + |**Region**|Region where the S3 bucket is hosted. You can check the region code from the [Service endpoints](https://docs.aws.amazon.com/general/latest/gr/s3.html#s3_region) section in the AWS documentation.| + |**S3 URL**|Optional bucket URL. If you choose to provide a value, refer to the [Methods for accessing a bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html#virtual-host-style-url-ex) guide to determine the bucket URL. 
If you provided an S3 URL, enable the **Force S3 path style** checkbox.| + + +5. Next, choose the *Credentials* validation method. If you want to use dynamic credentials through the AWS STS service, refer to the [Add a Backup Location using Dynamic Credentials](add-backup-location-dynamic.md) for guided instructions. + + +6. Provide the IAM user's access key if you chose the **Credentials** method. The IAM user must have the necessary IAM policy attached, which you defined in the prerequisites section above. The specified policy allows Palette to create a backup in the S3 bucket. + + +7. Click on the **Validate** button. Palette will display a validation status message. If the validation status message indicates a success, proceed to the next step. If the validation status message indicates an error, review the error message and verify the configurations you provided. + + +8. Click on the **Create** button. + + +You have completed configuring and adding a backup location to Palette with static credentials. + + + +### Validate + +Use the following steps to validate adding the new backup location. + + +1. Log in to [Palette](https://console.spectrocloud.com/). + + +2. Navigate to **Project Settings** and click on **Backup Locations**. + + +3. The **Backup Locations** page will display a list of all backup locations configured for the current project. + + +4. Search for the newly added backup location in the list. The presence of the backup location validates that you successfully added a new backup location. + + +## GCP + +### Prerequisites + +* A GCP account. + + +* A storage bucket in the GCP account. + + +* A service account with sufficient permissions to perform the required read and write operations on the bucket. For simplicity, you can assign the storage administrator role to the service account to grant complete control of the storage bucket. Refer to the [IAM roles for Cloud Storage](https://cloud.google.com/storage/docs/access-control/iam-roles) document to learn about the available roles. + + +* JSON credentials for the service account. Refer to the [Create access credentials](https://developers.google.com/workspace/guides/create-credentials#service-account) to learn how to create the credentials for the service account. + + + +### Add a GCP Bucket + +1. Log in to [Palette](https://console.spectrocloud.com/). + + +2. Navigate to **Project Settings** and click on **Backup Locations**. + + +3. Click on the **Add New Backup Location** button. + + +4. Fill out the input fields listed in the table below. + + |**Field**|**Value**| + |---|---| + |**Location Name**|Provide a name of your choice.| + |**Location Provider**|Select GCP from the **drop-down** Menu. | + | **Bucket** | The name of the bucket you created in the GCP object store.| + | **JSON Credentials** | Provide the JSON credentials for the external authentication of the GCP storage. Ensure the associated service account has sufficient permissions to perform the required bucket operations. | + + +5. Click on the **Validate** button. Palette will display a validation status message. If the validation status message indicates a success, proceed to the next step. If the validation status message indicates an error, review the error message and verify the configurations you provided. + + +6. Click on the **Create** button. + + +You have completed configuring and adding a backup location to Palette with static credentials. + + + + +### Validate + +Use the following steps to validate adding the new backup location. + + +1. 
Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to **Project Settings** and click on **Backup Locations**. + + +3. The **Backup Locations** page will display a list of all backup locations configured for the current project. + + +4. Search for the newly added backup location in the list. The presence of the backup location validates that you successfully added a new backup location. + +## MinIO + +### Prerequisites + +* A MinIO account. + + +* An S3-compliant bucket in the MinIO account. + + +* IAM policy in your MinIO account to authorize a MinIO user to perform the required read and write operations on the MinIO bucket. MinIO uses Policy-Based Access Control (PBAC) to control which IAM identities can access the resources and what actions the IAM identities are authorized to perform on the specific resources. Refer to the [MinIO Access Management](https://min.io/docs/minio/linux/administration/identity-access-management/policy-based-access-control.html#access-management) guide to learn more about the IAM policy requirements. + + +* A MinIO user assigned to the IAM policy defined above. You can learn more about MinIO access management in the [MinIO object storage for Kubernetes](https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management.html) documentation. + + +* An access key for the MinIO user. You can create an access key from the MinIO console. Refer to the [MinIO official documentation](https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management/minio-user-management.html#access-keys) to learn about creating access keys. + + +* An optional service provider x509 certificate. + + + +### Add a MinIO Bucket + + +1. Log in to [Palette](https://console.spectrocloud.com/). + + +2. Navigate to the **Project Settings** and click on **Backup Locations**. + + +3. Click on the **Add New Backup Location** button. + + +4. Fill out the following input fields. Refer to the table below to learn more. + + |**Field**|**Value**| + |---|---| + |**Location Name**|Provide a name of your choice.| + |**Location Provider**|Select MinIO from the drop-down field. | + |**Certificate**|Service provider certificate, if your organization prefers it.| + |**S3 Bucket**|The name of the S3 bucket you created in the MinIO object store. | + | **Region** | The region where the MinIO server is configured. Example: `us-east-1` | + |**S3 URL** | The MinIO object storage console URL. Example: `http://12.123.234.567:0000`| + |**Force S3 path style** | This value is required for MinIO.| + +
+ + :::caution + + Ensure you check the **Force S3 path style** checkbox. S3 path style is required by Velero to access the MinIO object storage. Palette uses [Velero](https://velero.io/docs) to create backups. + + ::: + + +5. Next, provide the access key for the MiniIO user. The access key has two parts - the *access key ID* and the *secret key*. + + +6. Click on the **Create** button. + + +You have completed configuring and adding a backup location to Palette with static credentials. + + +### Validate + +Use the following steps to validate adding the new backup location. + + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to **Project Settings** and click on **Backup Locations**. + + +3. The **Backup Locations** page will display a list of all backup locations configured for the current project. + + +4. Search for the newly added backup location in the list. The presence of the backup location validates that you successfully added a new backup location. + + +## Azure + + +### Prerequisites + +* An active Azure cloud account. You will need the following Azure items to complete the backup setup: + * Tenant ID + * Subscription ID + + +* An Azure storage account in the Azure account. You will need to be aware of the values for the following Azure storage items: + * Resource group name + * Storage account name + * Stock-Keeping Unit (SKU) + +Refer to the [Create a storage account](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-create?tabs=azure-portal) guide to learn how to create an Azure storage account + + + +* A container in the Azure Storage account. Refer to the [Manage blob containers using the Azure portal](https://learn.microsoft.com/en-us/azure/storage/blobs/blob-containers-portal) guide to learn how to create an Azure storage container. + + +* An Azure service principal with sufficient permissions to perform the required read and write operations on the container. You will need the values of the following items: + * Client ID + * Client Secret + +Check out the [Work with Azure service principal using the Azure CLI](https://learn.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli#what-is-an-azure-service-principal) guide to learn more about Azure service principals. + + + +### Add an Azure Blob Container + + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to **Project Settings** and click on **Backup Locations**. + + +3. Click on the **Add New Backup Location** button. + + +4. Fill out the input fields listed in the table below. + + |**Field**|**Value**| + |---|---| + |**Location Name**|Provide a name of your choice.| + |**Location Provider**|Select Azure from the **drop-down** Menu. | + |**Container Name** | Name of the container created in the Azure storage. | + | **Storage Name** | Name of the Azure storage resource. | + | **Stock-Keeping Unit** | Azure storage resource SKU. | + |**Tenant ID** | Azure tenant ID.| + | **Subscription ID** | Azure subscription ID where you created the Azure storage resource.| + | **Resource Group:** | Azure resource group name. | + | **Client ID** | Azure client ID of the service principal. | + | **Client Secret** | Azure client secret for the service principal you created for Palette to assume. | + + + +5. Click on the **Validate** button. Palette will display a validation status message. If the validation status message indicates a success, proceed to the next step. 
If the validation status message indicates an error, review the error message and verify the configurations you provided. + + +6. Click on the **Create** button. + + +You have completed configuring and adding a backup location to Palette with static credentials. + + + +### Validate + +Use the following steps to validate adding the new backup location. + + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to **Project Settings** and click on **Backup Locations**. + + +3. The **Backup Locations** page will display a list of all backup locations configured for the current project. + + +4. Search for the newly added backup location in the list. The presence of the backup location validates that you successfully added a new backup location. + + + + +## Next Steps + +You can now use the newly added backup location to create a backup of your clusters or workspaces. Refer to the [Create a Backup](create-cluster-backup.md) guide to learn how to create a backup of your clusters or workspaces. \ No newline at end of file diff --git a/docs/docs-content/clusters/cluster-management/backup-restore/backup-restore.md b/docs/docs-content/clusters/cluster-management/backup-restore/backup-restore.md new file mode 100644 index 0000000000..d1a3617924 --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/backup-restore/backup-restore.md @@ -0,0 +1,94 @@ +--- +sidebar_label: "Backup and Restore" +title: "Backup and Restore" +description: "An overview of the cluster backup and restore concepts." +hide_table_of_contents: false +sidebar_position: 70 +tags: ["clusters", "cluster management"] +--- + + +Palette supports backup and restore capabilities for Kubernetes clusters. + +A backup is a persistent state of Kubernetes resources, ranging from objects such as Pods, DaemonSets, and Services to persistent volumes. A backup allows you to save the current state of a cluster and restore it at a later point in time if needed. You can restore a backup to the same or a different cluster. + +You can schedule a backup of a specific cluster or an entire [workspace](../../../workspace/workspace.md). You can also maintain multiple backups of a cluster or workspace. + + +## Get Started + + +To get started with creating a backup, check out the [Add a Backup Location using Static Credentials](add-backup-location-static.md) or [Add a Backup Location using Dynamic Credentials](add-backup-location-dynamic.md) guide. + + +:::info + +If you are using a workspace, refer to the [Manage Palette Workspace](/workspace/workload-features#managepaletteworkspace) guide to learn more about backup and restore actions for a workspace. + +::: + +
+ + +## What is a Backup Location? + +A backup location is an object storage, such as an AWS Simple Storage Service (S3) bucket, where you store and retrieve the backup files. Before you create a backup, the initial step is configuring a backup location. You can configure a backup location in a public cloud or a data center environment and add it in Palette. Palette supports the following object storage solutions as backup locations. + + +- Amazon Web Services (AWS) S3 bucket + + +- Google Cloud Platform (GCP) bucket + + +- MinIO S3 bucket + + +- Azure blob storage + + +
+ + +:::info + +Palette uses open-source Velero to provide backup and restore capabilities. You can learn more about Velero by checking out the Velero [Restore Reference](https://velero.io/docs/main/restore-reference/) and [Backup Reference](https://velero.io/docs/main/backup-reference/). + + +::: + + +You can add a backup location to the same cloud account you use to deploy Kubernetes clusters or use a different account. Both authentication methods require an Identity Access Management (IAM) entity in the cloud account and access credentials for the IAM entity. + + +## Backup Locations and Credentials + + +Palette uses the access credentials to authenticate itself while accessing the storage bucket. Palette supports static credentials for all cloud service providers. You can also use dynamic credentials with the backup and restore workflow. + +Review the table below to learn more about what cloud providers and credentials methods are supported. + + +|**Service Provider**|**Static Credentials Support**|**Dynamic Credentials Support**| +|---|---|---| +|AWS|✅|✅ | +|GCP|✅|❌| +|MinIO|✅|❌| +|Azure|✅|❌| + +To learn more about adding a backup location, check out the [Add a Backup Location using Static Credentials](/clusters/cluster-management/backup-restore/add-backup-location-static) or [Add a Backup Location using Dynamic Credentials](/clusters/cluster-management/backup-restore/add-backup-location-dynamic) guide. + + +## Resources + + +- [Add a Backup Location using Static Credentials](add-backup-location-static.md) + + +- [Add a Backup Location using Dynamic Credentials](add-backup-location-dynamic.md) + + +- [Create a Cluster Backup](create-cluster-backup.md) + + +- [Restore a Cluster Backup](restore-cluster-backup.md) diff --git a/docs/docs-content/clusters/cluster-management/backup-restore/create-cluster-backup.md b/docs/docs-content/clusters/cluster-management/backup-restore/create-cluster-backup.md new file mode 100644 index 0000000000..41204b242d --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/backup-restore/create-cluster-backup.md @@ -0,0 +1,223 @@ +--- +sidebar_label: 'Create Cluster Backup' +title: 'Create Cluster Backup' +description: 'Learn how to create a cluster backup to an existing backup location.' +hide_table_of_contents: false +sidebar_position: 30 +tags: ["clusters", "cluster management", "backup"] +--- + + +This guide provides instructions for how to create a cluster backup using Palette. You can refer to the cluster from where you created the backup as the *source cluster*. + +A backup operation can only back up specified namespaces, cluster-scoped resources, and Persistent Volumes (PVs) from the source cluster. The backup operation includes the source cluster profile in Palette's final backup object. + + +:::info + +Palette uses open-source Velero to provide backup and restore capabilities. You can learn more about Velero by checking out their [Restore Reference](https://velero.io/docs/main/restore-reference) and [Backup Reference](https://velero.io/docs/main/backup-reference). + + +::: + +You can schedule a cluster backup or initiate a backup on demand. You can define a backup schedule in the cluster configuration for an existing cluster or while deploying a cluster. + +Palette supports scheduling recurring backups, with the ability to customize the frequency and the time. You can also specify the backup expiry period, meaning the duration, after which Palette will delete the backup automatically. 
For example, you can schedule a backup for every week on Sunday at midnight and automatically expire the backup after three months. Additionally, you can initiate a backup on demand for an existing cluster. + + + +The following sections will describe the prerequisites and the detailed instructions to create a cluster backup. +You can schedule a backup or initiate a backup on demand. + +- [Schedule a Backup](#schedule-a-backup) + +- [On-demand Backup](#on-demand-backup) + +
+ +## Schedule a Backup + +Use the following instructions to schedule a backup for an existing cluster. + + +### Prerequisites + +- An available backup location in Palette. Refer to the [Add a Backup Location using Static Credentials](add-backup-location-static.md) or [Add a Backup Location using Dynamic Credentials](add-backup-location-dynamic.md). + + +- An active cluster in Palette. + +### Instructions + + +1. Log in to [Palette](https://console.spectrocloud.com/). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select a cluster you want to back up. Ensure the cluster status is *Healthy*. + + +4. Click on the **Settings drop-down Menu** in the top right corner, and select **Cluster Settings**. + + +5. Next, on the **Settings Menu**, select the **Schedule Backups**. The screenshot below highlights the fields for scheduling a backup. + +
+ + ![A screenshot highlighting the fields for scheduling a backup for an existing cluster.](/clusters_cluster-management_backup-restore_scheduled-backup.png) + + +6. Fill out the required input fields to schedule a backup. Refer to the table to learn more about each input field. + + |**Field**|**Description**| + |---|---| + |**Backup prefix**|Palette will generate a name automatically. Provide a prefix string you want to prepend to the auto-generated name. | + |**Select backup location**|Choose a backup location. You must configure a location before creating a backup. | + |**Backup schedule**|Create a backup schedule of your choice. You can review the scheduling options below the current table.| + |**Select period until expiry**|Select an expiry duration for the backups. Palette will delete the backup after the expiry duration.| + |**Include all disks**|Select this checkbox if you want to include all the disks in the backup.| + |**Include cluster resources**|Select the checkbox if you want Palette to back up the cluster-scoped and the namespace-scoped resources. However, if you do not select the checkbox, Palette will back up only the namespace-scoped resources.| + |**Include Namespaces** (Optional)| Palette will back up all namespaces by default. However, you can specify any namespaces you do not want backed up.| + +
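+
+   Because Palette uses Velero for backups, the schedule and expiry you select map to a Velero schedule behind the scenes. The following command is a rough, illustrative equivalent only; Palette creates and manages the actual schedule for you, and the schedule name is a placeholder.
+
+   ```shell
+   # Weekly backup at midnight on Sunday, retained for roughly three months.
+   velero schedule create palette-weekly-backup \
+     --schedule="0 0 * * 0" \
+     --ttl 2160h \
+     --snapshot-volumes \
+     --include-cluster-resources=true
+   ```
+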
+ + :::info + + In Kubernetes, there are two types of resources: cluster-scoped and namespace-scoped. + + Cluster-scoped resources, such as StorageClasses, ClusterRoles, and others, are visible and accessible to all users in the cluster, regardless of the namespaces. + + Namespace-scoped resources, like Pods, Deployments, Services, and others, belong to a specific namespace and can only be accessed by users with the necessary permissions. + + ::: + +
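+
+   If you are unsure whether a resource is cluster-scoped or namespace-scoped, you can check with kubectl:
+
+   ```shell
+   # List cluster-scoped resource types
+   kubectl api-resources --namespaced=false
+
+   # List namespace-scoped resource types
+   kubectl api-resources --namespaced=true
+   ```
+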
+ + A cluster backup supports the following scheduling options: + + * You can customize your backup to occur at a specific month, day, hour, and minute that suits your needs. + * Every week on Sunday at midnight + * Every two weeks at midnight + * Every month on the first at midnight + * Every two months on the first at midnight + * Never + + +7. Click on the **Create Backup** button. + +You now have successfully created a scheduled backup for the selected cluster. You can view the status of the backup in the **Backups** tab. + +### Validate + +Use the following steps to validate creating a backup in Palette. + +1. Log in to [Palette](https://console.spectrocloud.com/). + + +2. Navigate to the left **Main Menu**, and select **Clusters**. + + +3. Select the cluster from where you created the backup. Palette displays the details of the selected cluster. + + +4. Navigate to the **Backups** tab and click on the **Backups** nested tab. Palette displays a list of all available backups for the current cluster, including the newly created one. The screenshot below shows an example backup. This step validates that you have successfully created the backup. + + ![A screenshot highlighting the list of available backups for the specific cluster.](/clusters_cluster-management_backup-restore_view-backup.png) + + +5. You can click on the newly created backup from the list to view its details. Palette displays the backup name, status, creation date, expiry date, list of backed-up namespaces, and a boolean field indicating whether the backup includes all disks and cluster-scoped resources. + + +## On-demand Backup + +Use the following instructions to create an on-demand backup for an existing cluster. + + +### Prerequisites + +- An available backup location in Palette. Refer to the [Add a Backup Location using Static Credentials](add-backup-location-static.md) or [Add a Backup Location using Dynamic Credentials](add-backup-location-dynamic.md). + +### Instructions + + +- An active cluster in Palette. + + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select a cluster you want to back up. Ensure the cluster status is *Healthy*. + + +4. Navigate to the **Backups** tab and click on the **Create Backup** button. The screenshot below shows the popup window Palette opens to initiate an on-demand backup.

+ +
+
+   ![A screenshot highlighting the fields for an on-demand backup for an existing cluster.](/clusters_cluster-management_backup-restore_ondemand-backup.png)
+
+
+
+5. Use the following information to configure an on-demand backup.
+
+   |**Field**|**Description**|
+   |---|---|
+   |**Backup name**|Provide a name for the backup. |
+   |**Select backup location**|Choose a backup location. You must configure a backup location before creating a backup. Refer to the [Add a Backup Location using Static Credentials](add-backup-location-static.md) or [Add a Backup Location using Dynamic Credentials](add-backup-location-dynamic.md) guides to learn about adding a backup location using static or dynamic credentials. |
+   |**Select period until expiry**|Select an expiry duration for the backup. The backup will be automatically removed after the expiry duration.|
+   |**Include all disks**|Select this checkbox if you want to include PVs and volume snapshots in the backup.|
+   |**Include cluster resources**|Select the checkbox if you want to include cluster-scoped resources in the backup. If you do not select the checkbox, Palette will back up only the namespace-scoped resources.|
+   |**Include Namespaces** (Optional)| Palette will back up all namespaces by default. However, you can specify namespaces you do not want backed up. |
+
+ + :::info + + In Kubernetes, there are two types of resources: cluster-scoped and namespace-scoped. + + Cluster-scoped resources, such as StorageClasses, ClusterRoles, and others, are visible and accessible to all users in the cluster, regardless of the namespaces. + + Namespace-scoped resources, like Pods, Deployments, Services, and others, belong to a specific namespace and can only be accessed by users with the necessary permissions. + + ::: + +
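+
+   As with scheduled backups, Palette translates these choices into a Velero backup request. The following command is a rough, illustrative equivalent only; Palette issues the actual request for you, and the backup name and excluded namespace are placeholders.
+
+   ```shell
+   velero backup create my-on-demand-backup \
+     --ttl 720h \
+     --snapshot-volumes \
+     --include-cluster-resources=true \
+     --exclude-namespaces kube-node-lease
+   ```
+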
+ + +6. Click on the **Create Backup** button. + +You now have successfully created a backup for the selected cluster. Depending on the size of the cluster, the backup process may take some time to complete. You can view the status of the backup in the **Backups** tab. + + + + + +### Validate + +Use the following steps to validate creating a backup in Palette. +
+
+1. Log in to [Palette](https://console.spectrocloud.com/).
+
+
+2. Navigate to the left **Main Menu**, and select **Clusters**.
+
+
+3. Select the cluster from which you created the backup. Palette displays the details of the selected cluster.
+
+
+4. Navigate to the **Backups** tab and click on the **Backups** nested tab. Palette displays a list of all available backups for the current cluster, including the newly created one. The screenshot below shows an example backup. This step validates that you have successfully created the backup.
+
+   ![A screenshot highlighting the list of available backups for the specific cluster.](/clusters_cluster-management_backup-restore_view-backup.png)
+
+
+5. You can click on the newly created backup from the list to view its details. Palette displays the backup name, status, creation date, expiry date, list of backed-up namespaces, and a boolean field indicating whether the backup includes all disks and cluster-scoped resources.
+
+
+## Next Steps
+
+When the backup is available, you can restore it to the same or to a different cluster. Refer to the [Restore a Cluster Backup](restore-cluster-backup.md) guide to learn more about restoring a backup.
diff --git a/docs/docs-content/clusters/cluster-management/backup-restore/restore-cluster-backup.md b/docs/docs-content/clusters/cluster-management/backup-restore/restore-cluster-backup.md
new file mode 100644
index 0000000000..f3b5e0ee4d
--- /dev/null
+++ b/docs/docs-content/clusters/cluster-management/backup-restore/restore-cluster-backup.md
@@ -0,0 +1,179 @@
+---
+sidebar_label: 'Restore Cluster Backup'
+title: 'Restore Cluster Backup'
+description: 'Learn how to restore a cluster backup to the source or a different cluster.'
+hide_table_of_contents: false
+sidebar_position: 40
+tags: ["clusters", "cluster management", "backup"]
+---
+
+This guide provides instructions to restore a cluster backup in Palette. You can restore a backup to the same cluster you created it from or to a different cluster within the same project. The following terms are used to identify the backup source and destination.
+
+
+- *Source cluster* - The cluster from which you created the backup.
+
+
+- *Destination cluster* - The cluster to which you want to restore the backup.
+
+
+Before you restore a backup, take a moment to review the storage classes in the destination cluster. The following section provides details about storage classes and how to identify them.
+
+
+
+## Storage Class
+
+A [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/) is a Kubernetes resource that helps provision persistent volumes dynamically. When restoring a backup that includes persistent volumes, the storage classes in the destination cluster must match the storage classes in the source cluster.
+
+ +### Default Storage Class +When you create a cluster profile, Palette *usually* creates a default storage class called `spectro-storage-class`. In some cases, such as when you create a cluster profile for specific cloud providers' managed Kubernetes service, Palette does not create a default storage class. For example, when you create a cluster profile for GCP GKE using the [GKE Managed GCE Persistent Disk Driver](https://docs.spectrocloud.com/integrations/gce) storage pack, Palette will not create a storage class by default. In this scenario, you can define a custom storage class in the cluster profile or create one after deploying the cluster. + +
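+For reference, the following is a minimal sketch of a custom storage class manifest that you could add through a manifest layer or apply with kubectl after the cluster is deployed. The `provisioner` and `parameters` values depend on your cloud and CSI driver; the GKE Persistent Disk driver shown here is only an assumption for the GKE example above:
+
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: spectro-storage-class
+  annotations:
+    # Marks this storage class as the cluster default (optional).
+    storageclass.kubernetes.io/is-default-class: "true"
+# Example CSI provisioner for GKE Persistent Disk. Replace it with the
+# provisioner that matches your cluster's CSI driver.
+provisioner: pd.csi.storage.gke.io
+parameters:
+  type: pd-standard
+volumeBindingMode: WaitForFirstConsumer
+```
+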
+ +### Identify Storage Classes + +Below are different methods to identify the storage classes of your source and destination clusters. + + +You can review the cluster profile's Container Storage Interface (CSI) layer configuration YAML. If the CSI YAML defines the storage classes, you will find them within the `storageClasses` attribute. Below is an example of a CSI YAML that defines two storage classes, `spectro-storage-class` and `addon-storage-class`. + +
+ + +```yaml +storageClasses: + - name: spectro-storage-class + annotations: + storageclass.kubernetes.io/is-default-class: "true" + type: "gp2" + - name: addon-storage-class +``` + + + +Another method to identify the storage classes in the destination cluster is to use the kubectl CLI. If you have access to the destination cluster, you can issue the following command to view a list of storage classes. + + + +```bash +kubectl get storageclasses --all-namespaces +``` + +Review the output from the above command. If the output contains the storage classes you need, you can proceed with the restore operation. Otherwise, you can create the required storage classes in the destination cluster. + + +## Create Storage Classes + +If there is a mismatch between storage classes in the source and destination cluster, create the required new storage classes in the destination cluster. To define a new storage class in the destination cluster, you will need to define a [StorageClass resource](https://kubernetes.io/docs/concepts/storage/storage-classes/#the-storageclass-resource) manifest and apply it using kubectl. Alternatively, you can define the storage classes in the cluster profile through a manifest layer and apply the updates to the cluster before initiating a restore. + + + + +:::info + +A restore operation only restores the specified namespaces, cluster-scoped resources, and persistent volumes from the backup. To learn more about the restore operation, refer to the Velero [Restore Reference](https://velero.io/docs/main/restore-reference). + +::: + +### Prerequisites + +- The source cluster is available and healthy in Palette. + + :::caution + + If the source cluster is unavailable in Palette, you cannot restore its backup. + + ::: + + +- A destination cluster is available and healthy in Palette. + + +- The destination cluster must belong to the same project as the source cluster. + + +- A backup is created for the source cluster. Check out the [Create Cluster Backup](create-cluster-backup.md) for guidance on how to create a backup. + + +- Ensure the storage classes in the destination cluster match the storage classes in the source cluster. + + +- If the backup location is configured using dynamic credentials, such as the AWS Security Token Service (STS) authentication method, ensure you define a trust relationship with the destination cluster. The trust relationship enables the destination cluster to assume the necessary IAM role to access the backup files. Refer to the [Add a Backup Location using Dynamic Credentials](add-backup-location-dynamic.md) guide. + + +### Instructions + +Use the following instructions in Palette to restore a backup to a destination cluster. + + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + +3. Select the source cluster to view its details page. + + +4. Navigate to the **Backups** tab and click on **Backups** in the nested tabs. Palette displays a list of available backups. + + + +5. Select a backup you want to restore from the list. + + +6. Click on the **Restore Backup** button at the bottom, as highlighted in the screenshot below. + + ![A screenshot highlighting the details of a specific backup.](/clusters_cluster-management_backup-restore_restore.png) + + + +7. In the restore operation wizard, select the destination cluster where you want to restore the backup. For example, you can select the current or a different cluster. 
You can initiate a restore operation on any destination cluster in the same project as the source cluster. A backup does not store infrastructure-related information, such as node pools and configuration. Therefore, the destination cluster can have a different infrastructure provider than the source cluster. + + You can include specific namespaces, Persistent Volumes (PVs), and cluster-scoped resources. + + ![A screenshot highlighting the restore operation configurations.](/clusters_cluster-management_backup-restore_confirm-restore.png) + + +8. Review the restore operation configurations, and click on the **Confirm Restore** button at the bottom. + + +You now have successfully initiated a restore operation. Palette displays the status of the restore operation in the **Restores** nested tab. You can navigate to the **Events** tab to view the logs for the restore operation. + + + +### Validate + +Use the following steps to validate restoring a cluster backup. + + +1. Log in to [Palette](https://console.spectrocloud.com/). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select the destination cluster where you want to restore the backup. + + +4. In the cluster details **Overview** tab, take note of the cluster's **Last Modified** field. The cluster's last modified timestamp will be updated to the time when you initiated the restore operation. + + +5. Navigate to the **Backups** tab and click on **Restores** in the nested tabs. Palette displays the status, restore timestamp, source cluster, and the backup name for each restore operation you performed for the current cluster. The screenshot below shows an example restore operation. + + ![A screenshot highlighting the restoration status for the destination cluster.](/clusters_cluster-management_backup-restore_verify-restore.png) + + You restored the backup successfully when the backup status displays *Completed*. + + :::info + + Remember, a backup does not include the cluster profile of the source cluster. Therefore, the restore operation will not change the cluster profile of the destination cluster. + + ::: + + +6. To review the backup logs, navigate to the **Events** tab. + + +7. Examine the logs. Each log contains a status message. When the restore operation is complete, all the namespace-scoped resources contain your desired backed-up data. \ No newline at end of file diff --git a/docs/docs-content/clusters/cluster-management/certificate-management.md b/docs/docs-content/clusters/cluster-management/certificate-management.md new file mode 100644 index 0000000000..a8bec905ec --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/certificate-management.md @@ -0,0 +1,405 @@ +--- +sidebar_label: "Certificate Management" +title: "Certificate Management" +description: "Learn how to manage the cluster certificates through Palette." +hide_table_of_contents: false +sidebar_position: 50 +tags: ["clusters", "cluster management"] +--- + + +Palette installs Kubernetes through the tool, [kubeadm](https://kubernetes.io/docs/reference/setup-tools/kubeadm). As a result, all deployed clusters include auto-generated Public Key Infrastructure (PKI) certificates created by kubeadm. We recommend you review the [PKI certificates and requirement](https://kubernetes.io/docs/setup/best-practices/certificates) Kubernetes documentation to learn more about the auto-generated certificates and to better understand their purpose. + +This reference page focuses on how to renew the PKI certificates through Palette. 
You have two options for how you can renew the cluster PKI certificates: + +
+ + * Automatic Certificate Renewal + + + * Manual Certificate Renewal + + + +:::info + +Certificates created by kubeadm expire after 365 days. The Root Certificate Authority (CA) is valid for 3652 days or 10 years. + +::: + + +You can learn more about each option in the following sections. + + + +## Automatic Certificate Renewal + +When you upgrade the control plane on a cluster, the PKI certificates are automatically updated. Upgrading a cluster's Kubernetes version, whether a minor patch or a major release, results in renewed PKI certificates. This is the method that requires the least user actions when it comes to renewing PKI certificates. We recommend regularly updating your clusters to stay current with security fixes and best practices. By keeping your host cluster updated, you prevent the scenario of PKI certificates from reaching their one-year expiration date. + +
+ +:::info + +You can upgrade the Kubernetes version of a cluster by updating the Kubernetes layer of the [cluster profile](/cluster-profiles) and applying the cluster profile update to the cluster. For guidelines on updating pack versions, review [Update the Pack Version](../../cluster-profiles/task-update-profile.md#updatethepackversion). + +::: + + +## Manual Certificate Renewal + +You can renew the cluster PKI certificates on-demand using the Palette API or the Palette User Interface (UI). When you manually trigger a certificate renewal action, it results in cluster nodes becoming repaved. Palette will scale up the cluster control plane nodes count and deploy new nodes with new PKI certificates auto-generated by kubeadm. Once the new control plane nodes are active and available, Palette will remove the previous control plane nodes. The worker nodes renew once the control plane nodes are updated and available. + +
+ +:::caution + +A manual renewal of cluster PKI certificates results in all cluster nodes becoming repaved. + +::: + + +Review the following sections to learn more about manually renewing the cluster PKI certificates. Choose your preferred update method, using the Palette UI or the API. + + + +
+
+
+## UI
+
+Use the following steps to update the cluster PKI certificates through the Palette UI.
+
+### Prerequisites
+
+- A deployed host cluster.
+
+
+- Access to the host cluster.
+
+### Renew Cluster Certificate
+
+1. Log in to [Palette](https://console.spectrocloud.com).
+
+
+2. Navigate to the left **Main Menu** and select **Clusters**.
+
+
+3. Select the host cluster to renew its PKI certificates.
+
+
+4. From the cluster details page, click on **View K8s Certificates**.
+
+ + ![A view of the Palette UI with an arrow pointing to the **View K8s Certificates** button.](/clusters_cluster-management_certificate-management_cluster-details-page.png) + + +5. Next, select **Renew All** to start the renewal process. + +
+ + ![A view of the cluster certificates displaying the expiration date](/clusters_cluster-management_certificate-management_certificate-renew-page.png) + +The renewal process may take several minutes, depending on the number of cluster nodes. + +### Validate + +Using the following steps, you can validate that the cluster's PKI certificates were renewed. + +
+ +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select the host cluster with the renewed PKI certificates. + + +4. From the cluster details page, click on **View K8s Certificates**. + + +5. Review the expiration date for each component. Each component's expiration date will have a status of **365d** with a date that is one year away. + + +## API + +Use the following steps to update the cluster PKI certificates through the Palette API. + +### Prerequisites + +- A Palette API key. You can learn how to create a Palette API key by reviewing the [API Key](../../user-management/user-authentication.md) documentation. + + +- A deployed host cluster. + + +- Access to the host cluster. + + +### Renew Cluster Certificate + + +1. Set your Palette API key as an environment variable. Add your actual API key in place of `REPLACE_ME`. + +
+ + ```shell + export API_KEY=REPLACE_ME + ``` + + +2. Set the project ID as an environment variable. Add your project ID in place of `REPLACE_ME`. You can find the project ID on the Palette landing page. The project ID is displayed in the top right corner of the page. + +
+ + ```shell + export PROJECT_ID=REPLACE_ME + ``` + + + + +3. Set the cluster ID as an environment variable. Add your cluster's ID in place of `REPLACE_ME`. You can get the cluster ID from the cluster detail's page URL. The value after `clusters/` is the cluster ID. + +
+ + ```shell + export CLUSTER_ID=REPLACE_ME + ``` + + + +4. Use the Palette API endpoint `https://api.spectrocloud.com/v1/spectroclusters/{uid}/k8certificates/renew` to renew a cluster's PKI certificates. The endpoint accepts the HTTP method `PATCH`, and the only required parameter is the cluster ID. + +
+
+  ```shell
+  curl --request PATCH \
+  --url "https://api.spectrocloud.com/v1/spectroclusters/$CLUSTER_ID/k8certificates/renew" \
+  --header 'Content-Type: application/json' \
+  --header 'Accept: application/json' \
+  --header "ApiKey: $API_KEY" \
+  --header "ProjectUid: $PROJECT_ID"
+  ```
+
+5. No output is returned and an HTTP status `204` is expected.
+
+
+
+
+The renewal process may take several minutes, depending on the number of cluster nodes.
+
+### Validate
+
+
+Using the following steps, you can validate that the cluster's PKI certificates were renewed.
+
+ + +1. Set your Palette API key as an environment variable. Add your actual API key in place of `REPLACE_ME`. + +
+ + ```shell + export API_KEY=REPLACE_ME + ``` + + +2. Set the project ID as an environment variable. Add your project ID in place of `REPLACE_ME`. + +
+ + ```shell + export PROJECT_ID=REPLACE_ME + ``` + + + +3. Set the cluster ID as an environment variable. Add your cluster's ID in place of `REPLACE_ME`. + +
+ + ```shell + export CLUSTER_ID=REPLACE_ME + ``` + + + +4. Retrieve the cluster's certificate information from Palette by using the `https://api.spectrocloud.com/v1/spectroclusters/{uid}/k8certificates` endpoint. + +
+
+  ```shell
+  curl \
+  --url "https://api.spectrocloud.com/v1/spectroclusters/$CLUSTER_ID/k8certificates" \
+  --header 'Content-Type: application/json' \
+  --header 'Accept: application/json' \
+  --header "ApiKey: $API_KEY" \
+  --header "ProjectUid: $PROJECT_ID"
+  ```
+
+
+5. Validate the output and confirm the expiration date is one year away.
+
+ + ```json hideClipboard + { + "machineCertificates": [ + { + "certificateAuthorities": [ + { + "certificates": [ + { + "expiry": "2024-05-23T16:51:05.000Z", + "name": "front-proxy-client" + } + ], + "expiry": "2033-05-23T16:45:22.209Z", + "name": "front-proxy-ca" + }, + { + "certificates": [ + { + "expiry": "2024-05-23T16:51:05.000Z", + "name": "kube-apiserver" + }, + { + "expiry": "2024-05-23T16:51:05.000Z", + "name": "kube-apiserver-kubelet-client" + } + ], + "expiry": "2033-05-23T16:45:22.209Z", + "name": "ca" + }, + { + "certificates": [ + { + "expiry": "2024-05-23T16:51:05.000Z", + "name": "kube-apiserver-etcd-client" + }, + { + "expiry": "2024-05-23T16:51:05.000Z", + "name": "kube-etcd-healthcheck-client" + }, + { + "expiry": "2024-05-23T16:51:05.000Z", + "name": "kube-etcd-peer" + }, + { + "expiry": "2024-05-23T16:51:05.000Z", + "name": "kube-etcd-server" + } + ], + "expiry": "2033-05-23T16:45:22.209Z", + "name": "etcd-ca" + } + ], + "name": "ip-10-0-1-120.ec2.internal" + } + ] + } + ``` + + +
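+If you have SSH access to a control plane node, you can also cross-check the renewed certificates on the node itself with kubeadm. This is an optional check and assumes kubeadm 1.20 or later is installed on the node:
+
+  ```shell
+  # List the expiration dates of all kubeadm-managed certificates on this node
+  sudo kubeadm certs check-expiration
+  ```
+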
+ +## Advanced - Only Renew Control Plane Nodes + +You can configure Palette to only renew the PKI certificates for the control plane nodes. You can achieve this by using the annotation `spectrocloud.com/cert-renew-controlplane-only` and setting the value to `true`. To enable this behavior, you must use `kubectl` and apply the update to a Custom Resource Definition (CRD) created by Palette during the cluster deployment process. + +Use the following steps to configure Palette only to renew the certificates for control plane nodes. + + +### Prerequisites + +- Kubectl is installed in your system. + + +- A host cluster deployed. + + +- Access to the host cluster's kubeconfig file. Refer to the [Access Cluster with CLI](palette-webctl.md) guide to learn how to use your cluster's kubeconfig file. + + +### Configure Cluster + + +1. Set your cluster name as an environment variable. Add your cluster's name in place of `REPLACE_ME`. + +
+
+  ```shell
+  export CLUSTER_NAME=REPLACE_ME
+  ```
+
+2. Use the following command to retrieve the namespace of the CRD that Palette created in your cluster.
+
+
+  ```shell
+  namespace=$(kubectl get spc --all-namespaces --output jsonpath='{range .items[?(@.metadata.name=="'"$CLUSTER_NAME"'")]}{.metadata.namespace}{"\n"}{end}')
+  ```
+
+
+3. Use `kubectl` to update the CRD to include the `spectrocloud.com/cert-renew-controlplane-only` annotation.
+
+
+  ```shell
+  kubectl annotate spc/$CLUSTER_NAME --namespace $namespace spectrocloud.com/cert-renew-controlplane-only="true"
+  ```
+
+4. Verify the annotation was set correctly with the command below. The expected output is `true`.
+
+
+  ```shell
+  kubectl get spc/$CLUSTER_NAME --namespace $namespace --output jsonpath='{.metadata.annotations.spectrocloud\.com/cert-renew-controlplane-only}'
+  ```
+
+  Output
+  ```
+  true
+  ```
+
+
+5. Next, trigger a certificate renewal using either [Automatic Certificate Renewal](#automatic-certificate-renewal) or [Manual Certificate Renewal](#manual-certificate-renewal).
+
+
+
+The renewal process may take several minutes, depending on the number of cluster nodes.
+
+### Validate
+
+Using the following steps, you can validate that the cluster's PKI certificates are renewed only for the control plane nodes.
+
+
+
+1. Log in to [Palette](https://console.spectrocloud.com).
+
+
+2. Navigate to the left **Main Menu** and select **Clusters**.
+
+
+3. Select the host cluster with the renewed PKI certificates.
+
+
+4. From the cluster details page, click on **View K8s Certificates**.
+
+
+5. Review the expiration date for each component. Each component's expiration date will have a status of **365d** with a date that is one year away.
+
+
+6. Navigate to the **Nodes** tab and verify that the **Worker Pool** nodes' **Age** has not changed recently.
+
+ + ![View of the cluster nodes where only the control plane nodes are modified](/clusters_cluster-management_certificate-management_control-plane-only-change.png) + + +
\ No newline at end of file diff --git a/docs/docs-content/clusters/cluster-management/cloud-cost.md b/docs/docs-content/clusters/cluster-management/cloud-cost.md new file mode 100644 index 0000000000..8be8c8bdea --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/cloud-cost.md @@ -0,0 +1,73 @@ +--- +sidebar_label: "Cost Visibility" +title: "Cost Visibility" +description: "Calculate Cloud Cost in Palette" +hide_table_of_contents: false +sidebar_position: 80 +tags: ["clusters", "cluster management", "cost management"] +--- + +Palette calculates estimated cloud cost for workload clusters based on the rate of the instance type used for cluster node pools and usage cost breakdown by namespaces based on actual resource utilization within the namespace. + +## Cloud Cost + +Cluster cloud cost is the sum of the estimated cost of all the nodes launched in the cluster. The cost calculation is done based on the instance type and storage type selected for each machine pool. + +| |**FORMULAS FOR CALCULATION**| +|--|--------------| +| |Machine Pool Cost = ( Number of Nodes X Instance Price ) + ( Storage Size X Storage Price )| +| |Cluster Cloud Cost = Master Pool Cost + Worker Pool Cost| + +**Example 1:** + +Let's assume that a cluster ‘demo’ is launched with two machine pools with the following configuration: + +|MACHINE POOL|SIZE | INSTANCE TYPE WITH COST|ROOT DISK WITH COST| +|--|-----|---|----| +|MASTER POOL|3|AWS t2.medium($0.0496/hour)|60GB - gp2($0.00014/GB/hour)| +|WORKER POOL|3|AWS t2.large($0.0992/hour)|60GB - gp2($0.00014/GB/hour)| + +|Calculation for the above scenario| +|----------| +|master-pool cost = ( 3 X $0.0496 ) + ( 60 X $0.00014 ) = $0.1572/hour| +|worker-pool cost = ( 3 X $0.0992 ) + ( 60 X $0.00014 ) = $0.306/hour| +|Cluster Cloud Cost = $0.1572 + $0.306 = $0.4632/hour| + +:::info +For private clouds like VMware, OpenStack, MAAS, etc., the unit rate for CPU and Memory can be configured as an administrative setting. These rates are used in place of instance-type rates for cost calculation. +::: + + +## Usage Cost +Usage cost is calculated based on the pods' actual CPU & Memory usage, including the claimed PVC storage size. The pod cost calculation is done by dividing the instance type rate into CPU and memory rates proportional to the instance type category. 
+ +|Instance Type Category| CPU: Memory | +|--|--| +|General Purpose|65% : 35%| +|Compute Optimized|65% : 35%| +|Memory-Optimized|25% : 75%| + +|**FORMULAS FOR CALCULATION** || +|--|--------------| +|Pod CPU Cost = (CPU Proportion x Instance Rate ) x Pod CPU Usage| +|Pod Memory Cost = (Memory Proportion x Instance Rate) x Pod Memory Usage| +|Pod Storage Cost = PVC Storage Size x Storage Rate| +|Pod Cost = Pod CPU Cost + Pod Memory Cost + Pod Storage Cost| + +**Example 2** + +For the cluster configuration of master-pool & worker-pool considers in example 1, + +|Calculation for the example scenario| +|----------| +|Pod CPU usage = 200m, Pod Memory Usage = 200MB, Pod Storage Size = 10GB| +|Pod CPU Cost = ( 65% * $0.0992 ) * 200m = 0.06448 * 0.2 = $0.012896/hour| +|Pod Memory Cost = ( 35% * $0.0992 ) * 200MB = 0.03472 * 0.2GB = $0.006944/hour| +|Pod Storage Cost = 10GB * $0.00014 = $0.0014/hour| +|Pod Cost = $0.012896 + $0.006944 + $0.0014 = $0.02124/hour| + + +:::info + Cluster costs are calculated for all cluster types (new and existing) across all cloud types (public and private) +::: + diff --git a/docs/docs-content/clusters/cluster-management/cluster-management.md b/docs/docs-content/clusters/cluster-management/cluster-management.md new file mode 100644 index 0000000000..3b39bc3f28 --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/cluster-management.md @@ -0,0 +1,77 @@ +--- +sidebar_label: "Manage Clusters" +title: "Manage Clusters" +description: "Events and Notifications on Cluster Updates" +hide_table_of_contents: false +sidebar_custom_props: + icon: "envelope-open-text" +tags: ["clusters", "cluster management"] +--- + +Palette supports several Day-2 operations to manage the end-to-end lifecycle of Kubernetes clusters launched through Palette. It also provides several capabilities across new and imported clusters to perform ongoing management operations like backup/restore and to keep your clusters secure, compliant, and up to date. Additionally, Palette gives you visibility into the workloads running inside your cluster and cluster costs. + + +The following sections describe these capabilities in detail: + +* [Reconfigure](reconfigure.md) - Scale your clusters up/down by adding/reducing the number of nodes in a node pool and adding additional worker pools. Resize nodes in a node pool by modifying the node specs (CPU, Memory, or Instance Type for public clouds). Add additional fault domains such as availability zones to a node pool. + + +* [Updates](cluster-updates.md) - Upgrade core packs (OS, Kubernetes, CSI, CNI) and add-on layers, such as Monitoring and Security. + + + +* [Cluster Health Alerts](health-alerts.md) - Palette monitors the health of all workload clusters and raises an alert when the cluster goes to an unhealthy state. Besides displaying the alert on the UI console, Palette provides the ability to have these alerts pushed out to a variety of channels. Users can set up email alerts to receive an email when the health status of their cluster changes. + + + +* [Certificate Management](certificate-management.md) - You can renew cluster certificates on-demand or leverage the automatic cluster update process to handle certificate renewal operations. + + +* [Cluster Monitoring](monitoring/deploy-monitor-stack.md) - Monitor your cluster resources by collecting and reviewing metrics. + + +* [Compliance Scans](compliance-scan.md) - Perform continuous compliance checks to ensure your clusters are secure and compliant. 
+ + + +* [OS Patching](os-patching.md) - Automatically apply the most recent security patches to cluster nodes to stay up to date with the latest OS patches. + + +* [Backup and Restore](backup-restore/backup-restore.md) - Regularly back up your cluster configurations and any persistent volumes that your applications use. Choose critical namespaces you would like to back up. Restore as required to new or existing clusters. + + + +* [Cost Visibility](cloud-cost.md) - Get visibility into the estimated cloud cost for the cluster based on cluster node configuration. Get additional insights into per namespace cost (Usage Cost) calculated based on the number of resources consumed within the namespace. + + +* [Workload Visibility](workloads.md) - Palette provides visibility into the resources running inside workload clusters. These resources are displayed on the cluster details page. + + +* [Node Labels and Taints](taints.md) - You can constrain a pod to run only on a particular set of nodes. There are several ways to do this. Common approaches, such as nodeSelector and node affinity, use labels to facilitate the selection. Taints allow a node to repel a set of pods for appropriate pod allocation to node pools. + + + +* [RBAC and NS Support](cluster-rbac.md) - RoleBindings and ClusterRoleBindings are Role-Based Access Control (RBAC) concepts that allow granular control over cluster-wide resources as well as namespace resources. Palette provides the ability to specify these bindings to configure granular RBAC rules. Palette can also define new namespaces for the cluster and manage the namespaces, such as removing them and assigning quotas and role bindings to them. + + +* [Namespace Management](namespace-management.md) - use Kubernetes namespaces to partition resources among multiple users without having to set up multiple physical clusters, configure Role-Based Access Control (RBAC) based on namespaces, and more. + + + +* [Add-on Pack Status and Monitoring](pack-monitoring.md) - Palette displays the status and installation progress of add-on packs associated with the cluster you are monitoring. Pack status appears gray during initial onboarding and before deployment, blue when the pack is in the process of being installed, and green to indicate successful installation. Red indicates a failed installation and requires attention. + + +* [Kubectl](palette-webctl.md) - Learn how to use `kubectl` to interact with your host clusters. + + + +* [Platform Management](palette-lock-cluster.md) - Palette supports the Cluster(s) Management feature to exclude a cluster or a group of clusters from getting upgraded when Palette is upgraded. + + +* [NOC UI](palette-lock-cluster.md) - Palette provides Intuitive UI-based location monitoring for the clusters running at multiple locations. For public cloud clusters Palette displays the `Region` set during the cluster creation process and displays the location on the UI Map. For private cloud clusters the user can set the location through the Palette UI. The user can monitor the location details of all the clusters running under a specific scope. + +* [Palette Access Control](cluster-tag-filter/cluster-tag-filter.md) - Palette provides the ability to manage user and role access privileges through tags. This feature helps you reduce the overhead in managing user and role access to clusters by assigning tags. Tags can be used to group clusters, allowing you to apply access controls to the tag rather than to each cluster, user, or role. 
This reduces the overhead of managing access controls for individual users and clusters. + + + +* [Image Swap](image-swap.md) - Learn how to use image swap capabilities with Palette. diff --git a/docs/docs-content/clusters/cluster-management/cluster-rbac.md b/docs/docs-content/clusters/cluster-management/cluster-rbac.md new file mode 100644 index 0000000000..7e0b911f1c --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/cluster-rbac.md @@ -0,0 +1,233 @@ +--- +sidebar_label: "RBAC and Namespace Support" +title: "RBAC and Namespace Support" +description: "Cluster Level RBAC and NS Support for Access Control" +hide_table_of_contents: false +sidebar_position: 110 +tags: ["clusters", "cluster management"] +--- + + +[*RoleBindings*](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) and *ClusterRoleBindings* are Role-Based Access Control (RBAC) concepts that allow granular control over cluster-wide resources. Palette provides you the ability to specify bindings to configure granular RBAC rules. + +You can configure namespaces and RBAC from within a cluster or from a [Palette Workspace](../../workspace/workspace.md) that contains a collection of like clusters that need to be managed as a group. If a host cluster is part of a Palette workspace, then all roleBindings must occur at the namespace level. + +As you review RBAC support, use the following definitions: + +- **Role** An entity that is assigned a set of access permissions within a namespace. Roles require the assignment of a Kubernetes namespace. + +
+ + + ```yaml + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + namespace: default + name: pod-reader + rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list"] + ``` + +- **Cluster Role** An entity that is assigned a set of access permissions scoped to the cluster and all of its Kubernetes namespaces. ClusterRoles do not have a namespace assigned. + +
+ + ```yaml + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: secret-reader + rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] + ``` + +- **RoleBinding** associates a subject with a role. A subject can be a user, a group, or a [*ServiceAccount*](https://kubernetes.io/docs/concepts/security/service-accounts/). Role binding is used to grant permissions to a subject. Role and RoleBinding are used to scope a subject to a specific Kubernetes namespace. + +
+
+  ```yaml
+  apiVersion: rbac.authorization.k8s.io/v1
+  kind: RoleBinding
+  metadata:
+    name: read-pods
+    namespace: default
+  subjects:
+  - kind: User
+    name: jane
+    apiGroup: rbac.authorization.k8s.io
+  roleRef:
+    kind: Role
+    name: pod-reader
+    apiGroup: rbac.authorization.k8s.io
+  ```
+
+- **ClusterRoleBinding** associates a subject with a ClusterRole. A subject can be a user, a group, or a [*ServiceAccount*](https://kubernetes.io/docs/concepts/security/service-accounts/). A ClusterRoleBinding is used to grant permissions to a subject. ClusterRole and ClusterRoleBinding are used to scope a subject's access to the cluster, which includes all the Kubernetes namespaces inside the cluster. A sample ClusterRoleBinding manifest is shown after the list of scenarios below.
+
+
+There are many reasons why you may want to create roles and assign permissions to different users or groups. Below are a few common scenarios.
+
+* Use Role and a RoleBinding to scope security to a single Kubernetes namespace.
+* Use Role and a RoleBinding to scope security to several Kubernetes namespaces.
+* Use ClusterRole and ClusterRoleBinding to scope security to all namespaces.
+
+
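+For illustration, a minimal ClusterRoleBinding manifest for the last scenario might look like the following. It assumes the `secret-reader` ClusterRole shown earlier and a hypothetical `ops-team` group defined in your identity provider:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: read-secrets-global
+subjects:
+- kind: Group
+  # Hypothetical group name. Replace with a user, group, or ServiceAccount from your environment.
+  name: ops-team
+  apiGroup: rbac.authorization.k8s.io
+roleRef:
+  kind: ClusterRole
+  name: secret-reader
+  apiGroup: rbac.authorization.k8s.io
+```
+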
+ +:::caution + +Palette does not provide a way for roles to be configured natively through its platform. You can create roles by using a manifest layer in the cluster profile. RBAC management only allows you to specify role bindings. + +::: + + +Use the steps below to create a RoleBinding or ClusterRoleBinding for your host clusters. + + +
+ +## Palette Roles and Kubernetes Roles + +Palette offers a set of [default roles](../../user-management/palette-rbac/palette-rbac.md#assign-palette-specific-roles-to-users) you can assign to your users. The Palette roles are only in scope at the platform level. This means you can manage the permissions for users' actions in Palette, such as creating or deleting clusters, creating projects, creating users, and more. + +The Kubernetes roles are used to control the actions users are allowed to do inside the cluster. For example, a user in Palette could have the *Cluster Profile Viewer* role, which grants them the ability to view cluster profiles for a specific project. In all the clusters in this project, the user could be assigned a role binding to a custom role that grants them administrative access in all the clusters. + +In summary, using Palette roles allows you to control what actions users can do in Palette. Use Kubernetes roles to control users' actions inside a host cluster. + +
+ +:::caution + +Palette roles do not automatically map to a Kubernetes role. You must create a role binding for a specific user or group of users. + +::: + +## Create Role Bindings + +### Prerequisites + +To create a role binding the role must exist inside the host cluster. You can use any of the [default cluster roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) provided by Kubernetes. The alternative to default cluster roles is to create a role by using a manifest in the cluster profile. + +If you have OpenID Connect (OIDC) configured at the Kubernetes layer of your cluster profile, you can create a role binding that maps individual users or groups assigned within the OIDC provider's configuration to a role. To learn more, review [Use RBAC with OIDC](../../integrations/kubernetes-generic.md#configure-custom-oidc). + +### Enablement + +You can create role bindings during the cluster creation process or after the host cluster is deployed. + +For a new cluster, you can modify the cluster settings at the end of the cluster creation process. RBAC is one of the cluster settings you can modify. Select **RBAC** from the left **Settings Menu**. + + +![A view of the cluster settings page when creating a cluster](/clusters_cluster-management_cluster-rbac_cluster-creation-settings.png) + +To create or modify a role binding for an active cluster. Navigate to the cluster details page and click on **Settings**. Select **RBAC** from the left **Settings Menu**. + +![A view of the cluster settings page for an active cluster](/clusters_cluster-management_cluster-rbac_cluster-settings.png) + + +The RBAC settings view contains two tabs: + +* **Cluster**: Use this tab to create a ClusterRoleBinding. +* **Namespaces**: Use this tab to create a RoleBinding within Kubernetes namespaces. + +Select the tab for your specific role scope to learn how to create the appropriate role binding. + + + + +1. From the cluster settings view, select the **RBAC** tab. + + +2. Click on **Add New Binding**. + + +3. Fill out the following details: + + * Role Name: Define a custom role name to identify the cluster role. + * Subjects: Subjects are a group of users, services, or teams using the Kubernetes API. + * Subject Name: Custom name to identify a subject. + + +:::info + +In Kubernetes, a role binding connects a user or group with a set of permissions called a Role. The Role can be in the same namespace as the RoleBinding. If you want to give a role access to all the namespaces in your cluster, use a ClusterRoleBinding. + +::: + +4. Click on **Confirm** to save your changes. + +A ClusterRoleBinding will be created in your host cluster. Keep in mind that you can assign multiple subjects to a ClusterRoleBinding. + + + + + +1. From the cluster settings view, select the **RBAC** tab. + + +2. Click on **Add New Binding**. + + +3. Add the namespace name or provide a regular expression to automatically apply the following settings to other namespaces in the future. Example: `/^web-app/`. Click on **Add To List**. + + +4. Allocate resources to the selected namespace. You can allocate the maximum CPU and Memory the role is allowed to consume from the listed namespaces. + + +5. Click on **Add New Binding**. + + +6. Fill out the following details: + * Namespace: Select the namespace. + * Role Type: The type of role. You can specify either a role or a cluster role. + * Role Name: Define a custom role name to identify the cluster role. 
* Subjects: Subjects are a group of users, services, or teams using the Kubernetes API.
+    * Subject Name: Custom name to identify a subject.
+
+
+:::info
+
+In Kubernetes, a role binding connects a user or group with a set of permissions called a Role. The Role can be in the same namespace as the RoleBinding. If you want to give a role access to all the namespaces in your cluster, use a ClusterRoleBinding.
+
+:::
+
+
+A role binding will be created in the listed namespaces. Keep in mind that you can assign multiple subjects to a RoleBinding or ClusterRoleBinding.
+
+
+
+### Validate
+
+1. Log in to [Palette](https://console.spectrocloud.com).
+
+
+2. Navigate to the left **Main Menu** and select **Clusters**.
+
+
+3. Select the cluster in which you created the role binding to view its details page.
+
+
+4. Download the **kubeconfig** file for the cluster or use the web shell to access the host cluster.
+
+
+5. Use the following commands to review details about the role and to ensure the role binding was successful. Replace `ROLE_NAME` and `NAMESPACE` with the name of the role and the namespace you assigned it to.
+
+
+#### Cluster Role
+
+```shell
+kubectl get clusterrole ROLE_NAME --output yaml
+```
+
+
+#### Role
+
+```shell
+kubectl get role ROLE_NAME --namespace NAMESPACE --output yaml
+```
+
diff --git a/docs/docs-content/clusters/cluster-management/cluster-tag-filter/_category_.json b/docs/docs-content/clusters/cluster-management/cluster-tag-filter/_category_.json
new file mode 100644
index 0000000000..70a2b90cd7
--- /dev/null
+++ b/docs/docs-content/clusters/cluster-management/cluster-tag-filter/_category_.json
@@ -0,0 +1,3 @@
+{
+  "position": 200
+}
diff --git a/docs/docs-content/clusters/cluster-management/cluster-tag-filter/cluster-tag-filter.md b/docs/docs-content/clusters/cluster-management/cluster-tag-filter/cluster-tag-filter.md
new file mode 100644
index 0000000000..0e852e21f5
--- /dev/null
+++ b/docs/docs-content/clusters/cluster-management/cluster-tag-filter/cluster-tag-filter.md
@@ -0,0 +1,34 @@
+---
+sidebar_label: "Cluster Access Control"
+title: "Cluster Access Control"
+description: "Learn how to manage and administer access control to clusters through tags."
+hide_table_of_contents: false
+tags: ["clusters", "cluster management", "filter"]
+---
+
+Palette provides the ability to manage user and role access privileges through tags. This feature helps you reduce the overhead of managing user and role access to clusters. Tags can be used to group clusters, allowing you to apply access controls to the tag rather than to each individual cluster, user, or role.
+
+To get started with attribute-based access control through tags, check out the [Create Resource Filter](create-add-filter.md) guide.
+ + +## Resources + +- [Cluster Resource Filter](create-add-filter.md) + +- [Create Resource Filter](create-add-filter.md#createresourcefilter) + +- [Add Resource Role](create-add-filter.md#addresourcerole) + + +- [Palette Resource Roles](../../../user-management/palette-rbac/resource-scope-roles-permissions.md) + +- [Palette Global Resource Roles](../../../user-management/palette-rbac/resource-scope-roles-permissions.md#paletteglobalresourceroles) + +- [Palette Custom Resource Roles](../../../user-management/palette-rbac/resource-scope-roles-permissions.md#palettecustomresourceroles) + +- [Create Custom Role](../../../user-management/new-user.md#createcustomrole) + +- [Create New User in Palette](../../../user-management/new-user.md#createanewuser) + + + diff --git a/docs/docs-content/clusters/cluster-management/cluster-tag-filter/create-add-filter.md b/docs/docs-content/clusters/cluster-management/cluster-tag-filter/create-add-filter.md new file mode 100644 index 0000000000..2fcc4f7dec --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/cluster-tag-filter/create-add-filter.md @@ -0,0 +1,108 @@ +--- +sidebar_label: "Cluster Resource Filter" +title: "Cluster Resource Filter" +description: "Create and Add Cluster Resource Filter" +hide_table_of_contents: false +tags: ["clusters", "cluster management", "filters"] +--- + +The page guides you on how to create a Palette Resource Filter and add these filters to the users to establish cluster access restrictions. + +## Create Resource Filter + +You must create a Resource Filter in Palette to establish user-based access restrictions to clusters across multiple projects. The resource filters are created under the scope of Tenant Admin. To create a resource filter, follow the steps below: + +
+
+1. Log in to Palette as **Tenant Admin** and go to **Tenant Settings** from the left **Main Menu**.
+
+
+2. Select the **Filters** tab and click **+New Resource Filter**.
+
+
+3. In the **Create New Filter** wizard, provide the following information:
+  * Filter Name: A custom name for the tag filter.
+  * A filter expression. Use the following table to familiarize yourself with the filter expression format:
+
+  |Conjunction| Property| Operator| Tag-Value|
+  |-------|-----|---------|------------------|
+  |and | Tag | is | Custom tag value|
+  |or | Tag | is | Custom tag value|
+  |and | Tag | is not | Custom tag value|
+  |or | Tag | is not | Custom tag value|
+
+4. Click the **Confirm** button to complete the filter creation wizard.
+
+**Note:** Tag values are case-sensitive.
+
+### Validate
+
+Upon creating a filter, a display message will pop up to confirm the successful creation of the filter. You can also use the following steps to verify that the filter is available for use.
+
+1. Navigate to the left **Main Menu** and click on **Tenant Settings**.
+
+2. Access the **Manage Filters** page to find the filter name listed.
+
+3. You can **Edit** and **Delete** filters by clicking on the **three-dot Menu** at the end of the row.
+
+## Add Resource Role
+
+You can assign the resource filter you created, in combination with roles, to a [user](../../../user-management/new-user.md#createanewuser) to enforce access restrictions. Palette provisions two types of roles:
+
+* [Palette Global Roles](../../../user-management/palette-rbac/resource-scope-roles-permissions.md#paletteglobalresourceroles), the set of roles that are available in the Palette console.
+
+* [Custom Resource Roles](../../../user-management/palette-rbac/resource-scope-roles-permissions.md#palettecustomresourceroles), which you can generate according to your requirements from the available set of permissions and operations.
+
+### Prerequisites
+
+* A [Palette account](https://console.spectrocloud.com) with Tenant scope privileges.
+
+* A [user created](../../../user-management/new-user.md#createanewuser) to assign the resource privileges.
+
+To assign the resource roles and filter to a user, follow the steps below:
+
+
+1. Log in to Palette as Tenant Admin.
+
+
+2. From the left **Main Menu**, select **Users & Teams** and choose the user to whom you want to assign a role. This takes you to the **User Details** page.
+
+
+3. From the user details wizard, select the **Resource Roles** tab and click **+ New Resource Role**.
+
+
+4. In the **Add Roles to User** wizard, enter the following details:
+  * **Projects**: The projects to which the user is assigned.
+  * **Filters**: Select the filters to be assigned from the drop-down. The filters you created will be displayed in the drop-down menu.
+  * Select the **checkbox** next to each role you want to assign to the user from the list displayed. These are Palette built-in roles.
+
+
+5. Click **Confirm** to complete the Add Role wizard.
+
+## Remove or Edit the Role
+
+To remove or edit an attached role:
+
+1. Log in to Palette as Tenant Admin.
+
+
+2. From the left **Main Menu**, click on **Users & Teams**. This will take you to the **User Details** page.
+
+
+3. From the **Resource Roles** tab, click the **three-dot** menu next to the role name.
+
+
+4. Click the **Edit** or **Remove** option from the drop-down menu.
+
+### Validate
+
+Upon assigning a role, a display message will pop up to confirm the successful role assignment. You can also use the following steps to review the roles created:
+
+1. Navigate to the left **Main Menu** and click on **Clusters**.
+
+2. This page will list all the clusters to which the user has access based on the filter created. You need to switch to each project and view the accessible clusters.
+
+## Resources
+
+* [Create a New User](../../../user-management/new-user.md#createanewuser)
+
diff --git a/docs/docs-content/clusters/cluster-management/cluster-updates.md b/docs/docs-content/clusters/cluster-management/cluster-updates.md
new file mode 100644
index 0000000000..ff65c9b123
--- /dev/null
+++ b/docs/docs-content/clusters/cluster-management/cluster-updates.md
@@ -0,0 +1,80 @@
+---
+sidebar_label: "Cluster Updates"
+title: "Cluster Updates"
+description: "Events and Notifications on Cluster Updates"
+hide_table_of_contents: false
+sidebar_position: 20
+tags: ["clusters", "cluster management"]
+---
+
+
+Palette supports various kinds of updates to running clusters, such as upgrading the Kubernetes version, upgrading add-on versions, adding new add-ons, and removing existing ones. Based on the nature of the change, one of the following two mechanisms can be used to apply cluster updates to the cluster.
+
+## Cluster Profile Updates
+
+Fundamental changes to the cluster’s definition, such as upgrading Kubernetes versions, installing new packs, uninstalling previously installed packs, and updating default pack configuration, are initiated through the cluster profile. These changes result in update notifications on all the clusters that are instantiated from the cluster profile. Update notifications consist of detailed information about all the changes applied to the profile since the initial installation or the previous update. In addition, users can update the clusters individually at an appropriate time.
+
+:::info
+
+Cluster profile changes are not displayed or allowed on a cluster while the cluster is provisioning and until all worker node additions are completed. This prevents the Kubernetes cluster from becoming unstable and transitioning into an unrecoverable state due to changes in core components.
+::: + +![Cluster Notification - Update Available](/cluster_list_update_available.png) + +Updates to pack configuration might result in a conflict if the configuration was previously overridden within the cluster. These conflicts are presented to the user and need to be resolved before changes can be applied to the cluster. + + +![Cluster Update Details](/cluster_update_available_detail.png) + + + +## Instructions + +* Navigate to the cluster profiles page and choose the profile to be updated. +* Make the desired changes. These include add/delete layers, change pack version, change pack values, etc. Save your changes. +* On the Clusters page, observe the ‘Updates Available’ tag on every previously launched cluster using the updated cluster profile. +* Click on one of the clusters to be updated to invoke the cluster details page. +* An update notification in the form of a button called ‘Updates Available’ can be seen on the right top of the screen. Click the button to open the update notifications dialog. +* A notification is created for each change made to the profile. Review all notifications. Depending on the nature of the change, additional action might be required for certain notifications. There are typical scenarios where the settings or attached manifests for a pack are directly updated on the cluster, resulting in a conflict with the new incoming changes from the profile. The updated profile settings and modified cluster settings are shown side by side for such cases, with the differences highlighted. Resolve all of the conflicts. When there has been no update to the pack settings or manifests, the incoming changes from the profile are automatically merged. A side-by-side comparison between the original and merged cluster settings is still displayed in such cases for review purposes. However, users may choose to customize settings from this dialog further. +* Once all the notifications are reviewed and conflicts, if any, are resolved, confirm updates to apply changes to the cluster. +* The system starts the update process in a few seconds. Depending upon the nature of the change, a rolling update nodes of the clusters may take place. The detailed status of the upgrade is made available at UI. +* Repeat this process for other clusters to be upgraded. + + +### Examples - Update Notifications + +|**Update Type** |**Description**|**Notification Example** | +|:---------------|:---------|:-----------------------| +Pack Version Upgrade |The existing pack version is upgraded to a different version in the cluster profile |Kubernetes version is updated 1.18.16 > 1.20.0| +|Pack Values Update |The existing pack values are updated in the cluster profile |Kubernetes 1.20.0 values are updated| +|Add Pack|Add a new pack to the cluster profile |New Kibana 7.2.4 layer is added| +|Delete Pack|Delete the existing pack from the cluster profile |Kibana 7.2.4 layer is deleted| +|Attach Pack Manifest|Delete the existing pack from the cluster profile |Manifest security is attached to the pack Kubernetes| +|Update Pack Manifest|The attached pack manifest content is updated in the cluster profile|manifest security is updated in the pack Kubernetes| +|Delete Pack Manifest |The attached pack manifest is deleted from the cluster profile|manifest security is deleted in the pack Kubernetes| + +:::info + +Prior to applying the notifications resulting from a profile update, the notification is automatically cleared if the corresponding changes are reverted. 
+ +::: + + +### Examples - Notification settings + +As described above, notifications originate from changes to pack settings or manifest. They are accompanied by a settings dialog with a split pane showing differences in values. Following are a few examples of such scenarios: + +|Values Updated |Values overridden in Clusters |Settings displayed (LHS) |Settings displayed (RHS) |Auto Merged | Action | +|:---------------|:---------|:--------------------|:--------|:-------|:--------| +|Pack Values|No|Original pack settings| Updated pack settings| Yes| Review and/or modify if desired| +|Attached Manifests|No|Original Manifests| Updated Manifests| Yes| Review and/or modify if desired| +|Pack Values|Yes|Updated settings from Cluster Profile| Current settings from cluster| No| Resolve all conflicts| +|Attached Manifests|Yes|Updated settings from Cluster Profile| Current settings from cluster| No| Resolve all conflicts| +|Pack Version Changed|No|Original pack settings| Updated pack settings| Yes| Review and/or modify if desired| +|Pack Version Changed|Yes|Updated settings from Cluster Profile| Current settings from cluster| No| Resolve all conflicts| + +## Configuration overrides + +Every pack installed via cluster profile provides a set of out-of-the-box default settings. These can be overridden at the time of launching a new cluster or any time afterward for a running cluster. Besides basic defaults, Palette also provides useful presets. Presets are preconfigured configuration blocks logically grouped. Can leverage these to turn a feature on/off quickly. For example, enabling ingress for a Prometheus/Grafana pack requires many settings to be added. However, the Ingres preset for the Prometheus pack makes it easy to make this change. + +![Cluster Update Details](/cluster_config_override.png) \ No newline at end of file diff --git a/docs/docs-content/clusters/cluster-management/compliance-scan.md b/docs/docs-content/clusters/cluster-management/compliance-scan.md new file mode 100644 index 0000000000..5c3134e125 --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/compliance-scan.md @@ -0,0 +1,123 @@ +--- +sidebar_label: "Compliance Scan" +title: "Compliance Scan" +description: "Learn how to apply compliance scans on your clusters." +hide_table_of_contents: false +sidebar_position: 50 +tags: ["clusters", "cluster management"] +--- + +Palette provides a way to run compliance, security, conformance, and software bill of materials (SBOM) scans on tenant clusters. These scans ensure cluster adherence to specific compliance and security standards. The scans also detect potential vulnerabilities by performing penetration tests. + +Palette supports four types of scans. Each scan generates reports with details specific to the type of scan. You can initiate multiple scans of each type over time. In addition, Palette keeps a history of previous scans for comparison purposes. To learn more about each scan type, refer to the following sections. + + +:::info + +Scans may not work as expected when a node is in maintenance mode. Before scheduling a scan, we recommend you turn off maintenance mode if enabled. To verify if a node is in maintenance mode, navigate to **Clusters** > **Nodes** and check the **Health** column for a **Maintenance mode** icon. To turn off maintenance mode, click on the **three-dot Menu** in the row of the node you want to scan, and select **Turn off maintenance mode**. 
+ +::: + +## Configuration Security + +This scan examines the compliance of deployed Kubernetes security features against the CIS Kubernetes Benchmarks. CIS Kubernetes Benchmarks are consensus-driven security guidelines for the Kubernetes. Different releases of the CIS benchmark cover different releases of Kubernetes. By default, Kubernetes configuration security will determine the test set based on the Kubernetes version running on the cluster being scanned. Internally, Palette leverages an open-source tool called KubeBench from Aqua Security to perform this scan. Scans are run against master and worker nodes of the Kubernetes cluster, and a combined report is made available on the UI. Users can filter the report to view only the master or worker results if required. + +All the tests in the report are marked as Scored or Not Scored. The ones marked Not Scored cannot be automatically run, and it is suggested to be tested manually. + +![kcs.png](/kcs.png) + +## Penetration Testing + +Kubernetes penetration testing scans Kubernetes-related open-ports for any configuration issues that can leave the tenant clusters exposed to attackers. It hunts for security issues in your Kubernetes clusters and increases awareness and visibility of the security controls in Kubernetes environments. The scan gives a full report on the cluster security concerns. Internally Palette leverages an open-source tool called KubeHunter from Aqua Security to perform this scan. Scans are run in 2 modes, Internal and External. In the internal mode, tests are run against the internal endpoint of the API server, whereas, in external mode, the external public-facing endpoint is used for testing. A combined report of vulnerabilities found in both modes is shown on the Palette UI. Users can filter the report to view just the internal or external report if required. + +![kpt.png](/kpt.png) + +## Conformance Testing + +Kubernetes conformance testing is about validating your Kubernetes configuration to ensure that they are conformant to the CNCF specifications. Palette leverages an open-source tool called Sonobuoy to perform this scan. Automatically select a subset of relevant tests for execution based on the type of cloud (public, private) and the type of deployment infrastructure (IaaS, managed cloud service). Each test can take up to 2 hours to complete. If a cluster has a single worker node, a few tests may fail due to resources. For accurate assessment of conformance for distribution of Kubernetes, set up a cluster with at least two worker nodes. These tests are not destructive. However, they do launch several workloads in test namespaces as part of the tests. As a result, the consumption of cluster resources during the test run duration increases and may impact other workloads running on the cluster. + +The scan summary of total passed and failed tests are displayed while the test is in progress. In addition, a complete overview of the tests that were run is displayed after the completion of the report. + +![conformance.png](/conformance.png) + +## SBOM: Dependencies & Vulnerabilities + +## What is an SBOM? +An SBOM is a comprehensive list of the components, libraries, and other assets that make up a software application. It details the various third-party components and dependencies used in the software and helps to manage security and compliance risks associated with those components. + +The SBOM provides metadata about each component such as version, origin, license, and more. 
Reviewing the SBOM enables organizations to track vulnerabilities, perform regular software maintenance, and ensure compliance with regulatory requirements such as the European Union's General Data Protection Regulation (GDPR) and the Payment Card Industry Data Security Standard (PCI DSS). + +![sbom_scan.png](/sbom_scan.png) + +## Configure an SBOM Scan +To initiate an SBOM scan, navigate to **Clusters** and select the cluster to scan. On the **Cluster Overview** page, click the **Scans** tab, and expand the **Software Bill of Materials (SBOM)** drop-down menu. Select **Configure Scan** and choose the desired SBOM format, scan scope, and an optional backup location. Confirm your changes. + +Palette will identify every unique container image within your chosen scope and generate an SBOM for that image. Palette also runs the SBOM through a vulnerability scanner to flag any Common Vulnerabilities and Exposures (CVEs). Palette leverages two open-source tools from Anchore: [Syft](https://github.com/anchore/syft) for SBOM generation and [Grype](https://github.com/anchore/grype) for vulnerability detection. + +If you specify a [backup location](backup-restore/backup-restore.md), the SBOM for each image will be uploaded to your backup location, and you can subsequently download the SBOMs with the click of a button or through the Palette API. + +If a backup location is not provided, Palette will preserve all of the identified dependencies and vulnerabilities, but the raw SBOMs will not be available for download. The report results are available for review regardless of the backup location setting. +
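Palette runs Syft and Grype for you as part of the scan, but because both are open-source command line tools, you can also inspect an individual image with them directly if you want to compare results outside of Palette. The commands below are a rough sketch using an example image name; exact flags may vary between Syft and Grype releases.

```shell
# Generate an SPDX JSON SBOM for a single container image with Syft (example image shown).
syft nginx:1.25 -o spdx-json > nginx-sbom.spdx.json

# Scan the generated SBOM for known vulnerabilities (CVEs) with Grype.
grype sbom:./nginx-sbom.spdx.json
```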
+ +#### SBOM Scan Format +* [SPDX](https://github.com/spdx/spdx-spec/blob/v2.2/schemas/spdx-schema.json): A standard SBOM format widely used by organizations and governments. The SPDX format has been around longer than any other SBOM format. + +* [CycloneDX](https://cyclonedx.org/specification/overview/): An open-source XML-based SBOM format that provides a standard representation of software components and their metadata. + +* Syft JSON: Syft's custom SBOM format. The Syft SBOM format contains the most metadata compared to the other SBOM formats. + +#### SBOM Scan Scopes +* Cluster: Scans all the images in your Kubernetes cluster. + +* Namespace: Scans all images in a particular Kubernetes namespace. + +* Label Selector: Scans all images used by all the Pods matching a label selector within a particular Kubernetes namespace. + +* Pod: Scans all images used by a single Pod. + +## Review SBOM Results +To review a completed scan, expand the **Software Bill of Materials (SBOM)** row. The expanded row displays the completed report containing detailed information about every scanned image. The context column indicates every unique use of each image, broken out by container name, namespace, and pod name. Each image may be used by various containers within a given scope. The vulnerability summary column provides a condensed view of the vulnerability report, which can be viewed in greater detail by clicking on any row in the scan report. + +![sbom_results.png](/sbom_results.png) + +Each identified image has its own detailed results page containing dependency and vulnerability reports. To review an image's result page, select the **>** button. Regardless of the selected SBOM format, each dependency’s name, version, and type is displayed, and each vulnerability's name, severity, code, impacted version, and fixed version is displayed. + +Additional metadata will be included in the SBOM. Exactly what additional metadata is included depends on the selected SBOM format. + +![sbom_dependencies.png](/sbom_dependencies.png) + +For each identified vulnerability, you can view the name, severity level, vulnerability code, installed or impacted version, and the fix version (if a fix is available). Any CVEs documented in the [NIST National Vulnerability Database](https://nvd.nist.gov/vuln) (NVD) will render as a hyperlink to the NVD detail page for that particular vulnerability. + +![sbom_vulnerabilities.png](/sbom_vulnerabilities.png) + +## Scan Options + +The following options are available for running cluster scans: + +## On Demand +A cluster scan of any type can be started by navigating to the **Scans** tab of a cluster in Palette. Scan progress displays as 'Initiated' and transitions to 'Completed' when the scan is complete. + +|__On Demand Scan__| +|------------------| +|Select the cluster to scan -> Scan(top panel) -> Run Scan.| + +## Scheduled +You can set a schedule for each scan type when you deploy the cluster, and you can change the schedule at a later time. + +|__During Cluster Deployment__| +|-----------------------------| +|Add New Cluster -> Settings -> Schedule scans -> Enable and schedule desired scans.| + +|__Running Cluster__| +|----------------------| +|Select the cluster to scan -> Settings -> Cluster Settings -> Scan Policies -> Enable and schedule scans of your choice.| + +### Schedule Options Available + +This operation can be performed on all cluster types across all clouds. Schedule your compliance scan for month, day, hour, or minute. 
The following options are available: +* Every week on Sunday at midnight. +* Every two weeks at midnight. +* Every month on the first day of the month at midnight. +* Every two months on the first day of the month at midnight. + diff --git a/docs/docs-content/clusters/cluster-management/health-alerts.md b/docs/docs-content/clusters/cluster-management/health-alerts.md new file mode 100644 index 0000000000..48ef06de57 --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/health-alerts.md @@ -0,0 +1,63 @@ +--- +sidebar_label: "Cluster Health Alerts" +title: "Cluster Health Alerts" +description: "Cluster Health Alerts" +hide_table_of_contents: false +sidebar_position: 40 +tags: ["clusters", "cluster management"] +--- + + +Palette monitors the health of all workload clusters and raises an alert when a cluster enters an unhealthy state. Besides displaying the alert on the User Interface (UI) console, Palette provides the ability to have these alerts pushed out to a variety of channels. Users can set up simple email alerts to receive a notice when the health status of their cluster changes. Additionally, they can set up webhooks to integrate alerts with a variety of IT Service Management (ITSM) tools such as ServiceNow, Slack, or Microsoft Teams. These alerts are set up at the project level and apply to all clusters within the project. + +The Palette management server relies on the following to trigger cluster health alerts: + +* Node and resource metrics pushed by the Spectro agent from clusters. + + +* Machine information and heartbeats from the agent. + + The management server has a 10-minute timeout for heartbeat detection. An alert is triggered if the agent heartbeat is not received within that timeout. The cluster is marked as "unhealthy" when the agent is down or paused for troubleshooting. This behavior applies to: + * Both workload clusters and the management cluster in public clouds. + * Management clusters of on-premises, enterprise infrastructure. + +## Email Alerts +1. As the Project Administrator, navigate to Project Settings. + + +2. Click **Alerts** to access the **Manage Alerts** page. + + +3. Enable **ClusterHealth**. + + +4. Select the **Email all project members** option if every project member should receive the alert, or specify the email IDs of the members who should receive the alerts. + + +5. Save the settings to start receiving the health alerts from your workload cluster. + +## Webhook Alerts + +1. As **Project Administrator**, navigate to **Project Settings**. + + +2. Click **Alerts** to access the **Manage Alerts** page. + + +3. Click on **Add New Webhook**. + + +4. Follow the Webhook creation wizard with the following details: + * **Alert type** - ClusterHealth + * **Method** - POST, to post the alert message to the hooked target + * **URL** - URL of the target to be hooked to receive alerts + * **Body** - JSON-formatted alert message (see the example test request after these steps) + * **Headers** - Optional headers as key-value pairs, depending on the target + * **Active** - Enable or disable the webhook + + +5. Confirm the details provided to receive the health alerts for your workload clusters in your ITSM tools.
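If you want to verify that the target endpoint referenced in the webhook accepts `POST` requests with a JSON body, you can send a generic test request from the command line before relying on Palette alerts. The sketch below is purely hypothetical: the URL, header, and body are placeholders for your ITSM tool's values and do not represent Palette's actual alert payload schema.

```shell
# Hypothetical smoke test of a webhook target endpoint.
# Replace the URL, authorization header, and body with values expected by your ITSM tool.
curl --request POST "https://hooks.example.com/palette-alerts" \
  --header "Content-Type: application/json" \
  --header "Authorization: Bearer REPLACE_ME" \
  --data '{"message": "Test alert from Palette webhook setup"}'
```

A `2xx` response from the endpoint indicates it is reachable and accepts JSON payloads; consult your ITSM tool's documentation for the exact body and headers it expects.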
+ + + + diff --git a/docs/docs-content/clusters/cluster-management/image-swap.md b/docs/docs-content/clusters/cluster-management/image-swap.md new file mode 100644 index 0000000000..f924af898a --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/image-swap.md @@ -0,0 +1,190 @@ +--- +sidebar_label: "Image Swap" +title: "Image Swap" +description: "Learn how to swap out images and registries through the image swap webhook exposed by Palette." +hide_table_of_contents: false +sidebar_position: 230 +tags: ["clusters", "cluster management"] +--- + + +Palette supports swapping out images and registries at the Kubernetes layer. Palette uses the *ImageSwap* webhook that is exposed by the [ImageSwap Mutating Admission Controller for Kubernetes](https://github.com/phenixblue/imageswap-webhook/blob/master/README.md). You can use this feature to override a specific number of container image registries or particular images. The following are some common use cases for image swapping:

+ +- Avoid rate limit issues encountered with public images by pointing to an alternate image registry that caches public images. This is more common in an Enterprise setting. + + +- Changing the URL of an internal or external container registry. + + +- Support air-gapped environments by redirecting public image requests to an internal registry. + + To use the image swap feature, specify an image swap configuration in the Kubernetes pack YAML. The `imageSwap` block must be its own node, meaning that it's a standalone block at the root level of the YAML. + + ```yaml +imageSwap: + imageChange: |- + default: + # your custom configuration goes here + ``` + + + You can add the `imageSwap` section when you create the cluster profile or at cluster deployment. You can customize the image swap functionality several ways. We recommend you review the official [Image Swap configuration](https://github.com/phenixblue/imageswap-webhook/blob/master/README.md#configuration) documentation to learn more. To help you get started, the following are some common configuration patterns. + + +:::info + +The `default::`entry specifies the default configuration for all images. The `::` delimiter is used to separate different elements of the configuration. + +::: + + # Configuration Examples + + ### Override a Specific Registry + + In this example, image swapping is disabled for all registries except for `example.private.io`. All image requests for `example.private.io` will be swapped for `harbor.internal.example.com`. + + + + ```yaml +imageSwap: + imageChange: |- + default:: + example.private.io::harbor.internal.example.com + ``` + +### Apply a Global Swap with an Exception + +Enable image swapping for all registries except `example.private.io`. All image requests for `example.private.io` will not get swapped. All other image requests will get swapped to `harbor.internal.example.com`. + + + +```yaml +imageSwap: + imageChange: |- + default::harbor.internal.example.com + example.private.io:: +``` + +### Swap a Specific Image + +Swap out a specific image. The image `example.private.io/demo:v1.0.0` will be swapped with `gcr.io/google-samples/hello-app:1.0`. The syntax format is `[EXACT]::`. + + + + +```yaml +imageSwap: + imageChange: |- + default:: + [EXACT]example.private.io/demo:v1.0.0::gcr.io/google-samples/hello-app:1.0 +``` + + +### Replace Image Path + + +Replace an image path with a custom registry. All image requests that start with `ghcr.io/example*` will get swapped with `example.private.io`. + + + + +```yaml +imageSwap: + imageChange: |- + default:: + [REPLACE]ghcr.io/example*::example.private.io +``` + + +:::info + + If the registry or image mentioned in the image swap configuration cannot be located, Kubernetes will try to obtain the image from the source mentioned in the deployment configuration. + +::: + + +The examples provided are intended to help you get started. Refer to the official [Image Swap configuration](https://github.com/phenixblue/imageswap-webhook/blob/master/README.md#configuration) for more examples and information. + + +## Image Swap with Palette + +Use the following steps to learn how to use Palette's image swap functionality. + +### Prerequisites + +* Kubernetes 1.19.0 or greater. + + +* Palette v3.4.0 or greater. + + +### Swap Image + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Profiles**. + + +3. Click on **Add Cluster Profile**. + + +4. 
Fill out the input fields for **Name**, **Description**, **Type** and **Tags**. Select the type **Full** and click on **Next**. + + +5. Select your infrastructure provider and click on **Next**. + + +6. Complete the Operating System (OS) layer by selecting **Registry**, **Pack Name**, and **Pack Version**. Click on **Next layer** to continue. + + +7. Select a Kubernetes distribution and version. + + +8. Next, select the code editor button **** to edit the pack YAML configuration. Within the `pack` section's scope, add your `imageSwap` configuration block. Click on **Next layer** to continue. + +
+ + ![A view of the Kubernetes layer YAML with an imageSwap configuration block.](/clusters_cluster-management_image-swap_kubernetes-layer-yaml.png) + + + + +9. Complete the remainder of the cluster profile creation wizard. + + + +10. Deploy a host cluster and use the cluster profile containing the image swap functionality. Check out the [Deploy a Cluster](../public-cloud/deploy-k8s-cluster.md) tutorial for additional guidance in deploying a host cluster. + + +### Validate + +You can validate that the image swap is functioning correctly by using the following steps. + + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select the host cluster you deployed with the image swap functionality. + + +4. Download the kubeconfig file to access the cluster. Refer to the [Access Cluster with CLI](palette-webctl.md) guide for detailed steps. + + +5. Review the deployment configuration of a workload using a registry or image impacted by the image swap configuration. Verify the image or registry is swapped to the expected configuration value you provided in the image swap configuration block. + + You can use the following command to verify the correct image and registry of the deployment. Change the `REPLACE_ME` value with the correct values from your environment. + +
+ + ```shell + kubectl get deployment REPLACE_ME \ + --namespace REPLACE_ME -o=jsonpath='{.spec.template.spec.containers[0].image}' + ``` + + :::tip + Use the command `kubectl get deployments --all-namespaces` to list all deployments in the cluster. + ::: \ No newline at end of file diff --git a/docs/docs-content/clusters/cluster-management/kubeconfig.md b/docs/docs-content/clusters/cluster-management/kubeconfig.md new file mode 100644 index 0000000000..abbbc48d51 --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/kubeconfig.md @@ -0,0 +1,105 @@ +--- +sidebar_label: "Kubeconfig" +title: "Kubeconfig" +description: "Learn how to find the kubeconfig file for your cluster and how permissions are managed." +hide_table_of_contents: false +sidebar_position: 150 +tags: ["clusters", "cluster management", "kubeconfig"] +--- + + +A [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file is a configuration file used to access a Kubernetes cluster. It contains information such as the cluster's API server address, authentication credentials, and cluster-specific settings. The kubeconfig file allows you to authenticate and interact with the cluster using the kubectl CLI or other Kubernetes client tools. + + +The kubeconfig file is crucial in enabling you and other users to issue kubectl commands against the host cluster. It ensures you have the necessary permissions and access to the cluster's resources. Using the kubeconfig file, you can validate your access to the host cluster and perform various operations, such as deploying applications, managing resources, and monitoring the cluster. + +Overall, the kubeconfig file serves as a key component in connecting and interacting with a Kubernetes cluster, providing you with the necessary configuration and credentials to access the cluster's API server. + + +You can download the kubeconfig file from the cluster details page in Palette. Check out the [Access Cluster with CLI](palette-webctl.md) guide for steps on how to download your cluster's kubeconfig file and connect to your host cluster with the kubectl CLI. + +## Kubeconfig Files + +Palette exposes two kubeconfig files for each cluster deployed through Palette. + + +* Kubeconfig - This kubeconfig contains OIDC and Spectro Proxy configurations. The kubeconfig file is available for all users with proper access to the cluster. The kubeconfig file can be used to access the cluster's resources and perform operations on the cluster. Refer to the [Kubeconfig Access Permissions](#kubeconfig-access-permissions) section to learn more about access permissions for the kubeconfig file. + + +* Admin Kubeconfig - The admin kubeconfig is created without OIDC configurations. This file is ideal for those who need to access the cluster through an intermediate host, such as a jump host. Refer to the [Kubeconfig Access Permissions](#kubeconfig-access-permissions) section to learn more about access permissions for the admin kubeconfig file. + +![The cluster details page with the two Kubeconfig files elements highlighted](/clusters_cluster--management_kubeconfig_cluster-details-kubeconfig-files.png) + +## Kubeconfig Access Permissions + +Palette exposes kubeconfig files for each cluster deployed through the platform. Depending on the cluster's configuration, the kubeconfig file may contain different configurations, such as the cluster's API server address and authentication credentials.
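Once you have downloaded either kubeconfig file, you can point kubectl at it and list the Kubernetes permissions its identity is granted, which is a quick way to see how the Palette-level and Kubernetes-level permissions discussed below play out in practice. The file name in this sketch is hypothetical.

```shell
# Point kubectl at the downloaded kubeconfig file (hypothetical file name).
export KUBECONFIG=~/Downloads/my-cluster.kubeconfig

# List the Kubernetes RBAC permissions granted to the kubeconfig's identity.
kubectl auth can-i --list
```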
+ +Your assigned [Palette permissions](../../user-management/palette-rbac/project-scope-roles-permissions.md) determine which clusters you can access and what operations you can perform on the cluster. The permissions assigned to you in Palette determine if you can download and access the kubeconfig files for a cluster. + +As a rule of thumb, users with the Palette role [*Cluster Admin*](../../user-management/palette-rbac/project-scope-roles-permissions#cluster) can access both kubeconfig files for all clusters in the project. Users with lower-level project roles such as the *Cluster Editor* or the *Cluster Viewer* may not be able to access the kubeconfig file of the cluster. + + + +:::info + +Palette has its own RBAC system that is separate from the [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) system in the host cluster. The permissions assigned to you in Palette determine what operations you can perform on the cluster. The permissions assigned to you in the host cluster through the Kubernetes RBAC system determine what operations you can perform inside the cluster and on its resources. Refer to the [Palette Roles and Kubernetes Roles](cluster-rbac#palette-roles-and-kubernetes-roles) for additional information. + +::: + + +The access you have as a user to the kubeconfig files for a cluster depends on the following factors: + + + +* Is OIDC configured for the cluster? OIDC is configured in the Kubernetes pack YAML file of the cluster profile. Refer to the respective Kubernetes distribution on the [Packs List](../../integrations/integrations.mdx) page to learn more about OIDC configuration. + + :::caution + + When enabling OIDC, ensure the parameter `oidc-issuer-url` and the `clientConfig` block are properly configured in the Kubernetes Pack YAML. Properly configuring both parameters ensures the kubeconfig file is available for all project users. Otherwise, the kubeconfig file will only be available for Cluster Admins or custom roles that have the *delete* permission for the resource key *cluster*. + + ::: + + +* Is the [Spectro Proxy](../../integrations/frp.md) enabled for the cluster? + + + +Use the tables below to help you identify which project role you need to access the kubeconfig file for a cluster. + + +### Cluster Admin + +The following table shows the *Cluster Admin* role or equivalent provides access to both the Kubeconfig file and the Admin Kubeconfig file whether OIDC and the Spectro Proxy are configured or not. If you use a custom Palette resource role instead of the Palette role Cluster Admin, ensure the custom Palette resource role has the *delete* permissions for the resource key *cluster* to access both kubeconfig files for a cluster. + + + **Is OIDC Configured?** | **Is Spectro Proxy Enabled?** | **Access to Kubeconfig File** | **Access to Admin Kubeconfig File** | + --- | --- | --- | --- | +Yes | Yes | ✅ | ✅ | +No | Yes| ✅ | ✅ | +Yes | No | ✅ | ✅ | + +### Non-Cluster Admin + +The table shows that lower-level project roles, such as the *Cluster Editor* or the *Cluster Viewer*, or custom Palette resource roles that do not have the *delete* permissions for the resource key *cluster* may have access to the kubeconfig file. + +If a cluster has OIDC and the Spectro Proxy enabled then the kubeconfig file is available. Or, if the cluster has OIDC enabled and the Spectro Proxy disabled, the kubeconfig file is available. 
+ + + **Is OIDC Configured?** | **Is Spectro Proxy Enabled?** | **Access to Kubeconfig File** | **Access to Admin Kubeconfig File** | +| --- | --- | --- | --- |---| +|Yes | Yes | ✅ | ❌ +| No | Yes | ❌ | ❌ +| Yes | No | ✅ | ❌ + + +## API Access + +Palette exposes two API endpoints that you can use to access the kubeconfig file for a host cluster. The endpoints are: + + +* `GET https://api.spectrocloud.com/v1/spectroclusters/{clusterId}/assets/kubeconfig` + Returns the kubeconfig file for the cluster. The kubeconfig file is returned as a text string. Access to the kubeconfig file is determined by the permissions assigned to you in Palette. For additional information, refer to the [Kubeconfig Access Permissions](#kubeconfig-access-permissions) section. + + +* `GET https://api.spectrocloud.com/v1/spectroclusters/{clusterId}/assets/adminkubeconfig` + Returns the admin kubeconfig file for the cluster. The admin kubeconfig file is returned as a text string. Only users with the Palette project role *Cluster Admin* or with a custom Palette resource role with the resource key *cluster* and the *delete* permission can access the admin kubeconfig file for a cluster. \ No newline at end of file diff --git a/docs/docs-content/clusters/cluster-management/kubernetes-dashboard.md b/docs/docs-content/clusters/cluster-management/kubernetes-dashboard.md new file mode 100644 index 0000000000..8179d3072a --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/kubernetes-dashboard.md @@ -0,0 +1,13 @@ +--- +sidebar_label: 'Kubernetes Dashboard' +title: 'Kubernetes Dashboard' +description: 'Kubernetes Dashboard with Spectro Proxy' +hiddenFromNav: false +sidebar_position: 210 +tags: ["clusters", "cluster management", "k"] +--- + + +The [Kubernetes Dashboard](https://github.com/kubernetes/dashboard) is a general-purpose, web-based UI for Kubernetes clusters. You can use the dashboard to manage the cluster, deploy and manage applications, and troubleshoot issues. + +Use the [Spectro Kubernetes Dashboard](../../integrations/spectro-k8s-dashboard.md) pack to add the Kubernetes dashboard to your cluster. The pack documentation page has instructions on how to use the pack. diff --git a/docs/docs-content/clusters/cluster-management/macros.md b/docs/docs-content/clusters/cluster-management/macros.md new file mode 100644 index 0000000000..19e8b11915 --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/macros.md @@ -0,0 +1,195 @@ +--- +sidebar_label: "Palette Macros" +title: "Macros Support" +description: "Apply Palette Macros to Cluster Profiles." +hide_table_of_contents: false +sidebar_position: 130 +tags: ["clusters", "cluster management"] +--- + +Palette supports placeholder variables as Macros in our cluster profile layers. These macros make regression and update of variables, across multiple-running clusters, easier. We encourage creating these macros and using them within any of our cluster profile layers. Hence, changes to the existing Macros get updated to the corresponding cluster profile and the clusters with these profiles attached. + +## Scope of Palette Macros + +Palette users can declare the Macros under three different scopes: + +1. **Project Scope**: Create `Macros` from the project dashboard with project privileges. + + +2. **Tenant Admin Scope**: Create macros from the `Tenant Admin Dashboard` with administrative privileges. + + +3. **System Scope**: Includes the default system macros and user-created system macros. 
+ +The Macros must have unique names within a given application, but Macros with a different Scope can have a unique name. In such cases, the precedence followed is in decreasing order (the highest precedence being Project Scope). + + + **Project Scope** > **Tenant Scope** > **System Scope** + + +## Create your Macro + +Palette users can use Macros in three different Scopes. Following the user preferences and privileges, log in as a Tenant Admin or Project Admin, to create macros under Tenant Admin scope and Project Scope, respectively. System Scope Macros can be created via API's. The steps to create a macro are as below: + + + + + +1. Log in to the Palette Management Console as a **Tenant Admin**. + + +2. From the menu on the left-hand side, click on **Tenant Settings** and select the **Macros** tab. + + +3. Click on **+Add Macro**. + + +4. Complete the following details for the same: + - **Name**: A custom name for the Macro. + - **Value**: The value to be assigned to the placeholder variable. + + +5. Click the **Save changes** button to complete the wizard. + + + + + +1. Log in to the Palette Management Console as a **Project Admin**. + + +2. From the menu on the left-hand side, click on **Project Settings** and select the **Macros** tab. + + +3. Click on **+Add Macro**. + + +4. Complete the following details for the same: + * **Name**: A custom name for the Macro + * **Value**: The value to be assigned to the placeholder variable. + + +5. Click the **Save changes** button to complete the wizard. + + + + + +Create and list your System Level macros via an API. + + + + + + + +```yaml +manifests: + aws_ebs: + storageType: "gp2" + #Allowed reclaim policies are Delete, Retain + reclaimPolicy: "Delete" + #Toggle for Volume expansion + allowVolumeExpansion: "true" + #Toggle for Default class + isDefaultClass: "true" + #Supported binding modes are Immediate, WaitForFirstConsumer + #Setting this to WaitForFirstConsumer for AWS, so that the volumes gets created in the same AZ as that of the pods + volumeBindingMode: "{{.spectro.macro.volumeBindingMode}}" +``` +## Use your Macros + +The Macros are overridden into the Cluster Profile layers: +* During a Cluster Profile creation. + + +* For a Cluster Profile used by a running cluster or during cluster provisioning. + + + + +### Add a Macro to a Cluster Profile Pack: + +1. Log in to the Palette console and navigate to **Profiles**. + + +2. From the **Cluster Profiles** tab, select the **Cluster Profile** to which the Macro is to be added. + + **Note:** A macro can be attached to any Infrastructure or Add-on layers of a Profile. + + +3. Add the macro name to the desired layer of the profile in the format: + + `{{.spectro.macro.macro-name}}`, where the *macro-name* is the **Custom name**, created by the user. + + +4. Save the changes to the **Cluster Profile**. This Macro can be replaced or edited later. + +### Replace or Add a Macro to a running Cluster: + +1. ​​Log in to Palette Console and go to the **Clusters** tab. + + +2. Select the **Cluster Name** to which the Macro is to be updated and navigate to the **Cluster Details** page. + + +3. Go to the **Profiles** tab to select the layer to which the Macro is to be added. + + +4. In the desired existing pack, replace the value with the Macro name as: + + `{{.spectro.macro.macro-name}}` + + +5. Save the changes to the **Cluster Profile**. + +## Delete Macros + + + + +1. Log in to Palette Management Console as **Tenant Admin**. + + +2. 
From the menu on the left-hand side, go to **Tenant Settings** and select the **Macros** tab. + + +3. Click the **Delete** button to remove the macro. + + +4. Click the **Save changes** button to complete the wizard. + + + + + +1. Log in to Palette Management Console as **Project Admin**. + + +2. From the menu on the left-hand side, go to **Project Settings** and select the **Macros** tab. + + +3. Click on the **Delete** button to remove the macro. + + +4. Click the **Save changes** button to complete the wizard. + + + + + +
+Delete your system level macros via an API. + +
+ +
+ +:::caution +When a Macro is deleted from the UI, it needs to be cleared from the cluster profile to avoid Macro deletion anomalies in the running cluster. +::: + + + + + diff --git a/docs/docs-content/clusters/cluster-management/monitoring/_category_.json b/docs/docs-content/clusters/cluster-management/monitoring/_category_.json new file mode 100644 index 0000000000..c3460c6dbd --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/monitoring/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 30 +} diff --git a/docs/docs-content/clusters/cluster-management/monitoring/deploy-agent.md b/docs/docs-content/clusters/cluster-management/monitoring/deploy-agent.md new file mode 100644 index 0000000000..1b597fe63d --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/monitoring/deploy-agent.md @@ -0,0 +1,288 @@ +--- +sidebar_label: 'Enable Monitoring on Host Cluster' +title: 'Enable Monitoring on Host Cluster' +description: 'Learn how to configure your host cluster to forward metrics to a Prometheus server.' +hiddenFromNav: false +hide_table_of_contents: false +sidebar_position: 10 +tags: ["clusters", "cluster management", "monitoring"] +--- + + +Observability (O11y) of Kubernetes clusters and their metrics is an important operational capability. Palette provides a pack that collects metrics from host clusters, which can be scraped by a monitoring stack. + +The steps below will teach you how to create a Prometheus agent cluster add-on profile to deploy on the host clusters you would like to monitor. Creating an add-on profile makes it easier for you to deploy the Prometheus agent to other host clusters in the future. You will use this add-on profile when deploying a new host cluster, but you can also apply the add-on profile to an existing cluster to send metrics to the monitoring stack. + + +## Create Add-on Profile and Deploy Cluster + +Use the following steps to create an add-on profile and deploy a host cluster with the Prometheus agent installed. You can choose to deploy the Prometheus agent with or without authentication and encryption. + +- [Without Authentication](#without-authentication) + +- [With Authentication and Encryption](#with-authentication-and-encryption) + + +### Prerequisites + + +- A monitoring stack. Check out the [Deploy Monitoring Stack](deploy-monitor-stack.md) guide to learn how to deploy a monitoring stack in your Palette environment. + + +- An infrastructure provider environment registered in Palette. Refer to the [Clusters](../../clusters.md) documentation for guidance on how to register your infrastructure provider environment in Palette. + + + +### Without Authentication + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Profiles**. + + +3. Click on **Add Cluster Profile** to create a new cluster profile. + + +4. Provide the cluster profile a name and select the type **Add-on**. Click on **Next**. + + +5. In the following screen, select **Add New Pack**. + + +6. Use the following information to find the Prometheus Agent pack. + - **Pack Type**: Monitoring + - **Registry**: Public Repo + - **Pack Name**: Prometheus Agent + - **Pack Version**: 19.0.X or newer. + + +7. Review the YAML configuration on the right. Navigate down the file until you find the parameter `url` in the `remoteWrite` section. The `remoteWrite.url` is exposed by the monitoring stack. The Prometheus server URL can be found by reviewing the details of the monitoring stack. 
Use the URL exposed by the Prometheus service. + + The following image displays the cluster details page of a monitoring stack. Use the URL exposed for port 9090 to populate the `remoteWrite.url` parameter. + +![A view of the cluster details page with a highlighted box around the Prometheus service URL](/integrations_prometheus-agent_cluster-detail-view.png) + +
+ +:::caution + +The Prometheus server URL must be in the format of `http://HOST:PORT/api/v1/write`. +Example: `http://a2c938972938b4f0daee5f56edbd40af-1690032247.us-east-1.elb.amazonaws.com:9090/api/v1/write` + +::: + +
+ +```yaml +charts: + prometheus: + server: + remoteWrite: + - url: "http://a2c938972938b4f0daee5f56edbd40af-1690032247.us-east-1.elb.amazonaws.com:9090/api/v1/write" +``` + +8. Confirm your changes by selecting **Confirm & Create**. + + +9. Click on **Add New Pack**. + + +10. Use the following information to find the Spectro Cluster Metrics pack. + - **Pack Type**: Monitoring + - **Registry**: Public Repo + - **Pack Name**: Spectro Cluster Metrics + - **Pack Version**: 3.3.X or newer + + +11. Use the default values. Confirm your changes by selecting **Confirm & Create**. + + +12. Click on **Next** to review the cluster profile. Save the cluster profile. + + +13. Navigate to the left **Main Menu** and select **Clusters**. + + +14. Click on **Add New Cluster**. Select **Deploy New Cluster**. + + +15. Pick the infrastructure provider you selected for the cluster profile you created earlier. + + +16. Assign a name to the host cluster and select the registered account that will deploy it. Click on **Next**. + + +17. Select a cluster profile to apply to your cluster. Click on **Next**. + + +18. The next screen displays all the layers of your cluster profile. You need to apply your add-on profile to this cluster profile. Click on the **+** button above the cluster profile layers. + + +19. Select the add-on profile you created earlier. Selecting the add-on profile ensures the Prometheus agent is installed with the correct configuration. Click on **Next** to proceed. + + +20. Complete the remainder of the cluster creation process. + +After the cluster deployment process, you will have a host cluster with the Prometheus agent installed and ready to send metrics to the monitoring stack. + + Refer to the [Validate](#validate) section to learn how to validate the Prometheus agent is successfully installed and sending metrics to the monitoring stack. +### With Authentication and Encryption + + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Profiles**. + + +3. Click on **Add Cluster Profile** to create a new cluster profile. + + +4. Provide the cluster profile a name and select the type **Add-on**. Click on **Next**. + + +5. In the following screen, select **Add New Pack**. + + +6. Use the following information to find the Prometheus Agent pack. + - **Pack Type**: Monitoring + - **Registry**: Public Repo + - **Pack Name**: Prometheus Agent + - **Pack Version**: 19.0.X or newer. + + +7. Review the YAML configuration on the right. Scroll down in the file until you find the parameter `url` in the `remoteWrite` section. The `remoteWrite.url` is exposed by the monitoring stack. You can find the Prometheus server URL by reviewing the details of the monitoring stack. Use the URL exposed by the Prometheus service. + + The following image displays the cluster details page of a monitoring stack. Use the URL exposed for port 9090 to populate the `remoteWrite.url` parameter. + +![A view of the cluster details page with a highlighted box around the Prometheus service URL](/integrations_prometheus-agent_cluster-detail-view.png) + +
+ +:::caution + +The Prometheus server URL must be in the format `http://HOST:PORT/api/v1/write`. +Example: `https://metrics.example.com:9090/api/v1/write` + +::: + + ```yaml + charts: + prometheus: + server: + remoteWrite: + - url: "https://metrics.example.com:9090/api/v1/write" + ``` + +8. Add the `basic_auth` parameters shown below. Replace `` and `` with the actual credential values. Use the username you created to authenticate with the Prometheus API server. If you followed the [Deploy a Monitoring Stack](deploy-monitor-stack.md#deployamonitoringstack) with authentication guide, then the username is `agent`. + +
+ +```yaml +charts: + prometheus: + server: + remoteWrite: + - url: "http://metrics.example.com:9090/api/v1/write" + remote_timeout: "5s" + basic_auth: + username: "" + password: +``` + +8. Confirm your changes. + + +9. Click on **Add New Pack**. + + +10. Use the following information to find the Spectro Cluster Metrics pack. + - **Pack Type**: Monitoring + - **Registry**: Public Repo + - **Pack Name**: Spectro Cluster Metrics + - **Pack Version**: 3.3.X or newer + + +11. Use the default values. Confirm your changes by selecting **Confirm & Create**. + + +12. Click on **Next** to review the cluster profile. Save the cluster profile. + + +13. Navigate to the left **Main Menu** and select **Clusters**. + + +14. Click on **Add New Cluster**. Select **Deploy New Cluster**. + + +15. Pick the infrastructure provider you selected for the cluster profile you created earlier. + + +16. Assign a name to the host cluster and select the registered account that will deploy it. Click on **Next**. + + +17. Select a cluster profile to apply to your cluster. Click on **Next**. + + +18. The next screen displays all the layers of your cluster profile. You need to apply your add-on profile to this cluster profile. Click on the **+** button above the cluster profile layers. + + +19. Select the add-on profile you created earlier. Selecting the add-on profile ensures the Prometheus agent is installed with the correct configuration. Click on **Next** to proceed. + + +20. Complete the remainder of the cluster creation process. + +When you deploy the cluster deployment, you will have a host cluster with the Prometheus agent installed and ready to send metrics to the monitoring stack. + + +### Validate + +To validate that the Prometheus agent is successfully installed and sending metrics to the monitoring stack, use the following steps. + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select the monitoring stack cluster to review the details page. + + +4. Ensure the cluster is in the **Running** state. + + +5. Click on the exposed service URL for the service **prometheus-operator-kube-prometheus-stack-grafana**. + + +6. Log in to the Grafana dashboard using the user `admin` and the password you specified in the cluster profile. + + +7. In the Grafana dashboard, click on the left **Main Menu** and click on **Dashboards**. Palette exposes a set of Grafana dashboards by default. + + +8. Select the **Spectro Cloud/ Spectro Clusters** dashboard. + + +9. Use the **cluster** filter and review the list of available clusters. Select your newly deployed cluster to review its metrics. + + +![A grafana dashboard view of the cluster metric displaying pack status](/clusters_cluster-management_grafana_spectro_metrics.png) + +
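If you prefer to confirm ingestion outside of Grafana, you can also query the monitoring stack's Prometheus HTTP API directly. The command below is a sketch: replace `REPLACE_ME` with the hostname exposed by the Prometheus service, add basic authentication credentials with `--user` if you enabled authentication, and note that the `cluster` label name is an assumption that depends on how your packs attach labels to forwarded metrics.

```shell
# Query the Prometheus HTTP API for reporting targets, grouped by the (assumed) cluster label.
# Replace REPLACE_ME with the hostname of the Prometheus service on the monitoring stack.
curl -sG "http://REPLACE_ME:9090/api/v1/query" \
  --data-urlencode 'query=count(up) by (cluster)'
```

A JSON response listing your new cluster among the results indicates that its agent is successfully writing metrics to the monitoring stack.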
+ +:::caution + +Pods that do not define the `request` and `limit` attributes will display no metrics data in the Grafana out-of-the-box Kubernetes Pods dashboard. + +::: + + +Use the other dashboards created by Palette to learn more about your environment. + + +## Next Steps + +Visit your Grafana dashboard and explore the Palette-created dashboards to learn more about your environment. To learn how to create dashboards tailored to your environment, check out the [Grafana tutorials](https://grafana.com/tutorials/). diff --git a/docs/docs-content/clusters/cluster-management/monitoring/deploy-monitor-stack.md b/docs/docs-content/clusters/cluster-management/monitoring/deploy-monitor-stack.md new file mode 100644 index 0000000000..baa4853fdd --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/monitoring/deploy-monitor-stack.md @@ -0,0 +1,540 @@ +--- +sidebar_label: 'Deploy Monitoring Stack' +title: 'Deploy Monitoring Stack' +description: 'Learn how to deploy a monitoring stack in your Palette environment.' +hiddenFromNav: false +hide_table_of_contents: false +sidebar_position: 0 +tags: ["clusters", "cluster management", "monitoring"] +--- + +The monitoring stack you will deploy uses the open-source tool [Prometheus](https://prometheus.io/docs/introduction/overview/) to support your environment's monitoring requirements. The monitoring stack is a dedicated Kubernetes cluster that acts as a centralized aggregation point to which all other clusters in your Palette environment forward their metrics. + +The monitoring stack uses a server-client architecture. It uses the [Prometheus Operator](../../../integrations/prometheus-operator.md) pack to deploy all the dependencies the Prometheus server requires. The server exposes an API endpoint for all other clients to forward metrics. The clients are Kubernetes clusters with the [Prometheus Agent](../../../integrations/prometheus-agent.md) pack installed and configured. + +Use the following steps to deploy a monitoring stack and learn how to configure a host cluster to forward metrics to it.
+ +:::caution + +We recommend you avoid installing applications in your monitoring stack. The monitoring stack will require all the allocated resources to support Prometheus and incoming metrics from all other clusters. + +::: + +## Deploy a Monitoring Stack + +The steps below will deploy a new host cluster with the Prometheus Operator pack. You can add the Prometheus Operator pack to an existing cluster if you already have a host cluster deployed in your environment. + +The Prometheus Operator pack will install an unsecured Prometheus server by default. Use the **With Authentication and Encryption** tab for guidance on how to enable authentication. + +You can choose to deploy the monitoring stack with or without authentication. +- [Without Authentication](#without-authentication) +- [With Authentication and Encryption](#with-authentication-and-encryption) + + +
+ + +### Without Authentication + +#### Prerequisites + +- An infrastructure provider environment registered in Palette. Refer to the [Clusters](../../clusters.md) documentation for guidance on how to register your infrastructure provider environment in Palette. + + +- The minimum required size for the Prometheus server is 4 CPU, 8 GB Memory, and 10 GB Storage. We recommend the monitoring stack have 1.5x to 2x the minimum required size: + + Recommended size: + - 8 CPU + - 16 GB Memory + - 20 GB Storage. + + + As new clusters with the Prometheus agent are added to your environment, review the resource utilization and consider increasing resources if needed. As the Prometheus documentation recommends, each additional agent requires the following resources from the monitoring stack: + + Each added agent: + - 0.1 CPU + - 250 MiB Memory + - 1 GB Storage. + +
+ + Refer to the [Prometheus Operational aspects](https://prometheus.io/docs/prometheus/latest/storage/#operational-aspects) documentation for additional guidance. + +#### Create Cluster Profile and Deploy + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Profiles**. + + +3. Click on **Add Cluster Profile** to create a new cluster profile. + + +4. Provide the cluster profile a name and select the type **Full**. Click on **Next**. + + +5. Select the infrastructure provider and continue. + + +6. Go ahead and select the desired operating system, Kubernetes distribution, container network interface (CNI), and container storage interface (CSI). Click on **Next Layer** after each selection. When you have completed selecting core infrastructure for the profile, click **Confirm**. + + +7. In the next screen that displays, select **Add New Pack**. + + +8. Use the following information to find the Prometheus Operator pack. + - Pack Type: Monitoring + - Registry: Public Repo + - Pack Name: Prometheus Grafana + - Pack Version: 44.25.X or newer.
+ + + +9. Review the YAML configuration on the right. Scroll down in the file until you find the parameter `adminPassword`. Input the password value for the admin user. The default admin user name is `admin`. + + +10. Next, click on the **Presets** button to expand the options drawer. + + +11. Scroll down the presets option menu and enable **Remote Monitoring**. Confirm your changes. You can enable several options to expand the functionality of the monitoring stack. Review the [Prometheus Operator](../../../integrations/prometheus-operator.md) pack documentation to learn more about the available options. + + + +12. Confirm your changes by selecting **Confirm & Create**. + + +13. Click on **Add New Pack**. + + +14. Use the following information to find the Spectro Cluster Metrics pack. + - **Pack Type**: Monitoring + - **Registry**: Public Repo + - **Pack Name**: Spectro Cluster Metrics + - **Pack Version**: 3.3.X or newer + + +15. Use the default values. Confirm your changes by selecting **Confirm & Create**. + + +16. Click on **Add New Pack**. + + +17. Use the following information to find the Spectrocloud Grafana Dashboards pack. + - **Pack Type**: Monitoring + - **Registry**: Public Repo + - **Pack Name**: Spectrocloud Grafana Dashboards + - **Pack Version**: 1.0.X or newer + + +18. Use the default values. Confirm your changes by selecting **Confirm & Create**. + + +19. Click on **Next** to review the cluster profile and save it. + + +20. Navigate to the left **Main Menu** and select **Clusters**. + + +21. Click on **Add New Cluster**. Select **Deploy New Cluster**. + + +22. Choose the infrastructure provider you selected for the cluster profile you created earlier. + + +23. Assign a name to the host cluster and select the registered account you will deploy it to. Click on **Next**. + + +24. Choose the cluster profile you created earlier and complete the remainder of the cluster creation process. + +When you deploy the cluster, a host cluster with Prometheus will be installed and ready to receive information from Prometheus agents. + + +#### Validate + +To validate the monitoring stack is successfully deployed and ready to receive Prometheus agent requests, use the following steps. + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select the monitoring stack cluster to review the details page. + + +4. Ensure the cluster is in **Running** state. + + +5. Click on the exposed service URL for the service **prometheus-operator-kube-prometheus-stack-grafana**. + + +6. Log in to the Grafana dashboard using the user `admin` and the password you specified in the cluster profile. + + + +### With Authentication and Encryption + +To enable Hypertext Transfer Protocol Secure (HTTPS), you must make several architectural decisions and decide on various options for setting up the environment. These options range from choosing what will generate the Secure Socket Layer (SSL) certificates to how incoming requests for Grafana or Prometheus are handled. + +The approach presented in this guide is one pattern you can use. However, we encourage you to discuss this pattern with your system administrator or platform engineering team before changing your infrastructure and Domain Name System (DNS) resources. + +The following diagram represents the infrastructure pattern you will use in this guide to enable HTTPS. 
+ +![An architecture diagram that displays the network flow and infrastructure components](/clusters_monitoring_deploy-monitor-stack_https-architecture.png) + +In this guide, the following domains are used to expose the monitoring stack: + +| Domain | Description| +|---|---| +|`metrics.example.com`| The endpoint that all host clusters will forward Prometheus metrics. | +| `monitoring.example.com` | The Grafana dashboard.| + +#### Prerequisites + + +- Experience with DNS and setting up custom domains that use SSL certificates are required for this guide. In addition, the following actions are needed. + + - Ability to create and update DNS record names. + + - Two custom domain names. One domain is for the Grafana dashboard, and another is for host clusters to forward metrics to the monitoring stack. + + - Ability to create a public certificate for each domain. + + +- An infrastructure provider environment registered in Palette. Refer to the [Clusters](../../clusters.md) documentation for guidance on how to register your infrastructure provider environment in Palette. + + +- [htpasswd](https://httpd.apache.org/docs/2.4/programs/htpasswd.html) or similar basic auth password file generator tool. + + +- The minimum required size for the Prometheus server is 4 CPU, 8 GB Memory, and 10 GB Storage. We recommend the monitoring stack have 1.5x to 2x the minimum required size: + + Recommended size: + - 8 CPU + - 16 GB Memory + - 20 GB Storage. + + + As new clusters with the Prometheus agent are added to your environment, review the resource utilization and consider increasing resources if needed. As the Prometheus documentation recommends, each additional agent requires the following resources from the monitoring stack: + + Each added agent: + - 0.1 CPU + - 250 MiB Memory + - 1 GB Storage. + + Refer to the [Prometheus Operational aspects](https://prometheus.io/docs/prometheus/latest/storage/#operational-aspects) documentation for additional guidance. + + +#### Create Cluster Profile and Deploy + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Profiles**. + + +3. Click on **Add Cluster Profile** to create a new cluster profile. + + +4. Provide the cluster profile a name and select the type **Full**. Click on **Next**. + + +5. Select the infrastructure provider and continue. + + +6. Go ahead and select the desired operating system, Kubernetes distribution, container network interface (CNI), and container storage interface (CSI). Click on **Next Layer** after each selection. When you have completed selecting core infrastructure for the profile, click **Confirm**. + + +7. In the next screen that displays, select **Add New Pack**. + + +8. Use the following information to add the Nginx ingress controller pack. + - Pack Type: Ingress + - Registry: Public Repo + - Pack Name: Nginx + - Pack Version: 1.5.X or newer.
+ + +9. Review the YAML configuration on the right and add the following changes: + +
+ + ```yaml + charts: + ingress-nginx: + controller: + extraArgs: + enable-ssl-passthrough: true + ``` + +10. Click on **Confirm & Create**. + + +11. Select **Add New Pack**. + + +12. Use the following information to find the Prometheus Operator pack. + + - Pack Type: Monitoring + - Registry: Public Repo + - Pack Name: Prometheus Grafana + - Pack Version: 44.3.X or newer.
+ + +13. Next, click on the **Presets** button to expand the options drawer. + + +14. Scroll down the presets option menu and enable **Remote Monitoring**. + + +15. Review the YAML configuration on the right. Scroll down in the file until you find the parameter `grafana.adminPassword`. Input the password value for the admin user. The default admin user name is `admin`. + +
+ + ```yaml + charts: + kube-prometheus-stack: + grafana: + adminPassword: "YourPassword" + ``` + +16. Next, update the `prometheus.service.type` parameter to `ClusterIP`. + +
+ + ```yaml + charts: + kube-prometheus-stack: + prometheus: + service: + type: ClusterIP + ``` + +17. Confirm your changes by selecting **Confirm & Create**. You can enable several options to expand the functionality of the monitoring stack. Review the [Prometheus Operator](../../../integrations/prometheus-operator.md) pack documentation to learn more about the available options. + + +18. Click on **Add New Pack**. + + +19. Use the following information to find the Spectro Cluster Metrics pack. + - **Pack Type**: Monitoring + - **Registry**: Public Repo + - **Pack Name**: Spectro Cluster Metrics + - **Pack Version**: 3.3.X or newer + + +20. Use the default values. Confirm your changes by selecting **Confirm & Create**. + + +21. Click on **Add New Pack**. + + +22. Use the following information to find the Spectrocloud Grafana Dashboards pack. + - **Pack Type**: Monitoring + - **Registry**: Public Repo + - **Pack Name**: Spectrocloud Grafana Dashboards + - **Pack Version**: 1.0.X or newer + + +23. Use the default values. Confirm your changes by selecting **Confirm & Create**. + + +24. Click on **Next** to review the cluster profile and save it. + + +25. Navigate to the left **Main Menu** and select **Clusters**. + + +26. Click on **Add New Cluster**. Select **Deploy New Cluster**. + + +27. Choose the infrastructure provider you selected for the cluster profile you created earlier. + + +28. Assign a name to the host cluster and select the registered account you will deploy it to. Click on **Next**. + + +29. Choose the cluster profile you created earlier and complete the remainder of the cluster creation process. + + +30. Once the host cluster is deployed, navigate to the left **Main Menu** and select **Clusters**. Click on your cluster to display the details page and ensure its status is **Running**. + + + +31. Download the Kubernetes configuration file. Click on the URL that has the name of your cluster followed by a period and the word *kubeconfig*. Refer to the [Access Cluster with CLI](../palette-webctl.md#access-cluster-with-cli) for additional guidance. + + +32. Open a terminal window and set the environment variable `KUBECONFIG` to point to kubeconfig file you downloaded. + +
+ + ```shell + export KUBECONFIG=~/Downloads/dev-monitoring-stack.config + ``` + +33. Create an htpasswd file for the user `agent` and assign a password. You can choose a different username if you prefer. + +
+ + ```shell + htpasswd -c auth agent + ``` + + Output: + ```shell + New password: [agent_password_here] + New password: + Re-type new password: + Adding password for user agent + ``` + +34. Convert the htpasswd file into a Kubernetes secret. + +
+ + ```shell + kubectl create secret generic basic-auth --from-file=auth --namespace monitoring + ``` + + Output: + ```shell + secret "basic-auth" created + ``` + +35. Navigate back to Palette, and review the cluster profile you created for the monitoring stack. From the left **Main Menu** > **Profiles** > select your cluster profile. Click on the Prometheus operator layer to edit the YAML. + + +36. Locate the `prometheus.ingress` section near the end of the file. Update the ingress configuration with the values provided below. Replace the `hosts` parameter with your custom domain. + +
+ + ```yaml + ingress: + enabled: true + ingressClassName: nginx + annotations: + nginx.ingress.kubernetes.io/auth-type: basic + nginx.ingress.kubernetes.io/auth-secret: basic-auth + nginx.ingress.kubernetes.io/auth-realm: "Authentication Required" + hosts: + - metrics.example.com + ``` + +37. Confirm your updates on the next screen that displays. + + + +38. From the left **Main Menu**, select **Clusters** and access the monitoring stack host cluster. + + + +39. Click on the **Updates Available** button to review the changes. + + + +40. Accept the changes and select **Confirm Updates**. + + + + :::caution + + The following steps can be complex, depending on your environment and your access. Discuss the remaining step with your network administrator team if you need additional guidance. + + ::: + + +41. Create a Canonical Name (CNAME) record for each of the following services and add the load balancer hostname to the CNAME's record value. Use the table below to identify which mapping to use between the domain and each load balancer hostname. + + | Service | Domain| CNAME Value Example | + |---|---|---| + |`nginx-ingress-controller`| `metrics.example.com` | `a57b622a0c0a148189ed00df614481c9-1803006767.us-east-1.elb.amazonaws.com` + |`prometheus-operator-kube-prometheus-stack-grafana` | `monitoring.example.com` | `a702f8a14b9684a30b18b875d2cca997-1676466159.us-east-1.elb.amazonaws.com` | + + ![A screenshot of the Palette user interface with two boxes highlighting the load balancers that you need to add to your CNAME.](/clusters_monitoring_deploy-monitor-stack_loadbalancers.png) + + + :::info + + + You can also use `kubectl` to retrieve the load balancer hostname. + + Grafana: + + ```shell + kubectl get service prometheus-operator-kube-prometheus-stack-grafana -n monitoring -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' + ``` + + Prometheus: + ```shell + kubectl get service nginx-ingress-controller --namespace nginx -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' + ``` + + ::: + +42. Create a public certificate for each domain. If you are using a public cloud provider, use the native certificate manager service to generate a public certificate that you can attach to the load balancers with minimal overhead. On-prem, use the default certificate manager used by your organization. + + + +43. Update the network rules for each of the load balancers to allow inbound port 443. + +
+ + +44. Next, update the load balancer listeners to forward requests from port 443 to the respective target port on the monitoring stack. The following table will map the service's load balancer listener with the respective configuration. Refer to the architecture diagram from the introduction to help you visualize the mapping. + + | Inbound Load Balancer Port | Domain | Monitoring Stack Port | Service | + |---|---|---|---| + |443| `monitoring.example.com` | Use the same instance port the original entry for port 80 is using. | `prometheus-operator-kube-prometheus-stack-grafana` | + |443| `metrics.example.com`| Use the same instance port the original entry for port 80 is using.| `nginx-ingress-controller` | + + +45. Wait for the DNS changes to propagate. This can take up to five minutes. + + +Your monitoring stack is now enabled with authentication and network encryption. + + +#### Validate + +To validate the monitoring stack is successfully deployed and ready to receive Prometheus agent requests, use the following steps. + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select the monitoring stack cluster to review the details page. + + +4. Ensure the cluster is in **Running** state. + + +5. Next, open up your web browser and visit the domain name you specified for the Grafana dashboard. Example: `https://monitoring.example.com`. + +
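+
+    As a quick check from the command line, you can also confirm that the domain resolves and answers over HTTPS before opening the browser. The domain shown is the example value used throughout this guide.
+
+    ```shell
+    curl --head https://monitoring.example.com
+    ```
+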
+ +6. Log in to the Grafana dashboard using the user `admin` and the password you specified in the cluster profile. + + +7. After you have verified you can log in to the Grafana dashboard, open a new tab and visit the Prometheus endpoint. Example: `https://metrics.example.com` + + + +8. Log in with the user `agent` and use the password you specified in the htpasswd file. + + +:::info + + A common error is not updating the network rules to allow inbound connections for port 443 to the load balancers. Ensure you have updated all the required network rules to allow inbound network requests for port 443. + +::: + + + +## Next Steps + +Now that you have a monitoring stack deployed and available in your environment, start adding the Prometheus agent to new and existing clusters. Check out the [Enable Monitoring on Host Cluster](deploy-agent.md) to get started. \ No newline at end of file diff --git a/docs/docs-content/clusters/cluster-management/monitoring/monitoring.md b/docs/docs-content/clusters/cluster-management/monitoring/monitoring.md new file mode 100644 index 0000000000..52d3f9941b --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/monitoring/monitoring.md @@ -0,0 +1,32 @@ +--- +sidebar_label: 'Cluster Monitoring' +title: 'Cluster Monitoring' +description: 'Learn how to set up cluster monitoring with Prometheus' +hiddenFromNav: false +tags: ["clusters", "cluster management", "monitoring"] +--- + +Palette exposes a set of [workload metrics](../workloads.md) out-of-the-box to help cluster administrators better understand the resource utilization of the cluster. The workload metrics Palette exposes are a snapshot in time and offer a limited ability to review past values. Administrators who want more information or a better understanding of their cluster metrics should consider using a dedicated monitoring system. + +Several Packs are available in the [monitoring](../../../integrations/integrations.mdx) category that you can use to add additional monitoring capabilities to your cluster and help you get answers to questions. For a more robust and scalable solution, we recommend creating a dedicated monitoring stack for your environment. You can deploy a monitoring stack that uses [Prometheus](https://prometheus.io/) to collect metrics from all clusters in your environment. + +To help you get started with deploying a monitoring stack to your Palette environment, check out the [Deploy Monitoring Stack](deploy-monitor-stack.md) and the [Enable Monitoring on Host Cluster](deploy-agent.md) guide. + +## Resources + +- [Deploy Monitoring Stack](deploy-monitor-stack.md) + + +- [Enable Monitoring on Host Cluster](deploy-agent.md) + + +- [Prometheus Operator Pack](../../../integrations/prometheus-operator.md) + + +- [Prometheus Agent Pack](../../../integrations/prometheus-agent.md) + + +- [Prometheus Cluster Metrics](../../../integrations/prometheus-cluster-metrics.md) + + +- [Spectro Cloud Grafana Dashboards](../../../integrations/grafana-spectrocloud-dashboards.md) \ No newline at end of file diff --git a/docs/docs-content/clusters/cluster-management/namespace-management.md b/docs/docs-content/clusters/cluster-management/namespace-management.md new file mode 100644 index 0000000000..b4ef736748 --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/namespace-management.md @@ -0,0 +1,187 @@ +--- +sidebar_label: "Namespace Management" +title: "Namespace Management" +description: "Learn how to create and delete a namespace and assign resource quotas." 
+hide_table_of_contents: false +sidebar_position: 120 +tags: ["clusters", "cluster management"] +--- + + +In Kubernetes, namespaces provide a way to isolate groups of resources within a single cluster. Some of the benefits of namespaces are: + +
+ +- They can be used to partition resources among multiple users via resource quota – where each namespace has its own set of resources – without having to set up multiple physical clusters. + + +- You can configure Role-Based Access Control (RBAC) based on namespaces. For information about configuring namespaces and RBAC, check out [RBAC and NS Support](cluster-rbac). + + +- Namespaces can be used for different purposes such as testing, development, and production. + + +- You can use namespaces to help prevent resource naming conflicts. Resource names must be unique within a namespace but not across namespaces. + + +- In environments that have hybrid containerized and virtualized applications, a separate namespace can be used to isolate virtual machines (VMs). For information about a VM environment in Palette, check out [Virtual Machine Management](../../vm-management/vm-management.md). + + + +## Create a Namespace + +The following steps will guide you on how to create a namespace. + + + +### Prerequisites + +- An active cluster. + + +- Permission to create a namespace. + + +- A unique namespace name. + + + +### Create a Namespace in a Cluster + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + +3. Select the cluster in which you want to create a namespace. + + +4. Navigate to the **Workloads** > **Namespaces** tab, and click the **Manage Namespaces** button. + +
+ + The **Settings** pane displays with **RBAC** preselected and the **Namespaces** tab opened by default. + + ![Cluster Settings pane with three arrows that point respectively to Namespace Name field, Add to List button, and the location where the namespace is listed](/clusters_cluster-management_namespace-create.png) + + +5. Type a unique namespace name in the **Namespace name or Regex** field and click **Add to List** at right. + + + +6. You can assign resource quotas now or at a later time. To learn how, check out [Assign Resource Quotas](namespace-management.md#assign-resource-quotas). + +
+ + For details on how to configure RBAC for namespaces, check out the [RBAC and NS Support](cluster-rbac.md#palette-roles-and-kubernetes-roles) guide. + + + +### Validate + +Validate that the namespace was successfully created. + +
+ +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + +3. Select the cluster that contains the namespace you created and view its details. + + +4. In the **Settings** pane, click **RBAC** > **Namespaces** tab. + +
+ + The namespace you created will be listed under **Workspace Quota**. + + + +## Assign Resource Quotas + +You can assign resource quotas for resource sharing among multiple users who have access to a namespace. + + + +### Prerequisites + +- A running cluster with at least one namespace. + + + +### Assign Resource Quotas to a Namespace + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + +3. Select the cluster with the namespace to which you will assign workspace quotas. + + +4. Navigate to the **Workloads** > **Namespaces** tab, and click the **Manage Namespaces** button. + + +5. The **Settings** pane displays with **RBAC** > **Namespaces** preselected. + + +6. Select the namespace listed in the **Workspace Quota** section. + + +![Cluster Settings pane displaying Workspace Quota section of Namespaces tab](/clusters_cluster-management_ns-resource-quota.png) + +
+ +7. Type the number of CPU and Memory to allocate to the namespace, and save your changes. + + + +## Delete a Namespace + +When you delete a namespace, all the resources that were created within the namespace will also be deleted, such as pods, services and endpoints, config maps, and more. + + + +### Prerequisites + +- Ensure that no other resources depend on the namespace being deleted. + +### Delete a Namespace from a Cluster + +1. Navigate to the left **Main Menu** and click on **Clusters**. + + +2. Select the cluster in which you want to create a namespace. + + +3. Navigate to the **Workloads** > **Namespaces** tab, and click the **Manage Namespaces** button. The **Settings** pane displays with **RBAC** preselected and the **Namespaces** tab opened by default. + + +4. Select the namespace you want to delete, which is listed in the **Workspace Quota** section, and click the trash can icon. + + +### Validate + +Validate that the namespace was successfully deleted. + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + +3. Select the cluster that contains the namespace you want to delete and view its details. + + +4. In the **Settings** pane, click **RBAC** > **Namespaces** tab. + +The namespace you created is no longer listed under **Workspace Quota**. + + + + diff --git a/docs/docs-content/clusters/cluster-management/noc-ui.md b/docs/docs-content/clusters/cluster-management/noc-ui.md new file mode 100644 index 0000000000..97a5335c9f --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/noc-ui.md @@ -0,0 +1,115 @@ +--- +sidebar_label: "NOC-UI" +title: "Clusters Location view on Map - NOC UI" +description: "Clusters Location View on Map - NOC UI" +hide_table_of_contents: false +sidebar_position: 180 +tags: ["clusters", "cluster management"] +--- + + +Palette provides an intuitive user interface (UI) based on location monitoring for the clusters running at multiple locations. The Palette +UI displays the region set during the cluster creation process for public cloud clusters and the location on the UI map. You can set the location for private cloud clusters through the Palette UI. You can also monitor the location details of all the clusters running under a specific scope. + +## Set the Cluster Location + +The private cloud clusters must set the location explicitly. To set the location: + +* Login to [Palette](https://console.spectrocloud.com). + + +* Select the cluster to which the location needs to be updated and go to the **Cluster details** page of the cluster. + + +* Open **Settings** and then navigate to the **Cluster Settings**. + + +* Select **Location** from the left menu, set the cluster's location, and save the changes. + + +* The location is then visualized on the UI map display. + + +## Monitor your Cluster Location + + +To monitor the cluster location follow the below steps: + + +* Log in to Palette and select **Clusters** from the left **Main Menu**. + + +* Go to **Map View Icon** below the **Clusters** tab. + + +The map will display all the cluster locations under that user’s scope. + +## Map Filters + +Palette Map Filters filter out specific clusters using built-in or custom filters for an enhanced user experience. The map filter allows you to narrow down clusters that may be dispersed geographically, across multiple scopes, or different cloud providers. You have two types of filters:- **Built-in Filters** and **Custom Filters**. 
+
+### Built-In Filters
+
+Built-in filters are available in the Palette console by default and can be selected from the **Add Filter** drop-down menu. You can use the following built-in filters.
+
+
+|**Built-In Filters** |Description|
+|---------------------|-----------|
+|Deleted Only| Displays only the clusters deleted in the last 72 hours.|
+|Imported Only| Displays only the imported (brownfield) clusters.|
+|Updates Pending| Displays only the clusters with pending updates.|
+
+
+### Custom Filters
+
+Palette supports a wide range of custom filters in a fixed format. To add a custom filter:
+
+
+* Log in to Palette and select **Clusters** from the left **Main Menu**.
+
+
+* Click on the **+Add Filter** button on the top menu and select **+ Add custom filter**.
+
+
+* The format for adding a custom filter is as follows. Refer to the example after this list.
+
+  `Conjunction - Condition - Operator - Value`
+
+
+* You can add more than one custom filter simultaneously, and they work together with the chosen conjunction.
+
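+
+For example, a custom filter that narrows the view to AWS clusters that still have updates pending could be expressed as follows. The values are illustrative only.
+
+```
+and - Environment - is - AWS
+and - Updates Pending - is - true
+```
+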
+ +You can apply these filters for both map view and cluster listing view. + + +|Conjunction| Condition |Operator|Value +|--|--|--|---| +|and/or|Cloud Account|[operator](#operators) |Custom value | +|and/or|Name|[operator](#operators) | Custom value| +|and/or|Profiles|[operator](#operators) |Custom value | +|and/or|Status|[operator](#operators) |Custom value| +|and/or|Environment|[operator](#operators) |Custom value| +|and/or|Environment|[operator](#operators) |Custom value| +|and/or|Health Status|[operator](#operators) |Custom value| +|and/or|Deleted|[operator](#operators) |Custom value| +|and/or|Read Only Import| [operator](#operators)|Custom value| +|and/or|Imported|[operator](#operators) |Custom value| +|and/or|Updates Pending|[operator](#operators) |Custom value| +|and/or|Tags|[operator](#operators) |Custom value| +|and/or|Region| [operator](#operators)|Custom value| + + +### Operators + + +| **Operator** | **Description** | +|----------|-------------| +|is|The value is equal to the custom value.| +|is not|The value is not equal to the custom value.| +|contains|The value contains the custom value.| +|does not contain|The value does not contain the custom value.| +|begins with|The value begins with the custom value.| +|does not begin|The value does not begin with the custom value.| + + diff --git a/docs/docs-content/clusters/cluster-management/node-pool.md b/docs/docs-content/clusters/cluster-management/node-pool.md new file mode 100644 index 0000000000..ec9baf57a0 --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/node-pool.md @@ -0,0 +1,189 @@ +--- +sidebar_label: "Node Pools" +title: "Node Pools" +description: "Learn about the node pools and applying changes to a node pool." +hide_table_of_contents: false +sidebar_position: 190 +tags: ["clusters", "cluster management"] +--- + +A node pool is a group of nodes within a cluster that all have the same configuration. You can use node pools for different workloads. For example, you can create a node pool for your production workloads and another for your development workloads. You can update node pools for active clusters or create a new one for the cluster. + + +:::caution + +Ensure you exercise caution when modifying node pools. We recommend creating a [backup](backup-restore/backup-restore.md) before you make a change in the event a configuration change causes an issue. + +::: + +## Repave Behavior and Configuration + +In Kubernetes, the term "repave" refers to the process of replacing a node with a new node. [Repaving](../../glossary-all.md#repavement) is a common practice in Kubernetes to ensure that nodes are deployed with the latest version of the operating system and Kubernetes. Repaving is also used to replace nodes that are unhealthy or have failed. You can configure the repave time interval for a node pool. + +Different types of repaving operations may occur, depending on what causes them: + +* **Control plane repave**: This takes place when certain changes are made to the Kubernetes configuration, such as changing the **apiServer** specification. This type of repave also occurs when there are changes in the hardware specifications of the control plane nodes, such as during a node scale-up operation or when changing from one instance type to another. Control plane nodes are repaved sequentially. + +* **Worker node pool repave**: This happens when changes to a node pool's specifications cause the the existing nodes to become incompatible with the pool's specified criteria. 
For instance, changing the hardware specifications of a worker pool. Nodes within the affected pool are sequentially replaced with new nodes that meet the updated specifications. + +* **Full cluster repave**: This occurs if changes are made to the Operating System (OS) layer or if modifications to the Kubernetes layer impact all nodes, such as when upgrading to a different Kubernetes version. All nodes across all pools are sequentially repaved starting with the control plane. + +You can customize the repave time interval for all node pools except the master pool. The default repave time interval is 0 seconds. You can adjust the node repave time interval during or after cluster creation. If you need to modify the repave time interval post-cluster creation, follow the [Change a Node Pool](#change-a-node-pool) instructions below. + +## Node Pool Configuration Settings + +The following tables contain the configuration settings for node pools. Depending on the type of node pool, some of the settings may not be available. + +
+
+### Master Node Pool
+
+| **Property** | **Description** |
+|-----------|-------------|
+| **Node pool name** | A descriptive name for the node pool. |
+| **Number of nodes in the pool** | Number of nodes to be provisioned for the node pool. For the master pool, this number can be 1, 3, or 5. |
+| **Allow worker capability** | Select this option to allow workloads to be provisioned on master nodes. |
+| **Additional Labels** | Optional labels apply placement constraints on a pod. For example, you can add a label to make a node eligible to receive the workload. To learn more, refer to the [Overview on Labels](taints.md#labels). |
+| **Taints** | Sets toleration to pods and allows (but does not require) the pods to schedule onto nodes with matching taints. To learn more, refer to the [Overview on Taints](taints.md#taints).|
+| **Availability Zones** | The Availability Zones from which to select available servers for deployment. If you select multiple zones, Palette will deploy servers evenly across them as long as sufficient servers are available to do so. |
+| **Disk Size** | The amount of disk storage to allocate to each node. |
+
+
+### Worker Node Pool
+
+| **Property** | **Description** |
+|-----------|-------------|
+| **Node pool name** | A descriptive name for the worker pool. |
+| **Number of nodes in the pool** | Number of nodes to be provisioned for the node pool. |
+| **Node repave interval** | The time interval in seconds between repaves. The default value is 0 seconds. |
+| **Additional Labels** | Optional labels apply placement constraints on a pod. For example, you can add a label to make a node eligible to receive the workload. To learn more, refer to the [Overview on Labels](taints.md#labels). |
+| **Taints** | Sets toleration to pods and allows (but does not require) the pods to schedule onto nodes with matching taints. To learn more, refer to the [Overview on Taints](taints.md#apply-taints-to-nodes).|
+| **Rolling update** | The update policy. **Expand first** launches new nodes and then terminates old nodes. **Contract first** terminates old nodes and then launches new ones. |
+| **Instance Option** | AWS options for compute capacity. **On Demand** gives you full control over the instance lifecycle without long-term commitment. **Spot** allows the use of spare EC2 capacity at a discount, but the capacity can be reclaimed if needed. |
+| **Instance Type** | The compute size. |
+| **Availability Zones** | The Availability Zones from which to select available servers for deployment. If you select multiple zones, Palette will deploy servers evenly across them as long as sufficient servers are available to do so. If you select public subnets, ensure those subnets have automatic public IP addresses assigned. Otherwise, node deployment errors will occur. Automatic public IP address assignment is typically handled by the infrastructure provider Palette is deploying a cluster to. Discuss this with your network team for additional guidance. |
+| **Disk Size** | The amount of disk storage to allocate to each node. |
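+
+As an illustration of how the **Taints** setting affects workloads, a pool tainted with `key1=value1:NoSchedule` only accepts pods that declare a matching toleration, such as the sketch below. The key, value, and container image are placeholders.
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: example-workload
+spec:
+  containers:
+    - name: app
+      image: nginx:1.25
+  # The toleration must match the taint applied to the node pool.
+  tolerations:
+    - key: "key1"
+      operator: "Equal"
+      value: "value1"
+      effect: "NoSchedule"
+```
+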
+ + +:::caution + +Some features may not be available for all infrastructure providers. Review each infrastructure provider's node pool configuration settings to learn more. + +::: + +
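+
+After the nodes are deployed, you can confirm that the labels and taints you configured are present by querying the cluster with kubectl, assuming you have the cluster's kubeconfig file.
+
+```shell
+# List nodes with their labels.
+kubectl get nodes --show-labels
+
+# Display the taints applied to each node.
+kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints
+```
+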
+ + +## Create a New Node Pool + + +### Prerequisites + + +* A Palette-deployed cluster. + + +* Sufficient permissions to edit the cluster. + + +### Create Node Pool + + +You can create a new node pool for an active cluster. To create a new node pool follow the steps below. + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + +3. Click on the row of the cluster you want to edit the node pool. + + +4. Click on the **Nodes** tab. + + +5. Click on **New Node Pool**. + + +6. Fill out the input fields in the **Add node pool** page. Refer to the [Node Pool Configuration Settings](#node-pool-configuration-settings) tables for more information on each field. + + +7. Click on **Confirm** to create the new node pool. + + +### Validate + +After you create a new node pool, you can validate the node pool by following the steps below. + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + +3. Click on the row of the cluster you added the new node pool. + + +4. Click on the **Nodes** tab. + + +5. Ensure the new node pool is listed in the **Node Pools** section and that all compute instances are in the healthy status. + +## Change a Node Pool + + +You can apply changes to a node pool after a cluster is created and deployed. You can change the node pool's taints label, node repavement interval, number of compute instances in the node pool and more. To make changes to an active cluster's node pools, follow the steps below. + +### Prerequisites + + +* A Palette deployed cluster. + + +* Sufficient permissions to edit the cluster. + + +### Edit Node Pool + + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + +3. Select a cluster to edit the node pool. + + +4. Click on the **Nodes** tab. + + +5. The nodes details page is where you can review the existing node pools and their configuration. You can also add a new node pool from this page. Click on the **Edit** button to make changes to the node pool. + + +6. Make the changes as needed. Refer to the [Node Pool Configuration Settings](#node-pool-configuration-settings) tables for more information on each field. + + +7. Click on **Confirm** to update the node pool. + +### Validate + +After you have modified a new node pool, you can validate the node pool by following the steps below. + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + +3. Select the cluster with the new node pool. + + +4. Click on the **Nodes** tab. + + +5. Ensure the new node pool is listed in the **Node Pools** section and that all compute instances are in the healthy status. diff --git a/docs/docs-content/clusters/cluster-management/os-patching.md b/docs/docs-content/clusters/cluster-management/os-patching.md new file mode 100644 index 0000000000..8683f12cdd --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/os-patching.md @@ -0,0 +1,64 @@ +--- +sidebar_label: "OS Patching" +title: "OS Patching" +description: "Learn how to patch the operating system on your cluster nodes." +hide_table_of_contents: false +sidebar_position: 60 +tags: ["clusters", "cluster management"] +--- + + +Palette deploys Kubernetes clusters using pre-built VM images. 
The operating system (OS) on these images is the latest patch version at the time the image is built for the supported major-minor streams. For example, if Ubuntu 18.04 is selected for the OS layer during provisioning, the OS on the cluster nodes might be using 18.04.3 LTS, assuming that was the latest version available at the time the VM image was built. However, newer versions continue to be published in the upstream repositories as improvements, bug fixes, and security patches are released.
+
+OS patching allows the operating system on the running cluster nodes to be updated to the latest patch version so that it is up to date with the latest fixes. Assume Ubuntu 18.04.4 LTS and 18.04.5 LTS are released over time to fix important security issues. The OS patching operation will identify 18.04.5 as the latest version and upgrade the cluster nodes to it. The following choices are available for patching the operating system to the latest version.
+
+## Patch on Boot
+
+During cluster creation, while configuring the cluster, you can select **Patch OS on boot**. In this case, the operating system on all cluster nodes is updated to the latest patch version when the cluster VMs are initially deployed.
+
+To enable **Patch OS on boot**, ensure you are in the **Settings** step of the cluster creation wizard. Next, click on the **Manage machines** tab, and select the check box for **Patch OS on boot**.
+
+## Reboot
+
+Palette supports the **Reboot if Required** feature to control system reboots as part of cluster upgrades. Some system upgrades require a reboot to apply the changes to the cluster. Check the **Reboot if Required** checkbox to allow the reboot. If this option is unchecked, the system reboot will be restricted.
+
+To enable **Reboot if Required**, ensure you are in the **Settings** step of the cluster creation wizard. Next, click on the **Manage machines** tab, and select the check box for **Reboot if Required**.
+
+## Scheduled
+
+Palette also supports OS patching on a schedule. The patching schedule can be set initially when creating a cluster, as well as at any given point later. The following scheduling options are available.
+
+
+* Never
+* Every week on Sunday at midnight
+* Every two weeks at midnight
+* Every month on the 1st at midnight
+* Every two months on the 1st at midnight
+* Custom OS patch for an exact month, day, hour, and minute of your choice
+
+
+To enable **OS Patching Schedule** during cluster creation, ensure you are in the **Settings** step of the cluster creation wizard. Next, click on the **Manage machines** tab, and select the drop-down input for **OS Patching Schedule**.
+
+
+To enable **OS Patching Schedule** for an active cluster, navigate to the **Main Menu** and click on **Clusters**. In the cluster view page, find the row for the respective cluster you want to configure OS patching for. Click on the three dots at the end of the row to access the cluster settings. Next, click on the **Machine Management** tab, and select the drop-down input for **OS Patching Schedule**.
+
+## On-Demand
+
+This option provides a way for you to perform an immediate update.
+
+To perform an **On-Demand** update for an active cluster, navigate to the **Main Menu** and click on **Clusters**. In the cluster view page, find the row for the respective cluster you want to configure OS patching for. Click on the three dots at the end of the row to access the cluster settings. Next, click on the **Machine Management** tab, and select the drop-down input for **OS Patching Schedule**.
+
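+
+After a patch has been applied, whether on a schedule or on demand, you can confirm the operating system version the nodes are actually running by querying the cluster directly, assuming you have the cluster's kubeconfig file.
+
+```shell
+kubectl get nodes -o wide
+```
+
+The **OS-IMAGE** and **KERNEL-VERSION** columns reflect the patch level currently installed on each node.
+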
+
+:::caution
+
+This operation is not available for existing Kubernetes clusters imported into Palette. It is also not available for managed Kubernetes services such as EKS and AKS. For EKS clusters, an OS update can be triggered from Palette, which initiates a request on AWS to update the cluster node groups to the latest patch version.
+
+:::
+
+## Monitoring
+
+The clusters' OS patching status can be monitored through the **Node** tab of the cluster details page. The following patch details are available for you to monitor.
+
+| **Field** | **Description** |
+|---|---|
+| Last Applied Patch Time | The date and time of the last OS patch.|
+| Patched Version| The latest patched version.|
diff --git a/docs/docs-content/clusters/cluster-management/pack-monitoring.md b/docs/docs-content/clusters/cluster-management/pack-monitoring.md
new file mode 100644
index 0000000000..1f42c54433
--- /dev/null
+++ b/docs/docs-content/clusters/cluster-management/pack-monitoring.md
@@ -0,0 +1,27 @@
+---
+sidebar_label: "Pack Monitoring"
+title: "Pack Monitoring"
+description: "Learn how to monitor the status of packs in Palette"
+hide_table_of_contents: false
+sidebar_position: 140
+---
+
+Palette provides a color scheme to help you monitor pack installation progress during Palette workload cluster deployment. Different colors represent stages of pack installation so you can track the progress of packs as they are added to a cluster.
+
+The Cluster Profile page displays the list of packs associated with the cluster you are monitoring. In addition, the page also includes information on the status and the installation progress of the installed packs. The following are the possible pack statuses.
+
+
+| **Indicator Status** | **Description** |
+| -------------------- | ------------------------------------------------------------------- |
+| **Gray**  | The pack is onboarding, and it's right before the deployment stage. |
+| **Blue**  | The pack is in processing mode. |
+| **Green** | The pack installation is successful. |
+| **Red** 
| The pack installation has failed. | + + + +## Cluster Profiles Pack Status + +The following image shows the pack status for a cluster profile. + +![An image of a cluster profile with various pack statuses.](/pack_status.png) diff --git a/docs/docs-content/clusters/cluster-management/palette-lock-cluster.md b/docs/docs-content/clusters/cluster-management/palette-lock-cluster.md new file mode 100644 index 0000000000..ebf61bfe53 --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/palette-lock-cluster.md @@ -0,0 +1,107 @@ +--- +sidebar_label: "Platform Settings" +title: "Platform Settings" +description: "Platform Settings on Palette" +hide_table_of_contents: false +sidebar_position: 170 +tags: ["clusters", "cluster management"] +--- + +Palette provides the following platform settings: + +* [Pause Platform Updates](#pause-platform-updates) +* [Auto Remediation](#auto-remediation) + +## Pause Platform Updates + +Palette supports the **Pause Platform Updates** feature to exclude a cluster or a group of clusters from getting upgraded when Palette is upgraded. The use cases of this feature are: + + + +* Pause Updates for Single Cluster +* Pause Updates for all the Clusters within the Project Scope +* Pause Updates for all Clusters within the Tenant Scope + + +
+
+### Pause Updates for Single Cluster
+
+An individual cluster under the Project scope or Tenant scope can be excluded from Palette upgrades. To pause updates for a single cluster, follow the steps below.
+
+
+1. Log in to the Palette console as a Tenant or Project administrator.
+
+
+2. Navigate to the **Clusters** page from the left **Main Menu** and select the cluster whose updates you want to pause.
+
+
+3. From the cluster details page, click **Settings** > **Cluster Settings**.
+
+
+4. Toggle the **Pause Platform Updates** button to pause updates for the cluster so that its cluster management services are not upgraded when Palette is upgraded.
+
+
+5. To resume updates for the cluster, toggle **Pause Platform Updates** off again.
+
+
+### Pause Updates for all the Clusters within the Project Scope
+
+All the clusters under a project can be excluded from Palette upgrades. To pause updates for all clusters under the Project scope, follow the steps below.
+
+1. Log in to the Palette console as a Project administrator.
+
+
+2. Select **Project Settings** from the left **Main Menu**.
+
+
+3. From the **Project Settings** page, select **Platform Updates** and toggle the **Pause Platform Updates** button. This restricts all the clusters under that project scope from having their cluster management services upgraded when Palette is upgraded.
+
+
+4. To resume updates for the clusters, toggle **Pause Platform Updates** off again.
+
+
+### Pause Updates for all Clusters within the Tenant Scope
+
+
+All the clusters under a tenant can be excluded from Palette upgrades. To pause updates for all clusters under the Tenant scope, follow the steps below.
+
+
+1. Log in to the Palette console as a Tenant administrator.
+
+
+2. Select **Tenant Settings** from the left **Main Menu**.
+
+
+3. From **Tenant Settings**, select **Platform Updates** and toggle the **Pause Platform Updates** button. This restricts all the clusters under that tenant scope from having their cluster management services upgraded when Palette is upgraded.
+
+
+4. To resume updates for the clusters, toggle **Pause Platform Updates** off again.
+
+
+ +## Auto Remediation + +Palette provides Cluster Auto Remediation as a node reconciliation operation. When Cluster Auto Remediation is on, unhealthy nodes in all the Palette-provisioned clusters will automatically be replaced with new nodes. Turning off this feature will disable auto remediation. +This feature can work under the scope of: + +* Tenant + +* Project + +To enable auto remediation: + +* Login to Palette console as Tenant/Project admin. + +* Go to `Tenant Settings`/`Project Settings` as per the user scope. + +* Select `Platform Settings` from the left menu and toggle `Cluster Auto Remediation` toggle button. + +:::info +This does not apply to EKS, AKS or TKE clusters. +::: diff --git a/docs/docs-content/clusters/cluster-management/palette-webctl.md b/docs/docs-content/clusters/cluster-management/palette-webctl.md new file mode 100644 index 0000000000..852b5b809a --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/palette-webctl.md @@ -0,0 +1,204 @@ +--- +sidebar_label: "Kubectl" +title: "Kubectl" +description: "Learn how to access your Kubernetes cluster with the kubectl CLI." +hide_table_of_contents: false +sidebar_position: 160 +tags: ["clusters", "cluster management", "kubectl"] +--- + + +You can access your Kubernetes cluster by using the [kubectl CLI](https://kubernetes.io/docs/reference/kubectl/). Palette automatically generates a **kubeconfig** file for your cluster that you can download and use to connect with your host cluster. + + +## Access Cluster with CLI + +Use the following steps to connect to your host cluster with the kubectl CLI. + + +:::info + +If you are using Palette Virtual Machine (VM) Management, you can find steps on how to connect to your virtual machines with the [virtctl CLI](https://kubevirt.io/user-guide/operations/virtctl_client_tool/) in the [Access VM Cluster with virtctl](../../vm-management/create-manage-vm/access-cluster-with-virtctl.md) guide. The virtctl CLI facilitates some of the VM operations you will perform, such as copying, pasting, or transferring files to and from a virtual machine using Secure Copy Protocol (SCP). + +::: + +### Prerequisites + +- Kubectl installed locally. Use the Kubernetes [Install Tools](https://kubernetes.io/docs/tasks/tools/) for additional guidance. + + +- A host cluster that is either publicly accessible OR a private host cluster that has the [Spectro Proxy](../../integrations/frp.md) installed. + + +:::caution + +If you are using [OIDC](/clusters/cluster-management/cluster-rbac#userbacwithoidc) with your host cluster, you will need the kubelogin plugin. Refer to the kubelogin GitHub repository [README](https://github.com/int128/kubelogin#setup) for installation guidance. + +::: + + +### Set up Kubectl + +1. Log in to [Palette](https://spectrocloud.com). + + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + + +3. Select the host cluster you want to access. + + +4. From the cluster overview page, navigate to the middle column containing cluster details and locate the **Kubernetes Config File** row. + + +5. Click on the kubeconfig link to download the file. + + ![Arrow pointing to the kubeconfig file](/clusters_cluster-management_palette-webctl_cluster-details-overview.png) + + +6. Open a terminal window and set the `KUBECONFIG` environment variable to the file path of the **kubeconfig** file. + + Example: + ```shell + export KUBECONFIG=~/Downloads/dev-cluster.kubeconfig + ``` + + +You can now issue kubectl commands against your host cluster. 
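+
+For example, the following commands list the cluster nodes and display the API server address, which is a quick way to confirm the kubeconfig file is working.
+
+```shell
+kubectl get nodes
+kubectl cluster-info
+```
+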
+ + +### Validate + +Verify you have access to your host cluster by issuing kubectl commands against it. + + + + diff --git a/docs/docs-content/clusters/cluster-management/reconfigure.md b/docs/docs-content/clusters/cluster-management/reconfigure.md new file mode 100644 index 0000000000..a358bb7e70 --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/reconfigure.md @@ -0,0 +1,51 @@ +--- +sidebar_label: "Reconfigure" +title: "Reconfigure" +description: "Reconfiguration-scaling Events on Palette" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["clusters", "cluster management"] +--- + +Scaling a cluster up or down involves changing the size of node pools. The following are the steps to scale up/down a cluster: +* Access the ‘Nodes’ view of the cluster. +* For the desired node pool, change the size directly from the nodes panel or edit node pool settings. +* After the node pool configuration is updated, the scale-up/down operation is initiated in a few minutes. +* Provisioning status is updated with the ongoing progress of the scale operation. + +:::info +The master node pool is scaled from 1 to 3 or 3 to 5 nodes, etc. However, the scale-down operation is not supported for master nodes. +::: + +## Reconfiguring the Cluster Nodes + +The following are the steps to reconfigure worker pool nodes: +* Access the 'Nodes' view for the cluster. +* Edit the settings of the desired node pool. +* Change the number of nodes, rolling update setting, availability zones, flavor, and Disk size to the desired settings. +* Save the node pool settings. After the node pool settings are updated, the node pool reconfiguration begins within a few minutes. The older nodes in the node pool are deleted and replaced by new nodes launched with a new instance type configured. +* Provisioning status is updated with the ongoing progress of nodes being deleted and added. + +## Adding a New Worker Pool + +The following are the steps to add a new worker node pool to a cluster: +* Invoke the option to ‘Add Node Pool’ from the cluster’s node information page. +* Provide node pool settings as follows: + * A descriptive name for the node pool + * The number of nodes in the node pool + * Rolling update setting, availability zones, flavor, and Disk size settings + * Save the node pool settings + +The new worker pool settings are updated, and cluster updates begin within a few minutes. Provisioning status updates will be available with the ongoing progress of tasks related to adding new nodes. + + +## Removing a Worker Pool +The following steps need to be performed to remove a worker pool from the cluster: +* Access the ‘Nodes’ view of the cluster +* Delete the desired worker pool and confirm the deletion +* Upon confirmation, the worker node deletion begins in a few minutes + + +:::info + Support of reconfiguration is not available for existing clusters imported into Palette for any cloud type. +::: diff --git a/docs/docs-content/clusters/cluster-management/remove-clusters.md b/docs/docs-content/clusters/cluster-management/remove-clusters.md new file mode 100644 index 0000000000..c7edb6fb9d --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/remove-clusters.md @@ -0,0 +1,113 @@ +--- +sidebar_label: "Cluster Removal" +title: "Cluster Removal" +description: "Learn how to remove a cluster deployed and managed by Palette." 
+hide_table_of_contents: false +sidebar_position: 220 +tags: ["clusters", "cluster management"] +--- + + +When you delete a cluster it results in the removal of all compute instances and associated resources created for the cluster. Use the following steps to delete a cluster. + +### Prerequisites + +* A host cluster. + + + +## Removal + +1. Log in to [Palette](https://console.spectrocloud.com) and ensure you are in the correct project scope. + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + +3. Click on the cluster you want to delete. + + +4. Click on the **Settings drop-down Menu**. + + +5. Click on **Delete Cluster**. + + +6. Type the cluster name and click on **OK**. + +The cluster status is updated to **Deleting** while cluster resources are removed. When all resources are successfully deleted, the cluster status is updated to **Deleted**, and the cluster is removed from the cluster list. + + +## Validate + +To validate the host cluster is deleted, use the following steps. + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and click on **Cluster**. + + +4. Check the box labeled **Deleted only** to view all the clusters deleted in the last 72 hours. + +The cluster you deleted is now listed along with other previously deleted clusters. + + + +## Force Delete a Cluster + +If a cluster is stuck in the **Deleting** state for a minimum of 15 minutes, it becomes eligible for force deletion. You can force delete a cluster from the tenant and project admin scope. + +To force delete a cluster, follow the same steps outlined above. After 15 minutes, a **Force Delete Cluster** option is available in the **Settings drop-down Menu**. The drop-down menu will provide you with an estimated remaining time left before the force deletion becomes available. + +
+ + +A force delete can result in Palette-provisioned resources being missed in the removal process. Verify there are no remaining resources. Use the following list to help you identify resources to remove. + +
+ +:::caution + +Failure in removing provisioned resources can result in unexpected costs. + +::: + +
+ +**Azure** + +- Virtual Network (VNet) +- Static Public IPs +- Virtual Network Interfaces +- Load Balancers +- VHD +- Managed Disks +- Virtual Network Gateway + + + +**AWS** + +- VPC +- Elastic IP +- Elastic Network Interfaces +- Internet Gateway +- Elastic Load Balancers +- EBS Volumes +- NAT Gateway + + +**GCP** + +- Virtual Private Cloud (VPC) Network +- Static External IP Address +- Network Interfaces +- Cloud NAT +- Cloud Load Balancing +- Persistent Disks +- Cloud Router + + + + diff --git a/docs/docs-content/clusters/cluster-management/ssh-keys.md b/docs/docs-content/clusters/cluster-management/ssh-keys.md new file mode 100644 index 0000000000..45bbb02a1d --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/ssh-keys.md @@ -0,0 +1,98 @@ +--- +sidebar_label: "SSH Keys" +title: "SSH Keys" +description: "Learn how to create and manage SSH keys in Palette." +hide_table_of_contents: false +sidebar_position: 10 +tags: ["clusters", "cluster management"] +--- + + +Palette supports SSH (Secure Shell) to establish, administer, and communicate with remote clusters. This section describes creating and managing SSH Keys in the Palette Management Console. + +## Scope of SSH Key + +Palette groups clusters for logical separation into [Projects](../../projects.md). Users and teams can be assigned roles within a project for granular control over permissions within the project scope. SSH key authentication is scoped to a project. Multiple users can gain access to a single project. To access a cluster with SSH, you need a public SSH key registered in Palette. + +## Prerequisites + +* Access to a terminal window. + + +* The utility ssh-keygen or similar SSH key generator software. + + +## Create and Upload an SSH Key + +Follow these steps to create an SSH key using the terminal and upload it to Palette: + +1. Open the terminal on your computer. + + +2. Check for existing SSH keys by invoking the following command. + +
+ + ```shell + ls -la ~/.ssh + ``` + If you see files named **id_rsa** and **id_rsa.pub**, you already have an SSH key pair and can skip to step 8. If not, proceed to step 3. + + +3. Generate a new SSH key pair by issuing the following command. + +
+ + ```shell + ssh-keygen -t rsa -b 4096 -C "your_email@example.com" + ``` + + Replace `your_email@example.com` with your actual email address. + + +4. Press Enter to accept the default file location for the key pair. + + + +5. Enter a passphrase (optional) and confirm it. We recommend using a strong passphrase for added security. + + +6. Copy the public SSH key value. Use the `cat` command to display the public key. + +
+ + ```shell + cat ~/.ssh/id_rsa.pub + ``` + Copy the entire key, including the `ssh-rsa` prefix and your email address at the end. + + +7. Log in to [Palette](https://console.spectrocloud.com). + + +8. Navigate to the left **Main Menu**, select **Project Settings**, and then the **SSH Keys** tab. + + +9. Open the **Add New SSH Key** tab and complete the **Add Key** input form: + * **Name**: Provide a unique name for the SSH key. + + + * **SSH Key**: Paste the SSH public key contents from the key pair generated earlier. + + +10. Click **Confirm** to complete the wizard. + +
+ +:::info + +You can edit or delete SSH keys later by using the **three-dot Menu** to the right of each key. + +::: + +During cluster creation, assign your SSH key to a cluster. You can use multiple keys to a project, but only one key can be assigned to an individual cluster. + +## Validate + +You can validate that the SSH public key is available in Palette by attempting to deploy a host cluster. During the host cluster creation wizard, you will be able to assign the SSH key to the cluster. Refer to the [Deploy a Cluster](../public-cloud/deploy-k8s-cluster.md) tutorial for additional guidance. + diff --git a/docs/docs-content/clusters/cluster-management/taints.md b/docs/docs-content/clusters/cluster-management/taints.md new file mode 100644 index 0000000000..befc6fa888 --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/taints.md @@ -0,0 +1,50 @@ +--- +sidebar_label: "Node Labels and Taints" +title: "Node Labels and Taints" +description: "Learn how to apply labels and taints to nodes in a cluster, and how to specify Namespace labels and annotations to Add-on packs and packs for Container Storage Interface (CSI) and Container Network Interface (CNI) drivers." +hide_table_of_contents: false +sidebar_position: 100 +tags: ["clusters", "cluster management"] +--- + + + + +## Taints + +Node affinity is a property of Pods that attracts them to a set of nodes (either as a preference or a hard requirement. Taints are the opposite -- they allow a node to repel a set of pods. + +Tolerations are applied to pods and allow (but do not require) the pods to schedule onto nodes with matching taints. + +Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. One or more taints are applied to a node; this marks that the node should not accept any pods that do not tolerate the taints. + +Palette enables Taints to be applied to a node pool to restrict a set of intolerant pods getting scheduled to a Palette node pool. Taints can be applied during initial provisioning of the cluster and modified later. + +### Apply Taints to Nodes + +Taints can be applied to worker pools while creating a new cluster from the node pool configuration page as follows: + +* Enable the “Taint” select button. +* To apply the Taint, set the following parameters: + * Key: Custom key for the Taint + * Value: Custom value for the Taint key + * Effect: The effects define what will happen to the pods that do not tolerate a Taint. There are 3 Taint effects: + * NoSchedule: A pod that cannot tolerate the node Taint, should not be scheduled to the node. + * PreferNoSchedule: The system will avoid placing a non-tolerant pod to the tainted node but is not guaranteed. + * NoExecute: New pods will not be scheduled on the node, and existing pods on the node, if any will be evicted if they do not tolerate the Taint. + +Eg: Key = key1; + Value = value1; + Effect = NoSchedule + +Taints can also be updated on a running cluster by editing a worker node pool from the 'Nodes' tab of the cluster details page. + +## Labels + +You can constrain a Pod to only run on a particular set of Node(s). There are several ways to do this and the recommended approaches such as, nodeSelector, node affinity, etc all use label selectors to facilitate the selection. Generally, such constraints are unnecessary, as the scheduler will automatically do a reasonable placement (e.g. spread your pods across nodes so as not place the pod on a node with insufficient free resources, etc.) 
but there are some circumstances where you may want to control which node the pod deploys to - for example to ensure that a pod ends up on a machine with an SSD attached to it, or to co-locate pods from two different services that communicate a lot into the same availability zone. + +Palette enables our users to Label the nodes of a master and worker pool by using key/value pairs. These labels do not directly imply anything to the semantics of the core system but are intended to be used by users to drive use cases where pod affinity to specific nodes is desired. Labels can be attached to node pools in a cluster during creation and can be subsequently added and modified at any time. Each node pool can have a set of key/value labels defined. The key must be unique across all node pools for a given cluster. + +### Apply Labels to Nodes + +Labels are optional and can be specified in the **Additional Labels** field of the node pool configuration form. Specify one or more values as 'key:value'. You can specify labels initially during cluster provisioning and update them any time by editing a node pool from the **Nodes** tab of the cluster details page. diff --git a/docs/docs-content/clusters/cluster-management/workloads.md b/docs/docs-content/clusters/cluster-management/workloads.md new file mode 100644 index 0000000000..58639c91cf --- /dev/null +++ b/docs/docs-content/clusters/cluster-management/workloads.md @@ -0,0 +1,27 @@ +--- +sidebar_label: "Workload Visibility" +title: "Workload Visibility" +description: "Browse all cluster resources such as pods, deployment sets, etc." +hide_table_of_contents: false +sidebar_position: 90 +tags: ["clusters", "cluster management"] +--- + + +Palette provides visibility into the resources running inside workload clusters. Workloads are displayed for all infrastructure providers. +These resources are displayed on the cluster details page. Following is the list of resources shown in the workload browser: + + +| **Resource** | **Description** | +|----------|-------------| +| Namespaces | Namespaces are a way to divide cluster resources between multiple users (via resource quota). | +| Pods | Pods are the smallest deployable units of computing that can be created and managed in Kubernetes. | +| DeploymentSets | DeploymentSets are a way to create and manage groups of identical pods. | +| DaemonSets | DaemonSets are a way to create and manage pods that are deployed on all, or some nodes. | +| StatefulSets | StatefulSets are a way to create and manage pods that have persistent storage and are deployed in order. | +| Jobs | Jobs are a way to create and manage pods that are active until completion. | +| CronJobs | CronJobs are a way to create and manage pods that deploy on a schedule. | +| Role Bindings | Role Bindings are a way to create and manage access to cluster resources. | +| Cluster Role Bindings | Cluster Role Bindings are a way to create and manage access to cluster resources. | + + diff --git a/docs/docs-content/clusters/clusters.md b/docs/docs-content/clusters/clusters.md new file mode 100644 index 0000000000..577cae674c --- /dev/null +++ b/docs/docs-content/clusters/clusters.md @@ -0,0 +1,282 @@ +--- +sidebar_label: "Clusters" +title: "Creating clusters on Palette" +description: "The methods of creating clusters for a speedy deployment on any CSP" +hide_table_of_contents: false +sidebar_custom_props: + icon: "clusters" +--- + + +Kubernetes clusters in Palette are instantiated from cluster profiles. 
A cluster definition in Palette consists of a reference to a cluster profile, cloud configuration, as well as the cluster size and placement configuration. The following high-level tasks are performed as part of the cluster creation: + +* Orchestration of computing, network, and storage resources on the cloud environments along with the required placement infrastructure. + + +* Installation and configuration of various Kubernetes components like Kubelet, API servers, etcd, or scheduler. + + +* Installation and configuration of the cloud-specific network (CNI) and storage (CSI) plugins. + + +* Securing the cluster infrastructure and configuration according to the relevant OS, Kubernetes, and cloud security best practices. + + +* Deployment of additional Add-ons such as Prometheus, Permissions Manager, or Vault, as specified in the Cluster Profile. + +## Images + +Palette provides Virtual Machine (VM) images for cluster-computing infrastructure out of the box for the most recent versions of Operating Systems such as Ubuntu or CentOS. These images are security-hardened based on the respective CIS Benchmarks. In addition, Kubernetes components such as kubelet, kubeadm, etc. are preinstalled in these images. The specific image for a cluster is derived from the Operating System and Kubernetes packs configured in the cluster profile. + +The out-of-the-box images are hosted in the public cloud (AWS - AMI, Azure - VHD) or Palette's storage repository (vSphere - OVA). During provisioning, the image is copied (if missing) to the desired cloud region or downloaded onto a private data center. + +### Customization + +Palette provides various forms of customization options for VM images. All these customization options require a private pack registry to be set up with customized OS packs. + +#### Customize Out-of-the Box Images + +The Palette out-of-the-box images are security-hardened and have Kubernetes components preinstalled. Additional components can be installed on the images at runtime by defining one or more Ansible roles in the customized OS pack. Palette's orchestration engine creates a new image by instantiating a VM instance from the out-of-the-box image and executing the specified Ansible roles on the instance. This custom image is used for cluster provisioning. The customized image is tagged with a unique signature generated from the pack definition so that it can be reused for future cluster provisioning requests. + +## Security + +Palette secures the Kubernetes clusters provisioned by following security best practices at the Operating System, Kubernetes, and Cloud Infrastructure levels. + +### Operating System + +The Palette out-of-the-box VM images are hardened in accordance with the relevant OS CIS benchmark. Additionally, the images are scanned for vulnerabilities regularly, and fixes are applied to these images when available from the provider. The upgraded images are released in the form of updated OS packs in the Palette Pack Registry and are available to the users to apply to their existing clusters at a time convenient to them. + +### Kubernetes + +Kubernetes components and configuration are hardened in accordance with the Kubernetes CIS Benchmark. Palette executes Kubebench, a CIS Benchmark scanner by Aqua Security, for every Kubernetes pack to ensure the master and worker nodes are configured securely. 
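+
+If you want to run a comparable CIS scan against your own cluster, kube-bench can be launched as a Kubernetes Job. The sketch below assumes you have downloaded the `job.yaml` manifest from the kube-bench project; the manifest name and its contents may vary by release.
+
+```shell
+# Launch the scan as a Job, then read the report from the Job's logs.
+kubectl apply -f job.yaml
+kubectl logs job/kube-bench
+```
+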
+ +### Cloud Infrastructure + +Palette follows security best practices recommended by the various cloud providers when provisioning and configuring the computing, network, and storage infrastructure for the Kubernetes clusters. These include practices such as isolating master and worker nodes in dedicated network domains and limiting access through the use constructs like security groups. + +:::info + The security measures mentioned above are implemented for Palette's out-of-the-box OS and Kubernetes packs. For customized OS Kubernetes packs, users are responsible for taking the relevant measures to secure their clusters. +::: + +## Day-2 Management + +Palette provides several options to manage Kubernetes clusters on an ongoing basis. These include opportunities to scale up/down the cluster by adding/reducing the number of nodes in a node pool, add extra worker pools, resizing nodes in a node pool by modifying the instance type, and adding additional fault domains such as availability zones to a node pool. + +:::info + Cluster management operations result updating cluster definitions in Palette's database. The updated definition is retrieved by the management agent running in the cluster. A rolling upgrade is then performed to bring associated clusters to their desired state. +::: + +## Cluster Health + +Palette monitors the cluster infrastructure regularly and reports health on the management console. + + +Overall health is computed based on the following factors: + +* **Heartbeat** - The Palette management agent, which runs inside the cluster, periodically sends a heartbeat to the management console. Missing heartbeats typically indicate of a problem such as a cluster infrastructure going down or lack of network connectivity. Failure to detect heartbeats over a 10 minute period results in an unhealthy status for the cluster. + + Palette provides health check information for your workload clusters. This information is a value add to the cluster monitoring activities. In case a cluster goes to an unhealthy state, the last received healthy heartbeat can help you in troubleshooting. + +![Cluster_Health_Heart_Beat](/doc_cluster_clusters-cluster-heart-beat.png) + +* **Node Conditions** - Kubernetes maintains the status for each cluster node in the form of conditions such as DiskPressure, MemoryPressure, or NetworkUnavailable. Palette monitors these conditions and reports back to the management console. Any node condition indicating a problem with the node, results in an unhealthy status for the cluster. + + +* **Metrics** - Palette collects usage metrics such as CPU, Disk, or Memory. The cluster is marked as unhealthy if the usage metrics cross specific thresholds over some time. + +# Usage Monitoring + +Palette continuously monitors cluster resources and reports the usage for the cluster as well as individual nodes. The following metrics are reported on the Cluster Overview page of the management console. By default, the metrics are only displayed for the worker nodes in the cluster: + +* **Cores Used** - A cluster-wide breakdown of the number of cores used. + + +* **CPU Usage** - Current CPUs used across all cluster nodes. Additionally, usage over some time is presented as a chart. + + +* **Memory Usage** - Current memory used across all cluster nodes. Additionally, usage over a while is presented as a chart. + + +* **CPU Requests** - Total CPUs requested across all pods. + + +* **Memory Requests** - Total memory requested across all pods. 
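+
+To cross-check these values from the command line, you can query the Kubernetes Metrics API, assuming the metrics-server component is available in the cluster.
+
+```shell
+kubectl top nodes
+kubectl top pods --all-namespaces
+```
+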
+ +![Cluster Update Details](/cluster_usage_metrics.png) + +Additionally, usage metrics for individual nodes and node conditions are accessible from the Node Details page. + +## Application Services + +Palette enables quick access to the application services installed on the Kubernetes clusters by providing a link to those on the management console. These include the applications and services deployed through Palette and the ones deployed through any other means. Services are monitored on an ongoing basis, and all services of the type LoadBalancer or NodePort are displayed on the management console. + +![Cluster Update Details](/cluster_services.png "#width=500px") + +## Troubleshooting + +Typically, when a cluster lifecycle action such as provisioning, upgrade, or deletion runs into a failure, it does not result in an outright error on the cluster. Instead, the Palette orchestration engine follows the reconciliation pattern, wherein the system repeatedly tries to perform various orchestration tasks to bring the cluster to its desired state until it succeeds. Initial cluster provisioning or subsequent updates can run into various issues related to cloud infrastructure availability, lack of resources, or networking. + +## Cluster Conditions + +Palette tracks specific milestones in the cluster lifecycle and presents them as **Conditions**. + +Examples include: + + - Creating Infrastructure + + + - Adding Control Plane Node + + + - Customizing Image + +The active condition indicates what task the Palette orchestration system is trying to perform. If a task fails, the condition is marked as *failed*, with relevant error messages. Reconciliation, however, continues behind the scenes, and continuous attempts are made to perform the task. Failed conditions are a great source of information for troubleshooting provisioning issues. + +![Cluster Update Details](/cluster_conditions.png "#width=400px") + +For example, failure to create a Virtual Machine in AWS due to an exceeded vCPU limit will cause this error to be shown to the end users. They can then choose to bring down some workloads in the AWS cloud to free up capacity. The next time a VM creation task is attempted, it will succeed, and the condition will be marked as a success. + +## Rolling Upgrade + +Palette will perform a rolling upgrade on the nodes for any fundamental changes to the cluster configuration. Below are some of the actions that will result in a rolling upgrade: + +* OS layer changes + + +* Kubernetes layer changes + + +* Kubernetes version upgrade + + +* Kubernetes control plane upsize + + +* Machine pool updates for disk size + + +* Changes in availability zones + + +* Changes in instance types + + +* Certificate renewal, and many more. + +Palette keeps track of the reason that triggered the rolling upgrade on the cluster nodes and makes it accessible under **Cluster Overview** > **Upgrade details**. + +![upgrade-details1.png](/upgrade-details1.png) + +Besides actions taken by the user, Palette also performs a rolling upgrade if the cluster nodes' health degrades. Palette keeps track of each node machine's health and will relaunch the node when the machine health check fails. + +![upgrade-details2.png](/upgrade-details2.png) + +The following are some sample scenarios where node health is considered degraded: + + * The kubelet is not up for 10 minutes. + + + * The network is unavailable for 10 minutes. + + + * A new node does not become ready within 30 minutes.
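+
+To see the node conditions and readiness data that these health checks are based on, you can also query the cluster directly with kubectl. This is an optional spot check that assumes you have a kubeconfig for the workload cluster; replace `<node-name>` with one of your node names.
+
+```bash
+# List nodes and their readiness status.
+kubectl get nodes
+
+# Show the condition details (Ready, DiskPressure, MemoryPressure, and so on) for one node.
+kubectl describe node <node-name> | grep -A 10 "Conditions:"
+```
+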
+ +## Event Stream + +Palette maintains an event stream with low-level details of the various orchestration tasks being performed. The event stream is a useful source for identifying issues when an operation takes a long time to complete. + +
+ +:::info + +* Only the most recent 1,000 cluster events are retained. + + +* Due to Palette's reconciliation logic, intermittent errors may appear in the event stream.

For example, after a node is launched, errors about being unable to reach that node might show up in the event stream. These errors clear up once the node comes up. +
+
+ + Error messages that persist over a long time, or errors indicating issues with the underlying infrastructure, are an indication of a real problem. + +::: + +## Download Cluster Logs + +At times, you may be required to work with the Palette Support Team to troubleshoot an issue. Palette provides the ability to gather logs from the clusters it manages. Problems that occur during the orchestration lifecycle may require access to the various container, node, and kube-system logs. Palette automates this log-collection process and provides an easy download option from the Palette UI console. This reduces the burden on the operator of logging in to various cluster nodes individually to fetch these logs. +
+ +### Collect the Logs + +1. Select the running cluster. + + +2. Go to **Settings** and select **Download Logs**. + + +3. Choose the desired logs from the following options: + * Kube-System Logs + * Logs of all the Kubernetes components. + * Logs + * Spectro namespace logs for the last hour. + * Node Logs + * Contains the last ten thousand lines of the Spectro log, system log, and cloud-init log for each node. + +4. Click **Download Logs**. + + This message will display on the UI: "The request was sent successfully. The download will be available soon." There is an average wait time of five (5) minutes. + + At the end of this short log-fetching interval, the following message will be displayed on the UI: "The logs archive for {Cluster-name} was created successfully." + + +5. Click **Download <*cluster-name*> logs** to download the **Logs** folder to your local machine. + + +6. Unzip and rename the logs folder as desired. + +:::info + +* In addition to the log contents described above, the folder also contains a Manifest.yaml file describing the CRD, Deployment, Pod, ConfigMap, Event, and Node details of the cluster. + +* Palette recommends attaching these logs to the support request for accelerated troubleshooting. + +* After you click **Download Logs**, expect an average wait of five (5) minutes before the ready-to-download message appears on the UI. + +* The downloaded zip file is named **spectro_logs.zip** by default. You can unzip it and rename it as desired. + +::: + +## Proxy Whitelist + +This table lists the proxy requirements for enabling the Palette management console. + +| Top-level Domain | Port | Description | | ------------------------- | ---- | -------------------------------------------- | | docker.io | 443 | Third party container images. | | docker.com | 443 | Third party container images. | | gcr.io | 443 | Spectro Cloud and 3rd party container images.| | ghcr.io | 443 | Third party container images. | | github.com | 443 | Third party content. | | googleapis.com | 443 | Spectro Cloud images. | | grafana.com | 443 | Grafana container images and manifests. | | k8s.gcr.io | 443 | Third party container images. | | projectcalico.org | 443 | Calico container images. | | registry.k8s.io | 443 | Third party container images. | | raw.githubusercontent.com | 443 | Third party content. | | spectrocloud.com | 443 | Spectro Cloud Palette SaaS. | | s3.amazonaws.com | 443 | Spectro Cloud VMware OVA files. | | quay.io | 443 | Third party container images. | | ecr.us-east-1.amazonaws.com | 443 | OCI Pack images. | | ecr.us-west-2.amazonaws.com | 443 | OCI Pack images. | + +## Scope + +Clusters are launched from within Projects in Palette, and they belong to a single project. In the **Project** view, all clusters that are launched from that project are listed for users with the Project Administrator role or Cluster Administrator role. A Tenant Administrator can get an aggregated view of clusters running across all projects from the **Organization** view, as follows: + +1. Log in to the **Palette Management Console** as a Tenant Administrator. + +2. Go to the **Clusters** option in the sidebar to list all the clusters belonging to all the users under that Tenant Administrator. + + +## Additional Resources + +The next section provides details for setting up workload clusters in various environments.
+ diff --git a/docs/docs-content/clusters/data-center/_category_.json b/docs/docs-content/clusters/data-center/_category_.json new file mode 100644 index 0000000000..094470741d --- /dev/null +++ b/docs/docs-content/clusters/data-center/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 10 +} diff --git a/docs/docs-content/clusters/data-center/data-center.md b/docs/docs-content/clusters/data-center/data-center.md new file mode 100644 index 0000000000..2781b4ddad --- /dev/null +++ b/docs/docs-content/clusters/data-center/data-center.md @@ -0,0 +1,33 @@ +--- +sidebar_label: "Data Center Clusters" +title: "Data Center Clusters" +description: "The methods of creating clusters for a speedy deployment on any CSP" +hide_table_of_contents: false +sidebar_position: 10 +sidebar_custom_props: + icon: "database" +tags: ["data center"] +--- + +Palette supports provisioning and end-to-end lifecycle management of Kubernetes workload clusters on various private clouds, bare metal servers, and in self-hosted environments. + + +:::info + +Workload clusters are instantiated from cloud-specific cluster profiles. You can use one of the cluster profiles provided out-of-the-box or create a new one. + +::: + + +## Resources + +The following pages provide detailed instructions for setting up new workload clusters in various data center environments. + +- [Canonical MAAS](maas/maas.md) + + +- [OpenStack](openstack.md) + + +- [VMware](vmware.md) + diff --git a/docs/docs-content/clusters/data-center/maas/_category_.json b/docs/docs-content/clusters/data-center/maas/_category_.json new file mode 100644 index 0000000000..3fca6fb9f9 --- /dev/null +++ b/docs/docs-content/clusters/data-center/maas/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 0 +} diff --git a/docs/docs-content/clusters/data-center/maas/architecture.md b/docs/docs-content/clusters/data-center/maas/architecture.md new file mode 100644 index 0000000000..ab0c811e65 --- /dev/null +++ b/docs/docs-content/clusters/data-center/maas/architecture.md @@ -0,0 +1,82 @@ +--- +sidebar_label: "Architecture" +title: "Architecture" +description: "Learn about the architecture used to support MAAS using Palette" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["data center", "maas", "architecture"] +--- + + +Canonical MAAS is an open-source tool that lets you discover, commission, deploy, and re-deploy operating systems to physical servers. The following are some architectural highlights of bare-metal Kubernetes clusters that Palette deploys using Canonical MAAS. Refer to the PCG deployment options section below to learn more about PCG deployment. + + +- Palette integrates with MAAS through Spectro Cloud’s open-source Cloud Native Computing Foundation (CNCF) [Cluster API provider](https://github.com/spectrocloud/cluster-api-provider-maas). + + +- Palette provides a cloud-like experience for deploying clusters on bare metal servers. The result is increased performance at minimal cost and operational effort. + + +- A Private Cloud Gateway (PCG) that you install in a MAAS cloud using a local installer facilitates communication between Palette and MAAS. The PCG is necessary in MAAS environments where Palette does not have direct network access to the MAAS server. Since MAAS environments are typically in a private network without a central endpoint, the PCG provides this endpoint and also wraps the MAAS environment into a cloud account that you can target for cluster deployment in Palette.
Refer to the section below to learn about the PCG deployment options you have. + + +- When the PCG is installed, it registers itself with a Palette instance and enables secure communication between the SaaS portal and the private cloud environment. The gateway enables installation and end-to-end lifecycle management of Kubernetes clusters in private cloud environments from Palette's SaaS portal. + + The diagram below illustrates how MAAS works with Palette using a PCG. + + ![Network flow from an architectural perspective of how MAAS works with Palette](/maas_cluster_architecture.png) + +
+ +## PCG Deployment Options + +Palette can communicate with MAAS using the following deployment options. + +
+ + +- **Private Cloud Gateway** + + +- **System Private Gateway** + +### Private Cloud Gateway + +When a user wants to deploy a new cluster on a bare metal cloud using MAAS with Palette, Palette needs connectivity to MAAS. Often, MAAS is behind a firewall or a Network Address Translation (NAT) gateway, and Palette cannot reach MAAS directly. + +To address these network challenges, you can deploy a PCG. The PCG will maintain a connection to Palette and directly connect to MAAS. The direct communication channel allows Palette to create clusters using the PCG to facilitate communication with MAAS. The PCG also supports using a proxy server to access the internet if needed. + +Once Palette deploys clusters, the clusters require connectivity to Palette. The clusters communicate with Palette directly via an internet gateway, or if a proxy has been configured on the PCG, the clusters will inherit the proxy configuration. Deployed and active clusters maintain their connectivity with Palette. Any actions taken on these clusters using Palette will not require the PCG's participation. This means that if the PCG becomes unavailable, any clusters that are currently deployed will remain operational and still be managed by Palette. + +All Palette-deployed clusters will use the PCG cluster during the creation and deletion phases. Once a host cluster is available, the internal Palette agent will communicate with Palette directly. The Palette agent inside each cluster is the originator of all communication, so the network requests are outbound toward Palette. The exception is a host cluster creation or deletion request, where the PCG must be involved because it needs to acquire and release machines provided by MAAS. + +Typically, the PCG is used with Palette SaaS. However, a PCG is also required if you have a self-hosted Palette instance and it does not have direct access to the MAAS environment. You can utilize the System Private Gateway if there is direct network connectivity with the MAAS environment. Refer to the [System Private Gateway](#system-private-gateway) section to learn more. +
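+
+Because the PCG and the clusters it deploys always initiate outbound connections to Palette, a quick way to confirm that a host on the MAAS network has the required egress over port 443, either directly or through a proxy, is to request the Palette endpoint. The endpoint below assumes Palette SaaS; substitute the URL of your self-hosted instance if applicable. The command honors the `HTTPS_PROXY` environment variable if one is set.
+
+```bash
+# Any HTTP status line in the response confirms outbound connectivity on port 443.
+curl -sI https://api.spectrocloud.com | head -n 1
+```
+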
+ + +### System Private Gateway + +A System Private Gateway can be used if a self-hosted Palette instance can communicate directly with a MAAS installation. A System Private Gateway is a PCG service that is enabled inside the self-hosted Palette instance. + +
+ +:::caution + +Only self-hosted Palette instances support the option of using the System Private Gateway. Use the default [PCG deployment](#private-cloud-gateway) option if you have NAT gateways or network firewalls between Palette and MAAS. + +::: + 
+ +When registering a MAAS cloud account with Palette, toggle on **Use System Private Gateway** to enable direct communication between Palette and MAAS. Refer to the [Register and Manage MAAS Cloud Accounts](register-manage-maas-cloud-accounts.md) guide to learn more. + +The following table describes the scenarios in which a PCG or a System Private Gateway is appropriate. + 
+ +| Scenario | Use Private Cloud Gateway | Use System Private Gateway | |-----------|----|----------------| | Firewall or NAT between MAAS and a self-hosted Palette instance | ✅ | ❌ | | Direct connectivity between MAAS and a Palette instance | ✅ | ✅ | diff --git a/docs/docs-content/clusters/data-center/maas/create-manage-maas-clusters.md b/docs/docs-content/clusters/data-center/maas/create-manage-maas-clusters.md new file mode 100644 index 0000000000..141c397b32 --- /dev/null +++ b/docs/docs-content/clusters/data-center/maas/create-manage-maas-clusters.md @@ -0,0 +1,139 @@ +--- +sidebar_label: "Create and Manage MAAS Clusters" +title: "Create and Manage MAAS Clusters" +description: "Learn how to create and manage MAAS clusters in Palette." +hide_table_of_contents: false +tags: ["data center", "maas"] +--- + + + +Palette supports creating and managing Kubernetes clusters deployed to a MAAS account. This section guides you on how to create a Kubernetes cluster in MAAS that is managed by Palette. + +## Prerequisites + +- An installed PCG if you do not have a direct connection to the MAAS environment. Review [Install and Manage MAAS Gateway](install-manage-maas-pcg.md) for guidance. + + If you are self-hosting Palette and have a direct connection to the MAAS environment, you can select **Use System Private Gateway**. To learn more about when you would use Palette's PCG or the System Private Gateway, refer to the [Architecture](architecture.md) page. + + +- A MAAS account registered in Palette. Refer to the [Register and Manage MAAS Cloud Accounts](register-manage-maas-cloud-accounts.md) guide if you need to register a MAAS account in Palette. + + +- A cluster profile for the MAAS environment. Review [Cluster Profiles](../../../cluster-profiles/cluster-profiles.md) for more information. + + +- Verify that the required Operating System (OS) images you use in your cluster profiles are downloaded and available in your MAAS environment. Review the [How to use standard images](https://maas.io/docs/how-to-use-standard-images) guide for guidance on downloading OS images for MAAS. + + +## Deploy a MAAS Cluster + +To deploy a new MAAS cluster: + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. Navigate to the **Main Menu** and click **Clusters**. Then click the **Add New Cluster** button. + + +3. Click **Deploy New Cluster** on the Create a New Cluster page. + + +4. Select **MAAS** and click the **Start MAAS Configuration** button. + + +5. Provide basic cluster information: **Cluster name**, **Description**, and **Tags**. + + +6. Select your MAAS cloud account from the **drop-down Menu** and click **Next**. + + +7. Select the cluster profile for your MAAS cluster. + + +8. Review and override pack parameters as desired and click **Next**. By default, parameters for all packs are set with values defined in the cluster profile. + + +9. Select a domain from the **Domain drop-down Menu** and click **Next**. + + +10. Configure the master and worker node pools. The following input fields apply to MAAS master and worker node pools. For a description of input fields that are common across target platforms, refer to the [Node Pools](../../cluster-management/node-pool.md) management page. Click **Next** when you are done. + + #### Master Pool configuration + + - Cloud configuration: + + - Resource Pool: The MAAS resource pool from which to select available servers for deployment. Filter available servers to only those that have at least the amount of CPU and Memory selected. + 
+ + #### Worker Pool configuration + + - Cloud configuration: + + - Resource Pool: The MAAS resource pool from which to select available servers for deployment. Filter available servers to only those that have at least the amount of CPU and Memory selected. + + +11. You can configure the following cluster management features now if needed, or you can do it later: + + - Manage machines + - Schedule scans + - Schedule backups + - Role-based access control (RBAC) + - Location + + +12. Review settings and deploy the cluster. + + +## Validate + +You can validate your cluster is available by reviewing the cluster details page. Navigate to the left **Main Menu** and click **Clusters**. The **Clusters** page lists all available clusters that Palette manages. Select the cluster to review its details page. Ensure the **Cluster Status** field contains the value **Running**. + +
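+
+If you have downloaded the cluster's kubeconfig file from the cluster details page, you can also confirm from the command line that the nodes are up. This is an optional check; the kubeconfig file name below is a placeholder.
+
+```bash
+# All nodes should report a STATUS of Ready.
+kubectl get nodes --kubeconfig ./<cluster-name>-kubeconfig
+```
+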
+ +## Delete a MAAS Cluster + +When you delete a MAAS cluster, all machines and associated storage disks that were created for the cluster are removed. + +Follow these steps to delete a MAAS cluster. + +
+ +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. Navigate to the **Main Menu** and click **Clusters**. + + +3. Select the cluster you want to delete. + + +4. Click the **Settings drop-down Menu** and choose **Delete**. + +The cluster status is updated to **Deleting** while cluster resources are being deleted. When all resources are successfully deleted, the cluster status is updated to **Deleted** and the cluster is removed from the list. The delete operation returns the MAAS machines to the **Ready** state. All the artifacts related to the Kubernetes distribution are removed. +
+ + +## Upgrade a MAAS Cluster + +Upgrade a MAAS cluster to enhance the performance and functionality of the cluster. To learn more about managing a MAAS cluster, refer to [Manage Clusters](../../cluster-management/cluster-updates.md). + +To protect your data, we recommend you create a backup of your MAAS cluster before proceeding with any upgrades or infrastructure changes. Review the instructions provided in the [Backup and Restore](../../cluster-management/backup-restore/backup-restore.md) guide. + 
+ +:::caution + +Ensure that the Operating System (OS) images selected for your cluster are downloaded and available in your MAAS configuration to eliminate errors in Palette. You can refer to the [How to use standard images](https://maas.io/docs/how-to-customise-images) guide for instructions on downloading OS images compatible with your MAAS environment. + + + +::: + + + +## Next Steps + +Now that you’ve deployed a MAAS cluster, you can start developing and deploying applications to your cluster. We recommend you review the Day-2 operations and become familiar with the cluster management tasks. Check out the [Manage Clusters](../../cluster-management/cluster-management.md) documentation to learn more about Day-2 responsibilities. \ No newline at end of file diff --git a/docs/docs-content/clusters/data-center/maas/install-manage-maas-pcg.md b/docs/docs-content/clusters/data-center/maas/install-manage-maas-pcg.md new file mode 100644 index 0000000000..51dffd9657 --- /dev/null +++ b/docs/docs-content/clusters/data-center/maas/install-manage-maas-pcg.md @@ -0,0 +1,676 @@ +--- +sidebar_label: "Install and Manage MAAS Gateway" +title: "Install and Manage MAAS Private Cloud Gateway" +description: "Learn how to install and manage the MAAS Private Cloud Gateway in Palette." +hide_table_of_contents: false +sidebar_position: 10 +toc_min_heading_level: 2 +toc_max_heading_level: 3 +tags: ["data center", "maas"] +--- + +The Private Cloud Gateway (PCG) supports private cloud and data center environments. Its function is similar to that of a reverse proxy. The PCG facilitates connectivity between Palette and a private cloud that exists behind a NAT gateway or firewall. It traverses any NAT gateways or firewalls to establish a permanent connection with Palette. + +The PCG is a Kubernetes cluster that supports Palette in a private network environment. All host clusters deployed through Palette communicate with the PCG. + +At a high level, the following occurs during a successful MAAS PCG installation: +
+ +- Use the Palette CLI on a laptop, workstation, or Bastion host. + + +- Provide information to the CLI so that it can connect to both a local MAAS installation and a Palette account. + + +- The installation process uses MAAS to obtain machines and install a PCG on them. + + +- The PCG then facilitates all communication between Palette and MAAS, enabling Palette to create new clusters on machines that MAAS provides. + +You can set up the PCG as a single- or three-node cluster based on your requirements for high availability (HA). + +As the following diagram shows, Palette provides an installer in the form of a Docker container that is temporarily deployed on your laptop, workstation, or jump box. You can use the installer on any Linux x86-64 system with a Docker daemon installed and connectivity to Palette and the MAAS identity endpoint. + + + +![An architecture diagram of MaaS with PCG.](/clusters_maas_install-manage-mass-pcg_diagram-of-mass-with-pcg.png) + + +## Install PCG + +Use the following steps to install a PCG cluster in your MAAS environment. You can use the [Palette CLI](/palette-cli) or the PCG Installer Image to deploy a PCG cluster. Review the prerequisites for each option to help you identify the correct install method. + + + + + + + +### Prerequisites + + +- Palette version 4.0.X or greater. + + +- Canonical [MAAS installed](https://maas.io/docs/how-to-install-maas), set up, and available in your environment. + + +- Download the Palette CLI from the [Downloads](../../../spectro-downloads#palette-cli) page and install the CLI. Refer to the [Palette CLI Install](../../../palette-cli/install-palette-cli.md) guide to learn more. + + +- A Palette API key. Refer to the [Create API Key](../../../user-management/user-authentication.md#apikey) page for guidance. + + :::caution + + The installation does not work with Single Sign-On (SSO) credentials. You must use an API key from a local tenant admin account in Palette to deploy the PCG. After the PCG is configured and functioning, this local account is no longer used to keep the PCG connected to Palette, so you can disable the account if desired. + + ::: + +- A Linux environment with a Docker daemon installed and a connection to Palette and the MAAS endpoint. The installation must be invoked on an up-to-date Linux system with an x86-64 architecture. ARM architecture is currently not supported. + + +- PCG IP address requirements: + + - For a single-node gateway, one IP address must be available in the MAAS subnet for the PCG, or three available IP addresses for a three-node gateway. + + - One IP address must be available in the MAAS subnet for the Kubernetes API-server endpoint when deploying a three-node gateway. + + +- Sufficient available IPs within the configured MAAS subnets. + + :::caution + + By default, the MAAS Kubernetes pack uses a pod classless inter-domain routing (CIDR) range of 192.168.0.0/16. Ensure that the pod CIDR range for any clusters you deploy after setting up the PCG does not overlap with the network used by the bare metal machines that MAAS manages. + + ::: + +- Each node in the PCG cluster requires a machine from MAAS in a ready state with the following resources: + + - CPU: 4 + - Memory: 8192 MiB + - Storage: 60 GiB + + For production environments, we recommend using three nodes, each with 100 GiB of storage, as nodes can exhaust the 60 GiB storage with prolonged use. If you initially set up the gateway with one node, you can resize it at a later time. 
+ +- An active [MAAS API key](https://maas.io/docs/api-authentication-reference). You can generate one in the MAAS web console under **My Preferences** > **API keys**. The following is an example key: + + ``APn53wz232ZwBMxDp5:MHZIbUp3e4DJTjZEKg:mdEv33WAG536MhNC8mIywNLtjcDTnFAQ`` + + For details, refer to the MAAS document on [how to add an API key](https://maas.io/docs/how-to-manage-user-accounts#heading--api-key). + + +- The DNS server that the PCG installer will use must be able to resolve the DNS names of machines that MAAS deploys so it can connect to them. The default setup is to use the MAAS server as the DNS server for any bare metal servers that it deploys. The default MAAS DNS zone is ``.maas``. You can use ``.maas`` or you can use the MAAS web console to create a new DNS zone. When you deploy the PCG and clusters, you can select the desired DNS zone in which DNS name records should be created. + + In the MAAS subnets configuration, you can specify which DNS servers the servers in the MAAS subnet should use. + +:::caution + +If you configure a different DNS server than the MAAS DNS server, you must create a DNS delegation in the other DNS server so that it can forward DNS requests for zones that are hosted by MAAS to the MAAS DNS server. + +::: + + +The installation process first requests machines from MAAS and then must connect to them. To connect, the install process attempts to use the fully qualified domain name (FQDN) of the server. If you used ``.maas`` as the default DNS zone, the FQDN would be ``machine-hostname.maas``. + +The diagram below shows an example of using an external DNS server for servers that MAAS deploys in addition to a DNS delegation. This ensures all servers in the network can resolve the DNS names of servers deployed by MAAS. Note that it is not required for the DNS records to be accessible from the internet. + + + ![Image showing external DNS server machines that MAAS deploys in addition to a DNS delegation](/clusters_maas_maas-dns-setup.png) + + +### Install + +The following steps will guide you through installing a PCG cluster. + +1. On an x86-64 Linux host, open a terminal session. + + +2. Use the [Palette CLI](../../../palette-cli/install-palette-cli.md) `login` command to authenticate the CLI with Palette. When prompted, enter the information listed in the following table. + 
+ + ```shell + palette login + ``` + +
+ + |**Parameter** | **Description**| + |:-----------------------------|---------------| + |**Spectro Cloud Console** |Enter the Palette endpoint URL. When using the Palette SaaS service, enter ``https://console.spectrocloud.com``. When using a self-hosted instance of Palette, enter the URL for that instance. | + |**Allow Insecure Connection** |Enabling this option bypasses x509 verification. Enter `y` if you are using a self-hosted Palette instance with self-signed TLS certificates. Otherwise, enter `n`.| + |**Spectro Cloud API Key** |Enter your Palette API Key.| + |**Spectro Cloud Organization** |Enter your Palette Organization name.| + |**Spectro Cloud Project** |Enter your desired project name within the selected Organization.| + + +3. Once you have authenticated successfully, invoke the PCG installer by issuing the following command. When prompted, enter the information listed in each of the following tables. + +
+ + ```bash + palette pcg install + ``` + +
+ + |**Parameter** | **Description**| + |:-----------------------------|---------------| + |**Cloud Type**| Choose MAAS.| + |**Private Cloud Gateway Name** | Enter a custom name for the PCG. Example: `maas-pcg-1`.| + |**Share PCG Cloud Account across platform Projects** |Enter `y` if you want the Cloud Account associated with the PCG to be available from all projects within your organization. Enter `n` if you want the Cloud Account to only be available at the tenant admin scope.| + + + +4. Next, provide environment configurations for the cluster. Refer to the following table for information about each option. + +
+ + |**Parameter**| **Description**| |:-------------|----------------| |**HTTPS Proxy**|Leave this blank unless you are using an HTTPS Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: ``https://USERNAME:PASSWORD@PROXYIP:PROXYPORT``.| |**HTTP Proxy**|Leave this blank unless you are using an HTTP Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: ``http://USERNAME:PASSWORD@PROXYIP:PROXYPORT``.| |**No Proxy**|The default is blank. You can add a comma-separated list of local network CIDR addresses, hostnames, and domain names that should be excluded from being a proxy. This setting will be propagated to all the nodes to bypass the proxy server. Example if you have a self-hosted environment: ``maas.company.com,10.10.0.0/16``.| |**Proxy CA Certificate Filepath**|The default is blank. You can provide the file path of a CA certificate on the installer host. If provided, this CA certificate will be copied to each host in the PCG cluster during deployment. The provided path will be used on the PCG cluster hosts. Example: `/usr/local/share/ca-certificates/ca.crt`.| |**Pod CIDR**|Enter the CIDR pool that will be used to assign IP addresses to pods in the PCG cluster. The pod IP addresses should be unique and not overlap with any machine IPs in the environment.| |**Service IP Range**|Enter the IP address range that will be used to assign IP addresses to services in the PCG cluster. The service IP addresses should be unique and not overlap with any machine IPs in the environment.|
+ + +5. After the environment options, the next set of prompts is for configuring the PCG cluster for the MAAS environment. The following table contains information about each prompt. + +
+ + |**Parameter**| **Description**| + |-------------|----------------| + | **MAAS API Endpoint** |Enter the MAAS API endpoint. This can be a domain or IP address. Example: `http://10.11.12.13:5240/MAAS`.| + | **MAAS API Key** |Enter an active MAAS API key to use for authentication.| + + + +6. Next, select the appropriate option for each of the following items to define which machines should be selected on the MAAS server for deployment as a PCG. + +
+ + |**Parameter**| **Description**| |-------------|----------------| | **Domain** | Select the MAAS domain. | | **Patch OS on boot** | This parameter indicates whether or not to patch the OS of the PCG hosts on the first boot.| | **Reboot nodes once OS patch is applied** | This parameter indicates whether or not to reboot PCG nodes after OS patches are applied.| | **Availability Zone** | Select the availability zones for the PCG cluster. | | **Resource Pool** | Select the MAAS resource pool. | | **Cluster Size** | The number of nodes that will make up the cluster. Available options are **1** or **3**. Use three nodes for a High Availability (HA) cluster. | + + :::caution + + Ensure the MAAS server has one or more machines in the **Ready** state for the chosen availability zone and resource pool combination. + + ::: + 
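+
+   Before continuing, you can optionally confirm from the MAAS CLI that suitable machines are available. The command below is a sketch that assumes you have logged in to the MAAS CLI with a profile named `admin` and that `jq` is installed; adjust the profile name for your environment.
+
+   ```bash
+   # List hostname, availability zone, and resource pool for every machine in the Ready state.
+   maas admin machines read | jq -r '.[] | select(.status_name=="Ready") | [.hostname, .zone.name, .pool.name] | @tsv'
+   ```
+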
+ + ```bash hideClipboard + ==== PCG config saved ==== + Location: /home/spectro/.palette/pcg/pcg-20230706150945/pcg.yaml + ``` + + :::info + + The ``CloudAccount.apiKey`` and ``Mgmt.apiKey`` values in the **pcg.yaml** are encrypted and cannot be manually updated. To change these values, restart the installation process using the `palette pcg install` command. + + ::: + + +The Palette CLI will now provision a PCG cluster in your MAAS environment. +If the deployment fails due to misconfiguration, update the PCG configuration file and restart the install process. Refer to the Edit and Redeploy PCG section below. For additional assistance, visit our [Customer Support](https://spectrocloud.atlassian.net/servicedesk/customer/portals) portal. + +### Validate + +Once installed, the PCG registers itself with Palette. To verify the PCG is registered, use the following steps. + + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. Navigate to the left **Main Menu** and select **Tenant Settings**. + + +3. From the **Tenant Settings Menu**, click on **Private Cloud Gateways**. Verify your PCG cluster is available from the list of PCG clusters displayed. + + +4. When you install the PCG, a cloud account is auto-created. To verify the cloud account is created, go to **Tenant Settings > Cloud Accounts** and locate **MAAS** in the table. Verify your MAAS account is listed. + + + +### Edit and Redeploy PCG + +To change the PCG install values, restart the installation process using the `palette pcg install` command. Use the following steps to redeploy the PCG or restart the install process. +
+ +1. Make the necessary changes to the PCG configuration file the CLI created during the installation, if needed. Use a text editor, such as Vi or Nano to update the PCG install configuration file. + +
+ + ```shell hideClipboard + ==== Create PCG reference config ==== + ==== PCG config saved ==== + Location: /Users/demo/.palette/pcg/pcg-20230717114807/pcg.yaml + ``` + + ```bash hideClipboard + vi /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml + ``` + + + +2. To redeploy the PCG, use the `install` command with the flag `--config-file`. Provide the file path to the generated PCG config file that was generated and displayed in the output. + +
+ + ```bash hideClipboard + palette pcg install --config-file /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml + ``` + + +
+ +
+ + + +## PCG Installer Image + +### Prerequisites + +- Palette version 3.4.X or older. + + + +- Canonical [MAAS installed](https://maas.io/docs/how-to-install-maas), set up, and available in your environment. + + +- A Linux environment with a Docker daemon installed and a connection to Palette and the MAAS endpoint. The installer must be invoked on an up-to-date Linux system with an x86-64 architecture. ARM architecture is currently not supported. + + +- PCG IP address requirements:

+ + - For a single-node gateway, one IP address must be available in the MAAS subnet for the PCG, or three available IP addresses for a three-node gateway. +
+ + - One IP address must be available in the MAAS subnet for the Kubernetes api-server endpoint when deploying a three-node gateway. + + +- Sufficient available IPs within the configured MAAS subnets. + +:::caution + +By default, the MAAS Kubernetes pack uses a pod classless inter-domain routing (CIDR) range of 192.168.0.0/16. Ensure that the pod CIDR range for any clusters you deploy after setting up the PCG does not overlap with the network used by the bare metal machines that MAAS manages. + +::: + +- Each node in the PCG cluster requires a machine from MAAS in a ready state with the following resources: + +
+ + - CPU: 4 + - Memory: 8192 MiB + - Storage: 60 GiB + + For production environments, we recommend using three nodes, each with 100 GiB of storage, as nodes can exhaust the 60 GiB of storage with prolonged use. If you initially set up the gateway with one node, you can resize it at a later time. + + +- An active [MAAS API key](https://maas.io/docs/api-authentication-reference), which can be generated in the MAAS web console under **My Preferences** > **API keys**. The following is an example key: + + ``APn53wz232ZwBMxDp5:MHZIbUp3e4DJTjZEKg:mdEv33WAG536MhNC8mIywNLtjcDTnFAQ`` + + For details, refer to the MAAS document on [how to add an API key](https://maas.io/docs/how-to-manage-user-accounts#heading--api-key). + 
+ +- The DNS server that the PCG installer will use must be able to resolve the DNS names of machines that MAAS deploys so it can connect to them. The default setup is to use the MAAS server as the DNS server for any bare metal servers that it deploys. The default MAAS DNS zone is ``.maas``. You can use ``.maas`` or you can use the MAAS web console to create a new DNS zone. When you deploy the PCG and clusters, you can select the desired DNS zone in which DNS name records should be created. + + In the MAAS subnets configuration, you can specify which DNS servers the servers in the MAAS subnet should use. + +:::caution + +If you configure a different DNS server than the MAAS DNS server, you must create a DNS delegation in the other DNS server so that it can forward DNS requests for zones that are hosted by MAAS to the MAAS DNS server. + +::: + 
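+
+A quick way to confirm that this DNS setup works is to resolve the name of a MAAS-deployed machine against the DNS server that your other servers use. The hostname below follows the ``.maas`` zone example used above; the DNS server address is a placeholder, so substitute values from your environment.
+
+```bash
+# Should print the machine's IP address if the ".maas" zone is resolvable through the delegation.
+dig +short machine-hostname.maas @10.10.32.10
+```
+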
+ +The installer first requests machines from MAAS and then must connect to them. To connect, the installer attempts to use the fully qualified domain name (FQDN) of the server. If you used ``.maas`` as the default DNS zone, the FQDN would be ``machine-hostname.maas``. + +The diagram shows an example of using an external DNS server for servers that MAAS deploys in addition to a DNS delegation. This ensures all servers in the network can resolve the DNS names of servers deployed by MAAS. Note that it is not required for the DNS records to be accessible from the internet. + + +![Image showing external DNS server machines that MAAS deploys in addition to a DNS delegation](/clusters_maas_maas-dns-setup.png) + + +### Understand the Gateway Installation Process + +The following steps outline the overall process to install the PCG. + +For detailed steps, refer to the **Install PCG** section below, which describes a single-step installation that creates the PCG configuration file and installs the PCG. + +If you have already installed the PCG and are experiencing issues that you want to fix by editing the PCG configuration file directly, refer to the **Edit PCG Config** section below. + +
+ +1. You obtain a pairing code in Palette that you will use later. + + +2. Use the Docker image to start the installation on the installer host. + + +3. The installer prompts you for information, including the pairing code you obtained in step **1**. + + +4. The installer generates the PCG configuration file from information you provide in step **3**. + +
+ + The installer needs access to your Palette account and to your MAAS environment. Additionally, one (no HA) or three (HA) machines must be in the **Ready** state in MAAS and have internet access. If you select one machine in step **3**, then you need one in MAAS. Likewise, if you select three machines in step **3**, you need three in MAAS. +
+ +5. The installer deploys the MAAS machines and uses the configuration file to build a new cluster that hosts the PCG application. + 
+ + + +### Install the PCG + +The following steps will guide you through installing the PCG. 
+ +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. If you have Single or Social Sign-On (SSO) enabled, you will need to use or create a local non-SSO tenant admin account in Palette and use the credentials for that account in step **7**. + +:::caution + +The installer does not work with SSO or Social sign on credentials. You must use a username and password from a local tenant admin account in Palette to deploy the PCG. After the PCG is configured and functioning, this local account is no longer used to keep the PCG connected to Palette, so you can disable the account if desired. + +::: + + +3. Navigate to the **Main Menu** and select **Tenant Settings > Private Cloud Gateway**. + + +4. Click the **Create Private Cloud Gateway** button and select **MAAS**. Private Gateway installation instructions are displayed. + + +5. Note the pairing code displayed in the Instructions section of the page. You will input this code when you use the installer. This pairing code is valid for 24 hours. + + +6. To invoke the installer, copy the following code snippet to your terminal. +
+ + ```bash + docker run -it --rm \ + --net=host \ + --volume /var/run/docker.sock:/var/run/docker.sock \ + --volume /tmp:/opt/spectrocloud \ + gcr.io/spectro-images-public/release/spectro-installer:v1.0.12 + ``` + +7. When prompted, enter the pairing code and information listed in each of the following tables. The installer will generate the gateway configuration file. +
+ + +#### Palette Parameters + +|**Parameter** | **Description**| +|:-----------------------------|---------------| +|**Install Type**| Choose **Private Cloud Gateway**.
You can change your selection with the up or down keys.| +|**Cloud Type**| Choose MAAS.| +|**Name** | Enter a custom name for the PCG. Example: ``maas-pcg-1``.| +|**Endpoint** |Enter the Palette endpoint URL. When using the Palette SaaS service, enter ``https://console.spectrocloud.com``. When using a dedicated instance of Palette, enter the URL for that instance. | +|**Username** |Enter your Palette username. This is your sign-in email address. Example: ``user1@company.com``. | +|**Password** |Enter your Palette Password. This is your sign-in password.| +|**Pairing Code** |Enter the pairing code you noted from the instructions page in step **5**. | + +
+ +#### Environment Configuration + + +|**Parameter**| **Description**| +|:-------------|----------------| +|**HTTPS Proxy (--https_proxy)**| Leave this blank unless you are using an HTTPS Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: ``https://USERNAME:PASSWORD@PROXYIP:PROXYPORT``.| +| **HTTP Proxy(--http_proxy)**| Leave this blank unless you are using an HTTP Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: ``http://USERNAME:PASSWORD@PROXYIP:PROXYPORT``.| +| **No Proxy(--no_proxy)**| The default is blank. You can add a comma-separated list of local network CIDR addresses, hostnames, and domain names that should be excluded from being a proxy. This setting will be propagated to all the nodes to bypass the proxy server. Example if you have a self-hosted environment: ``maas.company.com,10.10.0.0/16``.| +| **Pod CIDR (--pod_cidr)**|Enter the CIDR pool that will be used to assign IP addresses to pods in the PCG cluster. The pod IP addresses should be unique and not overlap with any machine IPs in the environment.| +| **Service IP Range (--svc_ip_range)**|Enter the IP address range that will be used to assign IP addresses to services in the PCG cluster. The service IP addresses should be unique and not overlap with any machine IPs in the environment.| + +
+ + +#### MAAS Account Information + +|**Parameter**| **Description**| +|-------------|----------------| +| **API Endpoint** |Enter the MAAS API endpoint (syntax is important). This can be a domain or IP address. Example: ``http://10.11.12.13:5240/MAAS``.| +| **API Key** |Enter an active MAAS API key to use for authentication.| + +
+ +8. When the installer prompts you, select the appropriate option for each of the following items to define which machines should be selected on the MAAS server for deployment as a PCG: + + - Domain + - Availability Zone + - Resource Pool + - One node (no HA) or three nodes (HA) + 
+ + :::caution + + Ensure the MAAS server has one or more machines in the **Ready** state for the chosen availability zone + and resource pool combination. + + ::: + +When you have entered all the configuration values, the installer saves the gateway configuration file to disk and prints its location before proceeding with the installation. For example: + +``/tmp/install-user-defined-MaaS-Gateway_Name-20210805155034/pcg.yaml`` + +
+ +:::info + +The installer's **/opt/spectrocloud** folder is volume mapped to the host's **/tmp** folder. + +::: + +The installer then requests available bare metal machines in your MAAS environment on which to install the gateway. The ``password`` and ``API key`` values in the ``pcg.yaml`` are encrypted and cannot be manually updated. To change these values, copy the code snippet in step **6** to rerun the installer. + +If the deployment fails due to misconfiguration, update the gateway configuration file and rerun the installer. Refer to the **Edit PCG Configuration File** section. + +If you need assistance, please visit our [Customer Support](https://spectrocloud.atlassian.net/servicedesk/customer/portals) portal. + 
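+
+Because of this volume mapping, the generated configuration file is available on the host after the installer exits. Based on the example path shown earlier, you can locate it with:
+
+```bash
+ls /tmp/install-*/pcg.yaml
+```
+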
+ +### Validate + +Once installed, the gateway registers itself with Palette. To verify the gateway is registered, navigate to **Tenant Settings > Private Cloud Gateways** and ensure the gateway is listed on the **Manage Private Cloud Gateways** page. + +When you install the gateway, a cloud account is auto-created. To verify the cloud account is created, go to **Tenant Settings > Cloud Accounts** and locate **MAAS** in the table. Verify your MAAS account is listed. + + + + +### Edit PCG Configuration File + +Use the following steps if you want to edit the PCG configuration file directly. + +
+ +1. Copy the ``pcg.yaml`` file out of ``/tmp/install-user-defined-MaaS-Gateway_Name-20210805155034/pcg.yaml`` and into ``/tmp`` as follows. + + +```bash +cp /tmp/install-user-defined-MaaS-Gateway_Name-20210805155034/pcg.yaml /tmp +``` + + +2. Make the necessary changes to the configuration file. + + +3. Before you redeploy the gateway, do the following: + 
+ + - Ensure the pairing code in the configuration file is the same as the pairing code displayed in the installation instructions in Palette. To verify the pairing code, click the **Create Private Cloud Gateway** button and select **MAAS**. Note the pairing code and verify it is the same code in the configuration file. + +
+ + - If the codes do not match, modify the code in the configuration file so it matches the code displayed in Palette. + +
+ +:::caution + +Issues can occur with the PCG installation if the pairing code in Palette changes during the time it takes to modify the configuration file. Ensure pairing codes in Palette and the configuration file match before you redeploy the gateway. + +If you stop the installation or it fails due to mismatched pairing codes, the gateway might display as **Pending (unnamed)** on the **Private Cloud Gateways** page. If this happens, delete the gateway and ensure pairing codes in Palette and the configuration file match before redeploying the gateway. + +::: + +
+ +4. To redeploy the gateway, copy the following code snippet to your terminal and provide the gateway configuration file as input. + + +```bash +docker run -it --rm \ +--net=host \ +-v /var/run/docker.sock:/var/run/docker.sock \ +-v /tmp:/opt/spectrocloud \ +gcr.io/spectro-images-public/release/spectro-installer:1.0.12 \ +-s true \ +-c /opt/spectrocloud/pcg.yaml +``` + +The installer requests available bare metal machines in your MAAS environment on which to install the gateway. + +If you need assistance, please visit our [Customer Support](https://spectrocloud.atlassian.net/servicedesk/customer/portals) portal. + 
+ +
+
+ + + + +## Update and Manage the PCG + +Palette maintains the Operating System (OS) image and all configurations for the PCG. Periodically, the OS images, configurations, and other components need to be updated to resolve security or functionality issues. Palette releases updates when required, and informs you with an update notification when you click on the gateway in the **Manage Cloud Gateways** page. + +Review the changes in the update notification, and apply the update when you are ready. + +Updating the cloud gateway does not result in any downtime for the tenant clusters. During the update process, new cluster provisioning is unavailable. New cluster requests are queued and processed when the gateway update is complete. + +
+ +## Delete the MAAS Gateway + +Follow these steps to delete a MAAS gateway. +
+ +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. Navigate to the **Main Menu** and select **Tenant Settings > Private Cloud Gateways**. + + +3. Click the **three-dot Menu** for the gateway instance you want to delete and choose **Delete**. + + Palette checks for running tenant clusters associated with the gateway instance and displays an error message if it detects any. +
+ +4. If there are running clusters, delete them and retry deleting the gateway instance. + +
+ +## Resize the MAAS Gateway + +You can set up a PCG as a single-node (no HA) or three-node (HA) cluster. If you initially set up the PCG with one node, you can resize it to three nodes at a later time. + 
+ +:::info + +For production environments, we recommend setting up three nodes. + +::: + +### Prerequisites + +- Each PCG node requires the following: + + - 4 CPUs + - 8192 MiB memory + - 60 GiB storage + +Follow these steps to resize a single-node gateway to three nodes. + + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. Navigate to the **Main Menu** and select **Tenant Settings > Private Cloud Gateways**. + + +3. Click the **three-dot Menu** for the gateway instance you want to resize and choose **Set number of nodes**. + + +4. Change the number of nodes to 3. + +Two new nodes will be added to the PCG cluster. + + +:::caution + +Ensure the MAAS server has two more machines in the **Ready** state in the same Availability Zone and Resource Pool combination. + +::: + + +### Validate + +You can validate that your PCG has been resized by navigating to the **Private Cloud Gateways** page. Select the resized gateway instance and click the **Nodes** tab. Two additional nodes are displayed along with their health status. Three nodes in total will be listed. + + +## Next Steps + +You can now create tenant clusters in the auto-created cloud account. To get started, check out [Create and Manage MAAS Clusters](create-manage-maas-clusters.md). + +You can also create additional cloud accounts if you need them. Refer to [Register and Manage MAAS Cloud Accounts](register-manage-maas-cloud-accounts.md). + + + +# Resources + + - [Install MAAS](https://maas.io/) + + + - [MAAS Fresh Install](https://maas.io/docs/how-to-install-maas) + + + - [Manage MAAS User Accounts](https://maas.io/docs/how-to-manage-user-accounts#heading--api-key) + diff --git a/docs/docs-content/clusters/data-center/maas/maas.md b/docs/docs-content/clusters/data-center/maas/maas.md new file mode 100644 index 0000000000..4bed8085a8 --- /dev/null +++ b/docs/docs-content/clusters/data-center/maas/maas.md @@ -0,0 +1,25 @@ +--- +sidebar_label: "MAAS" +title: "MAAS" +description: "Learn how to configure MAAS and create MAAS clusters in Palette" +hide_table_of_contents: false +tags: ["data center", "maas"] +--- + +Palette enables seamless integration with Canonical MAAS, allowing you to deploy and manage Kubernetes clusters directly on bare metal servers. Palette achieves this through the Private Cloud Gateway (PCG), establishing a secure connection from the internal network to the internet-accessible Palette instance and effectively bypassing NAT gateways and firewalls. + + +Palette also supports self-hosted deployment of Kubernetes clusters in the MAAS environment, allowing direct access to MAAS through a private network without the need for a PCG. This setup ensures network connectivity and flexibility in managing Kubernetes clusters on bare metal servers, either through a VPN or by directly accessing the Palette instance in a private network. 
+ +## Resources + +- [MAAS Bare-Metal Architecture](architecture.md) + + +- [Install and Manage MAAS Gateway](install-manage-maas-pcg.md) + + +- [Register and Manage MAAS Cloud Accounts](register-manage-maas-cloud-accounts.md) + + +- [Create and Manage MAAS Cluster](create-manage-maas-clusters.md) diff --git a/docs/docs-content/clusters/data-center/maas/register-manage-maas-cloud-accounts.md b/docs/docs-content/clusters/data-center/maas/register-manage-maas-cloud-accounts.md new file mode 100644 index 0000000000..45346be954 --- /dev/null +++ b/docs/docs-content/clusters/data-center/maas/register-manage-maas-cloud-accounts.md @@ -0,0 +1,79 @@ +--- +sidebar_label: "Register and Manage MAAS Cloud Accounts" +title: "Register and Manage MAAS Cloud Accounts" +description: "Learn how to register and manage your MAAS cloud accounts in Palette." +hide_table_of_contents: false +sidebar_position: 20 +tags: ["data center", "maas"] +--- + + +When you install the Private Cloud Gateway (PCG), a cloud account is auto-created in every project in Palette. You can use this cloud account to create clusters at either the tenant or the project level. If desired, you can create additional cloud accounts that reference specific PCGs. + +## Prerequisites + +- An installed PCG if you do not have a direct connection to the MAAS environment. Review [Install and Manage MAAS Gateway](install-manage-maas-pcg.md) for guidance. + + If are self-hosting Palette and have a direct connection to the MAAS environment, you can select **Use System Private Gateway**. To learn more about when you would use Palette's PCG or the System Private Gateway, refer to the [Architecture](architecture.md) page. + + + +- An active [MAAS API key](https://maas.io/docs/api-authentication-reference) which can be generated in the MAAS web console under **My Preferences** > **API keys**. The following is an example key: + + ``APn53wz232ZwBMxDp5:MHZIbUp3e4DJTjZEKg:mdEv33WAG536MhNC8mIywNLtjcDTnFAQ`` + + For details, refer to the MAAS document on [how to add an API key](https://maas.io/docs/how-to-manage-user-accounts#heading--api-key). + + +## Register a MAAS Cloud Account + +Follow these steps to create additional MAAS cloud accounts. + +
+ +## Resources + +- [MAAS Bare-Metal Architecture](architecture.md) + + +- [Install and Manage MAAS Gateway](install-manage-maas-pcg.md) + + +- [Register and Manage MAAS Cloud Accounts](register-manage-maas-cloud-accounts.md) + + +- [Create and Manage MAAS Clusters](create-manage-maas-clusters.md) diff --git a/docs/docs-content/clusters/data-center/maas/register-manage-maas-cloud-accounts.md b/docs/docs-content/clusters/data-center/maas/register-manage-maas-cloud-accounts.md new file mode 100644 index 0000000000..45346be954 --- /dev/null +++ b/docs/docs-content/clusters/data-center/maas/register-manage-maas-cloud-accounts.md @@ -0,0 +1,79 @@ +--- +sidebar_label: "Register and Manage MAAS Cloud Accounts" +title: "Register and Manage MAAS Cloud Accounts" +description: "Learn how to register and manage your MAAS cloud accounts in Palette." +hide_table_of_contents: false +sidebar_position: 20 +tags: ["data center", "maas"] +--- + + +When you install the Private Cloud Gateway (PCG), a cloud account is auto-created in every project in Palette. You can use this cloud account to create clusters at either the tenant or the project level. If desired, you can create additional cloud accounts that reference specific PCGs. + +## Prerequisites + +- An installed PCG if you do not have a direct connection to the MAAS environment. Review [Install and Manage MAAS Gateway](install-manage-maas-pcg.md) for guidance. + + If you are self-hosting Palette and have a direct connection to the MAAS environment, you can select **Use System Private Gateway**. To learn more about when you would use Palette's PCG or the System Private Gateway, refer to the [Architecture](architecture.md) page. + + + +- An active [MAAS API key](https://maas.io/docs/api-authentication-reference), which can be generated in the MAAS web console under **My Preferences** > **API keys**. The following is an example key: + + ``APn53wz232ZwBMxDp5:MHZIbUp3e4DJTjZEKg:mdEv33WAG536MhNC8mIywNLtjcDTnFAQ`` + + For details, refer to the MAAS document on [how to add an API key](https://maas.io/docs/how-to-manage-user-accounts#heading--api-key). + + +## Register a MAAS Cloud Account + +Follow these steps to create additional MAAS cloud accounts. + 
+
+:::info
+
+For a self-hosted Palette instance, the MAAS API endpoint is reachable on port 5240.
+
+:::
+
+<br />
+ +| Property | Description | +|-----------|-------------| +| Account Name | Custom name for the cloud name. | +| Use System Private Gateway | This setting is for self-hosted environments that do not require a PCG. Toggle this option to bypass installing the PCG.| +| Select Private Cloud Gateway | Select your MAAS cloud gateway from the **drop-down Menu**. | +| API Endpoint | API endpoint of the gateway. | +| API Key | The MAAS API key. | + +5. Click **Confirm** to register your MAAS cloud account. + + +## Validate + +You can validate your MAAS cloud account is registered by reviewing the **Cloud Accounts** page. Ensure your account is listed under **MAAS**. + +## Next Steps + +Deploy a Kubernetes cluster to one of your MAAS accounts. Check out [Create and Manage MAAS Cluster](create-manage-maas-clusters.md) for guidance. + + + diff --git a/docs/docs-content/clusters/data-center/openstack.md b/docs/docs-content/clusters/data-center/openstack.md new file mode 100644 index 0000000000..2e862af6bc --- /dev/null +++ b/docs/docs-content/clusters/data-center/openstack.md @@ -0,0 +1,869 @@ +--- +sidebar_label: "OpenStack" +title: "OpenStack" +description: "The methods of creating clusters for a speedy deployment on any CSP" +hide_table_of_contents: false +sidebar_position: 20 +toc_min_heading_level: 2 +toc_max_heading_level: 3 +tags: ["data center", "openstack"] +--- + +The following are some highlights of OpenStack clusters provisioned by Palette: + +1. Palette enables the ability to use OpenStack as an application platform for engineering team. + + + +2. To facilitate communication between Palette and the OpenStack controllers installed in the private data center, a Private Cloud Gateway (PCG) must be set up within the environment. + + +3. Private Cloud Gateway (PCG) is Palette's self-hosted component to support isolated private cloud or data center environments. Once installed, the PCG registers itself with Palette's SaaS portal and enables secure communication between the SaaS portal and the private cloud environment. The PCG enables installation and end-to-end lifecycle management of Kubernetes clusters in private cloud environments from Palette's SaaS portal. + +![openstack_cluster_architecture.png](/openstack_cluster_architecture.png) + +## Prerequisites + +The following prerequisites must be met before deploying a Kubernetes clusters in OpenStack: + +1. OpenStack Victoria (recommended). + + +2. NTP configured on all Hosts. + + +3. Shared Storage between OpenStack hosts. + + +4. You must have an active OpenStack account with access to all the projects that you would like to provision clusters into. The account should have all the permissions listed below in the "OpenStack Cloud Account Permissions" section. + + +5. You should have an Infrastructure cluster profile created in Palette for OpenStack. + + +6. Install a Private Cloud Gateway for OpenStack as described in the **Installing Private Cloud Gateway - OpenStack** section below. Installing the Private Cloud Gateway will automatically register a cloud account for OpenStack in Palette. You can register your additional OpenStack cloud accounts in Palette as described in the **Creating a OpenStack Cloud account** section below. + + +7. Egress access to the internet (direct or via proxy): + * For proxy: HTTP_PROXY, HTTPS_PROXY (both required) + * Outgoing internet connection on port 443 to api.spectrocloud.com + + +8. DNS to resolve public internet names (e.g.: api.spectrocloud.com). + + +9. 
Sufficient IPs for application workload services (e.g.: Load Balancer services). + + +10. Per workload cluster IP requirements: + * One (1) per cluster node + * One (1) Kubernetes control-plane VIP + + +## OpenStack Cloud Account Permissions + + + + + + + +### Cinder Service + +**Last Update**: June 28, 2021 + +``` json +"volume:attachment_update": "rule:admin_or_owner" +"volume:attachment_delete": "rule:admin_or_owner" +"volume:attachment_complete": "rule:admin_or_owner" +"volume:multiattach_bootable_volume": "rule:admin_or_owner" +"message:get_all": "rule:admin_or_owner" +"message:get": "rule:admin_or_owner" +"message:delete": "rule:admin_or_owner" +"volume:get_snapshot_metadata": "rule:admin_or_owner" +"volume:update_snapshot_metadata": "rule:admin_or_owner" +"volume:delete_snapshot_metadata": "rule:admin_or_owner" +"volume:get_all_snapshots": "rule:admin_or_owner" +"volume_extension:extended_snapshot_attributes": "rule:admin_or_owner" +"volume:create_snapshot": "rule:admin_or_owner" +"volume:get_snapshot": "rule:admin_or_owner" +"volume:update_snapshot": "rule:admin_or_owner" +"volume:delete_snapshot": "rule:admin_or_owner" +"backup:get_all": "rule:admin_or_owner" +"backup:get": "rule:admin_or_owner" +"backup:update": "rule:admin_or_owner" +"backup:delete": "rule:admin_or_owner" +"backup:restore": "rule:admin_or_owner" +"group:get_all": "rule:admin_or_owner" +"group:get": "rule:admin_or_owner" +"group:update": "rule:admin_or_owner" +"group:get_all_group_snapshots": "rule:admin_or_owner" +"group:get_group_snapshot": "rule:admin_or_owner" +"group:delete_group_snapshot": "rule:admin_or_owner" +"group:update_group_snapshot": "rule:admin_or_owner" +"group:reset_group_snapshot_status": "rule:admin_or_owner" +"group:delete": "rule:admin_or_owner" +"group:enable_replication": "rule:admin_or_owner" +"group:disable_replication": "rule:admin_or_owner" +"group:failover_replication": "rule:admin_or_owner" +"group:list_replication_targets": "rule:admin_or_owner" +"volume_extension:quotas:show": "rule:admin_or_owner" +"limits_extension:used_limits": "rule:admin_or_owner" +"volume_extension:volume_type_access": "rule:admin_or_owner" +"volume:extend": "rule:admin_or_owner" +"volume:extend_attached_volume": "rule:admin_or_owner" +"volume:revert_to_snapshot": "rule:admin_or_owner" +"volume:retype": "rule:admin_or_owner" +"volume:update_readonly_flag": "rule:admin_or_owner" +"volume_extension:volume_actions:upload_image": "rule:admin_or_owner" +"volume_extension:volume_actions:initialize_connection": "rule:admin_or_owner" +"volume_extension:volume_actions:terminate_connection": "rule:admin_or_owner" +"volume_extension:volume_actions:roll_detaching": "rule:admin_or_owner" +"volume_extension:volume_actions:reserve": "rule:admin_or_owner" +"volume_extension:volume_actions:unreserve": "rule:admin_or_owner" +"volume_extension:volume_actions:begin_detaching": "rule:admin_or_owner" +"volume_extension:volume_actions:attach": "rule:admin_or_owner" +"volume_extension:volume_actions:detach": "rule:admin_or_owner" +"volume:get_all_transfers": "rule:admin_or_owner" +"volume:create_transfer": "rule:admin_or_owner" +"volume:get_transfer": "rule:admin_or_owner" +"volume:delete_transfer": "rule:admin_or_owner" +"volume:get_volume_metadata": "rule:admin_or_owner" +"volume:create_volume_metadata": "rule:admin_or_owner" +"volume:update_volume_metadata": "rule:admin_or_owner" +"volume:delete_volume_metadata": "rule:admin_or_owner" +"volume_extension:volume_image_metadata": "rule:admin_or_owner" +"volume:get": 
"rule:admin_or_owner" +"volume:get_all": "rule:admin_or_owner" +"volume:update": "rule:admin_or_owner" +"volume:delete": "rule:admin_or_owner" +"volume_extension:volume_tenant_attribute": "rule:admin_or_owner" +"volume_extension:volume_encryption_metadata": "rule:admin_or_owner" +"volume:multiattach": "rule:admin_or_owner" + +``` + + + + + + + +### Neutron Service + +**Last Update**: June 28, 2021 + +``` json + "create_subnet": "rule:admin_or_network_owner", + "get_subnet": "rule:admin_or_owner or rule:shared", + "update_subnet": "rule:admin_or_network_owner", + "delete_subnet": "rule:admin_or_network_owner", + "get_subnetpool": "rule:admin_or_owner or rule:shared_subnetpools", + "update_subnetpool": "rule:admin_or_owner", + "delete_subnetpool": "rule:admin_or_owner", + "get_address_scope": "rule:admin_or_owner or rule:shared_address_scopes", + "update_address_scope": "rule:admin_or_owner", + "delete_address_scope": "rule:admin_or_owner", + "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc", + "update_network": "rule:admin_or_owner", + "delete_network": "rule:admin_or_owner", + "network_device": "field:port:device_owner=~^network:", + "create_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner", + "create_port:mac_address": "rule:context_is_advsvc or rule:admin_or_network_owner", + "create_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner or rule:shared", + "create_port:fixed_ips:ip_address": "rule:context_is_advsvc or rule:admin_or_network_owner", + "create_port:fixed_ips:subnet_id": "rule:context_is_advsvc or rule:admin_or_network_owner or rule:shared", + "create_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", + "create_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", + "create_port:allowed_address_pairs": "rule:admin_or_network_owner", + "create_port:allowed_address_pairs:mac_address": "rule:admin_or_network_owner", + "create_port:allowed_address_pairs:ip_address": "rule:admin_or_network_owner", + "get_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner", + "update_port": "rule:admin_or_owner or rule:context_is_advsvc", + "update_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner", + "update_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner or rule:shared", + "update_port:fixed_ips:ip_address": "rule:context_is_advsvc or rule:admin_or_network_owner", + "update_port:fixed_ips:subnet_id": "rule:context_is_advsvc or rule:admin_or_network_owner or rule:shared", + "update_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", + "update_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner", + "update_port:allowed_address_pairs": "rule:admin_or_network_owner", + "update_port:allowed_address_pairs:mac_address": "rule:admin_or_network_owner", + "update_port:allowed_address_pairs:ip_address": "rule:admin_or_network_owner", + "delete_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner", + "create_router:external_gateway_info": "rule:admin_or_owner", + "create_router:external_gateway_info:network_id": "rule:admin_or_owner", + "get_router": "rule:admin_or_owner", + "update_router": "rule:admin_or_owner", + "update_router:external_gateway_info": "rule:admin_or_owner", + "update_router:external_gateway_info:network_id": "rule:admin_or_owner", + 
"delete_router": "rule:admin_or_owner", + "add_router_interface": "rule:admin_or_owner", + "remove_router_interface": "rule:admin_or_owner", + "update_floatingip": "rule:admin_or_owner", + "delete_floatingip": "rule:admin_or_owner", + "get_floatingip": "rule:admin_or_owner", + "update_rbac_policy": "rule:admin_or_owner", + "update_rbac_policy:target_tenant": "rule:restrict_wildcard and rule:admin_or_owner", + "get_rbac_policy": "rule:admin_or_owner", + "delete_rbac_policy": "rule:admin_or_owner", + "get_auto_allocated_topology": "rule:admin_or_owner", + "get_trunk": "rule:admin_or_owner", + "delete_trunk": "rule:admin_or_owner", + "add_subports": "rule:admin_or_owner", + "remove_subports": "rule:admin_or_owner", + "get_security_groups": "rule:admin_or_owner", + "get_security_group": "rule:admin_or_owner", + "create_security_group": "rule:admin_or_owner", + "update_security_group": "rule:admin_or_owner", + "delete_security_group": "rule:admin_or_owner", + "get_security_group_rules": "rule:admin_or_owner", + "get_security_group_rule": "rule:admin_owner_or_sg_owner", + "create_security_group_rule": "rule:admin_or_owner", + "delete_security_group_rule": "rule:admin_or_owner", + +``` + + + + + + +### Glance Service + +**Last Update**: June 28, 2021 + +``` json + "add_image": "role:admin or role:member", + "delete_image": "role:admin or role:member", + "get_image": "role:admin or role:member", + "get_images": "role:admin or role:member", + "publicize_image": "role:admin or role:member", + "download_image": "role:admin or role:member", + "upload_image": "role:admin or role:member", + "get_image_location": "role:admin or role:member", + "set_image_location": "role:admin or role:member", +``` + + + + + + + +### Nova Compute Service + +**Last Update**: June 28, 2021 + +``` json + "os_compute_api:os-admin-password": "rule:admin_or_owner", + "os_compute_api:os-attach-interfaces": "rule:admin_or_owner", + "os_compute_api:os-attach-interfaces:create": "rule:admin_or_owner", + "os_compute_api:os-attach-interfaces:delete": "rule:admin_or_owner", + "os_compute_api:os-availability-zone:list": "rule:admin_or_owner", + "os_compute_api:os-config-drive": "rule:admin_or_owner", + "os_compute_api:os-console-output": "rule:admin_or_owner", + "os_compute_api:os-consoles:create": "rule:admin_or_owner", + "os_compute_api:os-consoles:show": "rule:admin_or_owner", + "os_compute_api:os-consoles:delete": "rule:admin_or_owner", + "os_compute_api:os-consoles:index": "rule:admin_or_owner", + "os_compute_api:os-create-backup": "rule:admin_or_owner", + "os_compute_api:os-deferred-delete": "rule:admin_or_owner", + "os_compute_api:os-extended-availability-zone": "rule:admin_or_owner", + "os_compute_api:os-extended-status": "rule:admin_or_owner", + "os_compute_api:os-extended-volumes": "rule:admin_or_owner", + "os_compute_api:extensions": "rule:admin_or_owner", + "os_compute_api:os-flavor-access": "rule:admin_or_owner", + "os_compute_api:os-flavor-extra-specs:show": "rule:admin_or_owner", + "os_compute_api:os-flavor-extra-specs:index": "rule:admin_or_owner", + "os_compute_api:os-flavor-rxtx": "rule:admin_or_owner", + "os_compute_api:flavors": "rule:admin_or_owner", + "os_compute_api:os-floating-ip-dns": "rule:admin_or_owner", + "os_compute_api:os-floating-ip-pools": "rule:admin_or_owner", + "os_compute_api:os-floating-ips": "rule:admin_or_owner", + "os_compute_api:os-fping": "rule:admin_or_owner", + "os_compute_api:image-size": "rule:admin_or_owner", + "os_compute_api:os-instance-actions": "rule:admin_or_owner", + 
"os_compute_api:ips:show": "rule:admin_or_owner", + "os_compute_api:ips:index": "rule:admin_or_owner", + "os_compute_api:os-keypairs": "rule:admin_or_owner", + "os_compute_api:limits": "rule:admin_or_owner", + "os_compute_api:os-lock-server:lock": "rule:admin_or_owner", + "os_compute_api:os-lock-server:unlock": "rule:admin_or_owner", + "os_compute_api:os-multinic": "rule:admin_or_owner", + "os_compute_api:os-networks:view": "rule:admin_or_owner", + "os_compute_api:os-pause-server:pause": "rule:admin_or_owner", + "os_compute_api:os-pause-server:unpause": "rule:admin_or_owner", + "os_compute_api:os-quota-sets:show": "rule:admin_or_owner", + "os_compute_api:os-quota-sets:detail": "rule:admin_or_owner", + "os_compute_api:os-remote-consoles": "rule:admin_or_owner", + "os_compute_api:os-rescue": "rule:admin_or_owner", + "os_compute_api:os-security-groups": "rule:admin_or_owner", + "os_compute_api:os-server-groups": "rule:admin_or_owner", + "os_compute_api:server-metadata:index": "rule:admin_or_owner", + "os_compute_api:server-metadata:show": "rule:admin_or_owner", + "os_compute_api:server-metadata:create": "rule:admin_or_owner", + "os_compute_api:server-metadata:update_all": "rule:admin_or_owner", + "os_compute_api:server-metadata:update": "rule:admin_or_owner", + "os_compute_api:server-metadata:delete": "rule:admin_or_owner", + "os_compute_api:os-server-password": "rule:admin_or_owner", + "os_compute_api:os-server-tags:delete_all": "rule:admin_or_owner", + "os_compute_api:os-server-tags:index": "rule:admin_or_owner", + "os_compute_api:os-server-tags:update_all": "rule:admin_or_owner", + "os_compute_api:os-server-tags:delete": "rule:admin_or_owner", + "os_compute_api:os-server-tags:update": "rule:admin_or_owner", + "os_compute_api:os-server-tags:show": "rule:admin_or_owner", + "os_compute_api:os-server-usage": "rule:admin_or_owner", + "os_compute_api:servers:index": "rule:admin_or_owner", + "os_compute_api:servers:detail": "rule:admin_or_owner", + "os_compute_api:servers:show": "rule:admin_or_owner", + "os_compute_api:servers:create": "rule:admin_or_owner", + "os_compute_api:servers:create:attach_volume": "rule:admin_or_owner", + "os_compute_api:servers:create:attach_network": "rule:admin_or_owner", + "os_compute_api:servers:delete": "rule:admin_or_owner", + "os_compute_api:servers:update": "rule:admin_or_owner", + "os_compute_api:servers:confirm_resize": "rule:admin_or_owner", + "os_compute_api:servers:revert_resize": "rule:admin_or_owner", + "os_compute_api:servers:reboot": "rule:admin_or_owner", + "os_compute_api:servers:resize": "rule:admin_or_owner", + "os_compute_api:servers:rebuild": "rule:admin_or_owner", + "os_compute_api:servers:create_image": "rule:admin_or_owner", + "os_compute_api:servers:create_image:allow_volume_backed": "rule:admin_or_owner", + "os_compute_api:servers:start": "rule:admin_or_owner", + "os_compute_api:servers:stop": "rule:admin_or_owner", + "os_compute_api:servers:trigger_crash_dump": "rule:admin_or_owner", + "os_compute_api:os-shelve:shelve": "rule:admin_or_owner", + "os_compute_api:os-shelve:unshelve": "rule:admin_or_owner", + "os_compute_api:os-simple-tenant-usage:show": "rule:admin_or_owner", + "os_compute_api:os-suspend-server:resume": "rule:admin_or_owner", + "os_compute_api:os-suspend-server:suspend": "rule:admin_or_owner", + "os_compute_api:os-tenant-networks": "rule:admin_or_owner", + "os_compute_api:os-virtual-interfaces": "rule:admin_or_owner", + "os_compute_api:os-volumes": "rule:admin_or_owner", + "os_compute_api:os-volumes-attachments:index": 
"rule:admin_or_owner", + "os_compute_api:os-volumes-attachments:create": "rule:admin_or_owner", + "os_compute_api:os-volumes-attachments:show": "rule:admin_or_owner", + "os_compute_api:os-volumes-attachments:delete": "rule:admin_or_owner" + + +``` + + + + + + + + +## Installing Private Cloud Gateway - OpenStack + + +Use the following steps to install a PCG cluster in your OpenStack environment. You can use the [Palette CLI](../../palette-cli/palette-cli.md) or the PCG Installer Image to deploy a PCG cluster. Review the prerequisites for each option to help you identify the correct installation method. + + +
+
+
+
+
+
+
+### Prerequisites
+
+
+The following prerequisites are required to install an OpenStack PCG using the Palette CLI.
+
+- Palette version 4.0.X or greater.
+
+
+- A Palette API key. Refer to the [Create API Key](../../user-management/user-authentication.md#api-key) page for guidance.
+
+
+- Download the Palette CLI from the [Downloads](../../spectro-downloads.md#palette-cli) page and install the CLI. Refer to the [Palette CLI Install](../../palette-cli/install-palette-cli.md) guide to learn more.
+
+
+- A Linux x86-64 host with the Docker daemon installed and connectivity to both the Palette management console and the OpenStack controller. The PCG installer runs as a Docker container on this host.
+
+
+- Private Cloud Gateway IP requirements:
+    * One IP address for a single-node PCG or three IP addresses for a three-node PCG.
+    * One IP address for the Kubernetes control plane.
+    * One additional Kubernetes control plane IP address for rolling upgrades.
+
+
+
+
+### Install PCG
+
+
+1. In an x86 Linux host, open up a terminal session.
+
+
+
+2. Use the Palette CLI `login` command to authenticate the CLI with Palette. When prompted, enter the information listed in the following table.
+
+<br />
+ + ```shell + palette login + ``` + +
+ + |**Parameter** | **Description**| + |:-----------------------------|---------------| + |**Spectro Cloud Console** |Enter the Palette endpoint URL. When using the Palette SaaS service, enter ``https://console.spectrocloud.com``. When using a self-hosted instance of Palette, enter the URL for that instance. | + |**Allow Insecure Connection** |Enabling this option bypasses x509 verification. Enter `y` if you are using a self-hosted Palette instance with self-signed TLS certificates. Otherwise, enter `n`.| + |**Spectro Cloud API Key** |Enter your Palette API Key.| + |**Spectro Cloud Organization** |Enter your Palette Organization name.| + |**Spectro Cloud Project** |Enter your desired project name within the selected Organization.| + + +3. Once you have authenticated successfully, invoke the PCG installer by issuing the following command. When prompted, enter the information listed in each of the following tables. + +
+ + ```bash + palette pcg install + ``` + +
+ + |**Parameter** | **Description**| + |:-----------------------------|---------------| + |**Cloud Type**| Choose OpenStack.| + |**Private Cloud Gateway Name** | Enter a custom name for the PCG. Example: `openstack-pcg-1`.| + |**Share PCG Cloud Account across platform Projects** |Enter `y` if you want the Cloud Account associated with the PCG to be available from all projects within your organization. Enter `n` if you want the Cloud Account to only be available at the tenant admin scope.| + + +4. Next, provide environment configurations for the cluster. Refer to the following table for information about each option. + +
+ + |**Parameter**| **Description**| + |:-------------|----------------| + |**HTTPS Proxy**|Leave this blank unless you are using an HTTPS Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: `https://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| + |**HTTP Proxy**|Leave this blank unless you are using an HTTP Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: `http://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| + |**No Proxy**|The default is blank. You can add a comma-separated list of local network CIDR addresses, hostnames, and domain names that should be excluded from being a proxy. This setting will be propagated to all the nodes to bypass the proxy server. Example if you have a self-hosted environment: `my.company.com,10.10.0.0/16`.| + |**Proxy CA Certificate Filepath**|The default is blank. You can provide the file path of a CA certificate on the installer host. If provided, this CA certificate will be copied to each host in the PCG cluster during deployment. The provided path will be used on the PCG cluster hosts. Example: `/usr/local/share/ca-certificates/ca.crt`.| + |**Pod CIDR**|Enter the CIDR pool that will be used to assign IP addresses to pods in the PCG cluster. The pod IP addresses should be unique and not overlap with any machine IPs in the environment.| + |**Service IP Range**|Enter the IP address range that will be used to assign IP addresses to services in the PCG cluster. The service IP addresses should be unique and not overlap with any machine IPs in the environment.| + + + +5. After the environment options, the next set of prompts is for configuring the PCG cluster for the OpenStack environment. The following table contains information about each prompt. + +
+ + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + |**OpenStack Identity Endpoint** | OpenStack Identity endpoint. Domain or IP address.
Example: `https://openstack.mycompany.com/identity`.| + |**OpenStack Account Username** | OpenStack account username.| + |**OpenStack Account Password** | OpenStack account password.| + |**Allow Insecure Connection** |Enabling this option bypasses x509 verification. Enter `y` if you are using an OpenStack instance with self-signed TLS certificates. Otherwise, enter `n`.| + |**CA Certificate** |This is only required when using TLS, in which case you would provide a base64-encoded CA certificate for your OpenStack instance. | + +6. Next, fill out additional OpenStack configurations. + +
+ + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + | **Default Domain** | OpenStack Domain. Example: `Default`.| + | **Default Region** | OpenStack Region. Example: `RegionOne`.| + | **Default Project** | OpenStack Project. Example: `dev`.| + | **Placement Type** | Placement can be static or dynamic. For static placement, VMs are placed into existing networks. For dynamic placement, a new network is created.| + | **Network** | Select an existing network. This is only required for static placement.| + | **Subnet** | Select an existing subnet. This is only required for static placement.| + | **DNS Server(s)** | Enter a comma-separated list of DNS server IPs . This is only required for dynamic placement.| + | **Node CIDR** | Enter a node CIDR. This is only required for dynamic placement. Example: `10.55.0.0/24`.| + | **SSH Public Key** | Provide the public OpenSSH key for the PCG cluster. Use this key when establishing an SSH connection with the PCG cluster. This prompt will result in the default text editor for the Operating System to open. Vi is the more common text editor used in Linux environments.| + | **Patch OS on boot** | This parameter indicates whether or not to patch the OS of the PCG hosts on the first boot.| + | **Reboot nodes once OS patch is applied** | This parameter indicates whether or not to reboot PCG nodes after OS patches are complete. This only applies if the **Patch OS on boot** parameter is enabled.| + + +7. Configure the OpenStack PCG Machine by answering the following prompts. + +
+ + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + | **Availability Zone** | Select the availability zone. | + | **PCG Cluster Size** | Select the node size of the PCG cluster. You can choose between **1** node or **3** nodes for High Availability (HA). | + + +8. A new PCG configuration file is generated and its location is displayed on the console. You will receive an output similar to the following. + +
+ + ```bash hideClipboard + ==== PCG config saved ==== + Location: :/home/spectro/.palette/pcg/pcg-20230706150945/pcg.yaml + ``` + + :::info + + The `CloudAccount.apiKey` and `Mgmt.apiKey` values in the **pcg.yaml** are encrypted and cannot be manually updated. To change these values, restart the installation process using the `palette pcg install` command. + + ::: + +
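+
+If you want to review the generated configuration before continuing, you can open it with any text editor. This is an optional check; the path below is the example location from the output above, so substitute the path shown in your own console output.
+
+```shell hideClipboard
+# Optional: review the generated PCG configuration file (example path from the
+# output above). The CloudAccount.apiKey and Mgmt.apiKey values appear in
+# encrypted form and must not be edited by hand - rerun `palette pcg install`
+# to change them.
+less /home/spectro/.palette/pcg/pcg-20230706150945/pcg.yaml
+```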
+ +The Palette CLI will now provision a PCG cluster in your OpenStack environment. +If the deployment fails due to misconfiguration, update the PCG configuration file and rerun the installer. Refer to the [Edit and Redeploy PCG](#edit-and-redeploy-pcg) section below. For additional assistance, visit our [Customer Support](https://spectrocloud.atlassian.net/servicedesk/customer/portals) portal. + + + +### Validate + +Once installed, the PCG registers itself with Palette. To verify the PCG is registered, use the following steps. + + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. Navigate to the left **Main Menu** and select **Tenant Settings** + + +3. From the **Tenant Settings Menu** click on **Private Cloud Gateways**. Verify your PCG cluster is available from the list of PCG clusters displayed. + + +4. When you install the PCG, a cloud account is auto-created. To verify the cloud account is created, go to **Tenant Settings > Cloud Accounts** and locate **OpenStack** in the table. Verify your OpenStack account is listed. + + + +### Edit and Redeploy PCG + +To change the PCG install values, restart the installation process using the `palette pcg install` command. Use the following steps to redeploy the PCG or restart the install process. + +
+
+1. If needed, make changes to the PCG configuration file that the CLI created during installation. Use a text editor, such as Vi or Nano, to update the PCG install configuration file.
+
+<br />
+ + ```shell hideClipboard + ==== Create PCG reference config ==== + ==== PCG config saved ==== + Location: /Users/demo/.palette/pcg/pcg-20230717114807/pcg.yaml + ``` + + ```bash hideClipboard + vi /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml + ``` + + + +2. To redeploy the PCG, use the `install` command with the flag `--config-file`. Provide the file path to the generated PCG config file that was generated and displayed in the output. + +
+ + ```bash hideClipboard + palette pcg install --config-file /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml + ``` + +
+ + + + + + + + + +## PCG Installer Image + +### Prerequisites + +The following system requirements are required to deploy a PCG cluster. + +- Palette version 3.4.X or older. + +- A Linux environment with a Docker daemon installed and a connection to Palette and the OpenStack environment. The installer must be invoked on an up-to-date Linux system with an x86-64 architecture. ARM architecture is currently not supported. + + +- Private Cloud Gateway IP requirements: + * One IP address for a single-node PCG or three IP addresses for a three-node PCG cluster. + * One IP address for the Kubernetes control plane. + + +### Generate pairing code + +Navigate to the Private Cloud Gateway page under Administration and Create a new OpenStack gateway. Copy the pairing code displayed on the page. This will be used in subsequent steps. + +### Generate gateway config + +Invoke the gateway installer in interactive mode to generate the gateway configuration file. Follow the prompts to provide the Palette Management, OpenStack cloud account, Environment and Placement information as requested. + +```bash +docker run -it --rm \ + --net=host \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v /tmp:/opt/spectrocloud \ + gcr.io/spectro-images-public/release/spectro-installer:1.0.12 \ + -o true +``` + +#### Enter Palette Management Information: + +|**Parameter**| **Description**| +|----------------------------------------|:----------------| +|**Palette Console** | Management Console endpoint e.g. https://console.spectrocloud.com| +|**Palette Username** | Login email address
e.g. user1@company.com| +|**Palette Password** | Login password| +|**Private Cloud Gateway pairing code**| The unique authentication code
generated in the previous step.| + +#### Enter Environment Configuration: + +| **Parameter** | **Description** | + |------------------------------------|----------------| + |**HTTPS Proxy(--https_proxy)**|The endpoint for the HTTPS proxy server. This setting will be
propagated to all the nodes launched in the proxy network.
e.g., http://USERNAME:PASSWORD@PROXYIP:PROXYPORT| + |**HTTP Proxy(--http_proxy)**|The endpoint for the HTTP proxy server. This setting will be
propagated to all the nodes launched in the proxy network.
e.g., http://USERNAME:PASSWORD@PROXYIP:PROXYPORT| + |**No Proxy(--no_proxy)** |A comma-separated list of local network CIDRs, hostnames,
domain names that should be excluded from proxying.
This setting will be propagated to all the nodes to bypass the proxy server.
e.g., maas.company.com,10.10.0.0/16| + |**Pod CIDR (--pod_cidr)**|The CIDR pool is used to assign IP addresses to pods in the cluster.
This setting will be used to assign IP
addresses to pods in Kubernetes clusters.
The pod IP addresses should be unique and
should not overlap with any <br />
Virtual Machine IPs in the environment.| + |**Service IP Range (--svc_ip_range)**|The IP address that will be assigned to
services created on Kubernetes. This setting will be used
to assign IP addresses to services in Kubernetes clusters.
The service IP addresses should be unique and not
overlap with any virtual machine IPs in the environment.| + +#### Enter OpenStack Account Information: + +|**Parameter** | **Description**| +|-----------------------------------------|----------------| +|**OpenStack Identity Endpoint** | OpenStack Identity endpoint. Domain or IP address.
e.g. https://openstack.mycompany.com/identity| +|**OpenStack Account Username** | OpenStack account username| +|**OpenStack Account Password** | OpenStack account password| +|**Default Domain** | Default OpenStack domain. e.g. Default| +|**Default Region** | Default OpenStack region. e.g. RegionOne| +|**Default Project** | Default OpenStack project. e.g. dev| + + +#### Enter OpenStack cluster configuration for the Private Cloud Gateway: + +1. Verify the following parameters: + * Default Domain + * Default Region + * Default Project + + +2. Enter the values for: + +|**Parameter** | **Description**| +|-----------------------------------------|----------------| + | **SSH Key** | Select a key.| + | **Placement option as Static or Dynamic** | For static placement, VMs are placed into existing
networks whereas, for dynamic placement, new network is created.| + | **Network** | Select an existing network. | + | **Sub Network** | Select a sub network.| + +#### Enter OpenStack Machine configuration for the Private Cloud Gateway: + +* Select the availability zone +* Choose flavor +* Number of nodes: Choose between **1** and **3** + +After this step, a new gateway configuration file is generated and its location is displayed on the console. +e.g.: Config created:/opt/spectrocloud//install-pcg-ar-dev-os-gw-02-aug-01-20210802062349/pcg.yaml + + +## Copy Configuration File + +Copy the pcg.yaml file to a known location for easy access and updates. + + +```bash +cp /tmp/install-pcg-xxx/pcg.yaml /tmp +``` + + +## Deploy Private Cloud Gateway + +Invoke the gateway installer in *silent mode*, providing the gateway config file as input to deploy the gateway. New VM(s) will be launched in your OpenStack environment and a gateway will be installed on those VM(s). If deployment fails due to misconfiguration, update the gateway configuration file and rerun the command. + +```bash +docker run -it --rm \ + --net=host \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v /tmp:/opt/spectrocloud \ + gcr.io/spectro-images-public/release/spectro-installer:1.0.12 \ + -s true \ + -c //opt/spectrocloud/pcg.yaml +``` + +
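+
+The `-c` flag passes the configuration file path as seen from inside the installer container. Because the command bind-mounts the host **/tmp** directory to **/opt/spectrocloud** (`-v /tmp:/opt/spectrocloud`), the **pcg.yaml** file you copied to **/tmp** earlier is the file the installer reads. The following sketch illustrates the mapping; it assumes the copy step above was performed as shown, and the `alpine` image is used only for demonstration.
+
+```bash hideClipboard
+# Host side: the config file copied earlier lives in /tmp.
+ls -l /tmp/pcg.yaml
+
+# Container side: the same bind mount used by the installer exposes the file
+# at /opt/spectrocloud/pcg.yaml, which is the path given to the -c flag.
+docker run --rm -v /tmp:/opt/spectrocloud alpine ls -l /opt/spectrocloud/pcg.yaml
+```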
+ +
+ + + + + +## Upgrade PCG +Palette maintains the OS image and all configurations for the PCG. Periodically, the OS images, configurations, or other components need to be upgraded to resolve security or functionality issues. Palette releases such upgrades when required and in an upgrade notification on the PCG. + +Administrators should review the changes and apply them at a suitable time. Upgrading a PCG does not result in any downtime for the tenant clusters. During the upgrade process, the provisioning of new clusters might be temporarily unavailable. New cluster requests are queued while the PCG is being upgraded and are processed as soon as the PCG upgrade is complete. + +## Delete the PCG +The following steps need to be performed to delete a PCG: + +1. As a tenant admin, navigate to the Private Cloud Gateway page under settings. + + +2. Invoke the **Delete** action on the cloud gateway instance that needs to be deleted. + + +3. The system performs a validation to ensure that there are no running tenant clusters associated with the gateway instance being deleted. If such instances are found, the system presents an error. Delete relevant running tenant clusters and retry the deletion of the cloud gateway. + + +4. Delete the gateway. + +:::info +The delete gateway operation deletes the gateway instance registered in the management console, however the gateway infrastructure such as Load Balancers, VMs, Networks (if dynamic provision was chosen), etc. need to be deleted on the OpenStack console +::: + +## Resize the PCG +You can set up the PCG as a single-node or three-node cluster for high availability (HA). For production environments, we recommend three nodes. A PCG can be initially set up with one node and resized to three nodes later. Use the following steps to resize a single-node PCG cluster to a three-node PCG cluster. + +1. As a tenant administrator, navigate to the Private Cloud Gateway page under settings. + + +2. Invoke the resize action for the relevant cloud gateway instance. + + +3. Update the size from 1 to 3. + + +4. The gateway upgrade begins shortly after the update. Two new nodes are created, and the gateway is upgraded to a 3-node cluster. + + +# Creating an OpenStack Cloud Account + +A default cloud account is automatically created when the private cloud gateway is configured. This cloud account can be used to create tenant clusters. Additional cloud accounts may be created if desired within the same gateway. + +1. To create an OpenStack cloud account, proceed to project settings and select 'create cloud account' under OpenStack. + + +2. Fill the following values to the cloud account creation wizard. + + |**Property**|**Description** | + |:---------------|:-----------------------| + | **Account Name** | Custom name for the cloud account | + | **Private cloud gateway**| Reference to a running cloud gateway | + | **Username** | OpenStack Username | + | **Password**| OpenStack Password | + | **Identity Endpoint** | Identity Endpoint of the gateway | + | **CA Certificate** | Digital certificate of authority | + | **Parent Region** | OpenStack Region to be used | + | **Default Domain** | Default OpenStack domain | + | **Default Project** | Default OpenStack project | + + +# Deploying an OpenStack Cluster + + + + +The following steps need to be performed to provision a new OpenStack cluster: + +1. Provide basic cluster information like Name, Description, and Tags. Tags are currently not propagated to the VMs deployed on the cloud/data center environments. + + +2. 
Select a Cluster Profile created for the OpenStack environment. The profile definition will be used as the cluster construction template. + + +3. Review and override Pack Parameters as desired. By default, Parameters for all packs are set with values defined in the Cluster Profile. + + +4. Provide an OpenStack Cloud account and placement information. + + * **Cloud Account** - Select the desired cloud account. OpenStack cloud accounts with credentials need to be preconfigured in project settings. An account is auto-created as part of the cloud gateway setup and is available for provisioning of tenant clusters if permitted by the administrator. + * Domain + * Region + * Project + * SSH Key + * Placement + * If the user choice of placement is Static then: + * Network + * Subnet + * If the user choice of placement is NOT Static then: + * Subnet CIDR + * DNS Name Server + +5. Configure the master and worker node pools. Fill out the input fields in the **Add node pool** page. The following table contains an explanation of the available input parameters. + +### Master Pool + +|**Parameter** | **Description**| +|------------------|---------------| +|**Name** |A descriptive name for the node pool.| +|**Size** |Number of VMs to be provisioned for the node pool. For the master pool, this number can be 1, 3, or 5.| +|**Allow worker capability**|Select this option for allowing workloads to be provisioned on master nodes.| +|**[Labels](../cluster-management/taints.md#labels)**| Add a label to apply placement constraints on a pod, such as a node eligible for receiving the workload. +|**[Taints](../cluster-management/taints.md#taints)**|To set toleration to pods and allow (but do not require) the pods to schedule onto nodes with matching taints.| +|**Instance type** |Select the compute instance type to be used for all nodes in the node pool.| +|**Availability Zones**| Choose one or more availability zones. Palette provides fault tolerance to guard against hardware failures, network failures, etc., by provisioning nodes across availability zones if multiple zones are selected.| +|**Disk Size**|Give the required storage size| + +### Worker Pool + +|**Parameter** | **Description**| +|------------------|---------------| +|**Name** |A descriptive name for the node pool.| +|**Enable Autoscaler**|You can enable the autoscaler, by toggling the **Enable Autoscaler** button. Autoscaler scales up and down resources between the defined minimum and the maximum number of nodes to optimize resource utilization.| +||Set the scaling limit by setting the **Minimum Size** and **Maximum Size**, as per the workload the number of nods will scale up from minimum set value to maximum set value and the scale down from maximum set value to minimum set value| +|**Size** |Number of VMs to be provisioned for the node pool.| +|**Rolling Update**| Rolling update has two available options. Review the [Update Parameter](#update-parameter-table) table below for more details. +|**[Labels](../cluster-management/taints.md#labels)**|Add a label to apply placement constraints on a pod, such as a node eligible for receiving the workload. +|**[Taints](../cluster-management/taints.md#taints)**|To set toleration to pods and allow (but do not require) the pods to schedule onto nodes with matching taints.| +|**Instance type** |Select the compute instance type to be used for all nodes in the node pool.| +|**Availability Zones**| Choose one or more availability zones. 
Palette provides fault tolerance to guard against hardware failures, network failures, etc., by provisioning nodes across availability zones if multiple zones are selected.| +|**Disk Size**|Provide the required storage size + + + +6. Configure the cluster policies/features. + * Manage Machines + * Scan Policies + * Backup Policies + + +7. Click to get details on [cluster management feature](../cluster-management/cluster-management.md). + + +8. Review settings and deploy the cluster. Provisioning status with details of ongoing provisioning tasks is available to track progress. + +## Deleting an OpenStack Cluster + +The deletion of an OpenStack cluster results in the removal of all Virtual machines and associated storage disks created for the cluster. The following tasks need to be performed to delete an OpenStack cluster: + +1. Select the cluster to be deleted from the **Cluster** **View** page and navigate to the **Cluster Overview** page. + + +2. Invoke a delete action available on the page: **Cluster** > **Settings** > **Cluster** **Settings** > **Delete** **Cluster**. + + +3. Click **Confirm** to delete. + + +The Cluster Status is updated to **Deleting** while cluster resources are being deleted. Provisioning status is updated with the ongoing progress of the delete operation. Once all resources are successfully deleted, the cluster status changes to **Deleted** and is removed from the list of clusters. + + +:::info +Delete action is only available for clusters that are fully provisioned. For clusters that are still in the process of being provisioned, the 'Abort' action is available to stop provisioning and delete all resources. +::: + + +# Force Delete a Cluster + +A cluster stuck in the **Deletion** state can be force deleted by the user through the User Interface. The user can go for a force deletion of the cluster, only if it is stuck in a deletion state for a minimum of **15 minutes**. Palette enables cluster force delete from the Tenant Admin and Project Admin scope. + +1. Log in to the Palette Management Console. + + +2. Navigate to the **Cluster Details** page of the cluster stuck in deletion. + + - If the deletion is stuck for more than 15 minutes, click the **Force Delete Cluster** button from the **Settings** dropdown. + + - If the **Force Delete Cluster** button is not enabled, wait for 15 minutes. The **Settings** dropdown will give the estimated time for the auto-enabling of the **Force Delete** button. + +:::caution +If there are any cloud resources still on the cloud, the user should cleanup those resources before going for the force deletion. +::: \ No newline at end of file diff --git a/docs/docs-content/clusters/data-center/vmware.md b/docs/docs-content/clusters/data-center/vmware.md new file mode 100644 index 0000000000..ee60145beb --- /dev/null +++ b/docs/docs-content/clusters/data-center/vmware.md @@ -0,0 +1,1193 @@ +--- +sidebar_label: "VMware" +title: "VMware" +description: "Learn how to configure VMware to create VMware clusters in Palette." +hide_table_of_contents: false +sidebar_position: 30 +toc_min_heading_level: 2 +toc_max_heading_level: 3 +tags: ["data center", "vmware"] +--- + + +The following are some architectural highlights of Kubernetes clusters provisioned by Palette on VMware: + + +- Kubernetes nodes can be distributed across multiple-compute clusters, which serve as distinct fault domains. + + +- Support for static IP as well as DHCP. If your are using DHCP, Dynamic DNS is required. 
+ + +- IP pool management for assigning blocks of IPs dedicated to clusters or projects. + + +- A Private Cloud Gateway (PCG) that you set up within the environment facilitates communications between the Palette management platform and vCenter installed in the private data center. + + The PCG is Palette's on-prem component to enable support for isolated, private cloud, or data center environments. When the PCG is installed on-prem, it registers itself with Palette's SaaS portal and enables secure communications between the SaaS portal and private cloud environment. + + ![vmware_arch_oct_2020.png](/vmware_arch_oct_2020.png) + +## Prerequisites + +The following prerequisites must be met before deploying a Kubernetes clusters in VMware: + +- vSphere version 7.0 or above. vSphere 6.7 is supported but we do not recommend it, as it reached end of general support in 2022. + + Palette supports port groups as follows. Opaque networks in vCenter Server are *not* supported. + + - Virtual machine port groups on vSphere standard switch + - Distributed port groups on vSphere distributed switch + - NSX-T distributed virtual port group + + +- A Resource Pool configured across the hosts onto which the workload clusters will be provisioned. Every host in the Resource Pool will need access to shared storage, such as vSAN, to be able to make use of high-availability (HA) control planes. + + +- Network Time Protocol (NTP) configured on each ESXi host. + + +- An active vCenter account with all the permissions listed in [VMware Privileges](vmware.md#vmware-privileges). + + +- Installed PCG for VMware. Installing the PCG will automatically register a cloud account for VMware in Palette. You can register your additional VMware cloud accounts in Palette as described in the [Create VMware Cloud Account](vmware#create-a-vmware-cloud-account) section. + + +- A subnet with egress access to the internet (direct or via proxy): + + - For proxy: HTTP_PROXY, HTTPS_PROXY (both required). + - Outgoing internet connection on port 443 to api.spectrocloud.com. + + +- PCG IP requirements are: + + - One node with one IP address or three nodes for HA with three IP addresses. + - One Kubernetes control-plane (VIP). + - One Kubernetes control-plane (extra). + + +- IPs for application workload services, such as LoadBalancer service. + + +- A DNS to resolve public internet names, such as `api.spectrocloud.com`. + + +- Shared Storage between vSphere hosts. + + +- A cluster profile created in Palette for VMWare. + + +- Zone tagging for dynamic storage allocation for persistent storage. + + +:::info + +The following naming conventions apply to vSphere Region and Zone tags: + +
+ +- Valid tags consist of alphanumeric characters. + + +- Tags must start and end with an alphanumeric character. + + +- The regex used for validation is `(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?` + +Some example Tags are: `MyValue`, `my_value`, and `12345`. + +::: + +## Zone Tagging + +Zone tagging is required for dynamic storage allocation across fault domains when you provision workloads that require persistent storage. This is required for Palette installation and useful for workloads deployed in tenant clusters that require persistent storage. Use unique vSphere tags on data centers (k8s-region) and compute clusters (k8s-zone) to create distinct zones in your environment. Tag values must be unique. + + For example, assume your vCenter environment includes three compute clusters (cluster-1, cluster-2, and cluster-3) that are part of data center dc-1. You can tag them as follows: + +| **vSphere Object** | **Tag Category** | **Tag Value** | +| :------------- | :---------- | :----------- | +| dc-1 | k8s-region | region1 | +| cluster-1 | k8s-zone | az1 | +| cluster-2 | k8s-zone | az2 | +| cluster-3 | k8s-zone | az3 | + + +## VMware Privileges + +The vSphere user account that deploys Palette must have the minimum root-level vSphere privileges listed in the table below. The **Administrator** role provides superuser access to all vSphere objects. For users without the **Administrator** role, one or more custom roles can be created based on tasks the user will perform. +Permissions and privileges vary depending on the vSphere version you are using. + +Select the tab for your vSphere version. + + + +:::caution + +If the network is a Distributed Port Group under a vSphere Distributed Switch (VDS), ReadOnly access to the VDS without “Propagate to children” is required. + +::: + + + + + + + + +## Root-Level Role Privileges + +Root-level role privileges listed in the table are applied only to root objects and data center objects. + + + +**vSphere Object** |**Privileges**| +|---------------|----------| +|**Cns**|Searchable| +|**Datastore**|Browse datastore +|**Host**|Configuration +||* Storage partition configuration +|**vSphere** **Tagging**|Create vSphere Tag| +||Edit vSphere Tag| +|**Network**|Assign network| +|**Sessions**|Validate session| +|**VM Storage Policies**|View VM storage policies| +|**Storage views**|View| + + +## Spectro Role Privileges + +The Spectro role privileges listed in the table must be applied to the spectro-template folder, hosts, clusters, virtual machines, templates, datastore, and network objects. + + +:::info + +Palette downloads images and Open Virtual Appliance (OVA) files to the spectro-templates folder and clones images from it to create nodes. 
+ +::: + + +|**vSphere Object** |**Privileges**| +|---------------|----------| +|**spectro-templates** |Read only| +|**Cns**|Searchable +|**Datastore**|Allocate space| +||Browse datastore| +||Low-level file operations| +||Remove file| +||Update virtual machine files| +||Update virtual machine metadata| +|**Folder**|Create folder| +||Delete folder| +||Move folder| +||Rename folder| +|**Host**|Local operations| +||Reconfigure virtual machine| +|**vSphere Tagging**|Assign or Unassign vSphere Tag| +||Create vSphere Tag| +||Delete vSphere Tag| +||Edit vSphere Tag| +|**Network**|Assign network| +|**Resource**|Apply recommendation| +||Assign virtual machine to resource pool| +||Migrate powered off virtual machine| +||Migrate powered on virtual machine| +||Query vMotion| +|**Sessions**|Validate session| +|**VM Storage Policies**|View VM storage policies| +|**Storage views**|Configure service| +||View| +|**Tasks**|Create task| +||Update task| +|**vApp**|Export| +||Import| +||View OVF environment| +||vApp application configuration| +||vApp instance configuration| +|**Virtual machines**|**Change Configuration**| +||* Acquire disk lease| +||* Add existing disk| +||* Add new disk| +||* Add or remove device| +||* Advanced configuration| +||* Change CPU count| +||* Change Memory| +||* Change Settings| +||* Change Swapfile placement| +||* Change resource| +||* Configure Host USB device| +||* Configure Raw device| +||* Configure managedBy| +||* Display connection settings| +||* Extend virtual disk| +||* Modify device settings| +||* Query Fault Tolerance compatibility| +||* Query unowned files| +||* Reload from path| +||* Remove disk| +||* Rename| +||* Reset guest information| +||* Set annotation| +||* Toggle disk change tracking| +||* Toggle fork parent| +||* Upgrade virtual machine compatibility| +||**Edit Inventory**| +||* Create from existing| +||* Create new| +||* Move| +||* Register| +||* Remove| +||* Unregister| +||**Guest operations**| +||* Guest operation alias modification| +||* Guest operation alias query| +||* Guest operation modifications| +||* Guest operation program execution| +||* Guest operation queries| +||**Interaction**| +||* Console interaction| +||* Power off| +||* Power on| +||**Provisioning**| +||* Allow disk access| +||* Allow file access| +||* Allow read-only disk access| +||* Allow virtual machine download| +||* Allow virtual machine files upload| +||* Clone template| +||* Clone virtual machine| +||* Create template from virtual machine| +||* Customize guest| +||* Deploy template| +||* Mark as template| +||* Mark as virtual machine| +||* Modify customization specification| +||* Promote disks| +||* Read customization specifications| +||**Service configuration**| +||* Allow notifications| +||* Allow polling of global event notifications| +||* Manage service configurations| +||* Modify service configuration| +||* Query service configurations| +||* Read service configuration| +||**Snapshot management**| +||* Create snapshot| +||* Remove snapshot| +||* Rename snapshot| +||* Revert to snapshot| +||**vSphere Replication**| +||* Configure replication| +||* Manage replication| +||* Monitor replication| +|**vSAN**|Cluster| +||ShallowRekey| + + + + + + + +## Root-Level Role Privileges + +Root-level role privileges listed in the table are applied only to root object and data center objects. 
+ +**vSphere Object** |**Privileges**| +|---------------|----------| +|**Cns**|Searchable| +|**Datastore**|Browse datastore +|**Host**|Configuration +||* Storage partition configuration +|**vSphere** **Tagging**|Create vSphere Tag| +||Edit vSphere Tag| +|**Network**|Assign network| +|**Sessions**|Validate session| +|**Profile-driven storage**|Profile-driven storage view| +|**Storage views**|View| + + +## Spectro Role Privileges + + +The Spectro role privileges listed in the table must be applied to the spectro-template folder, hosts, clusters, virtual machines, templates, datastore, and network objects. + + +:::info + +Palette downloads images and Open Virtual Appliance (OVA) files to the spectro-templates folder and clones images from it to create nodes. + +::: + +|**vSphere Object** |**Privileges**| +|---------------|----------| +|**spectro-templates** |Read only| +|**Cns**|Searchable +|**Datastore**|Allocate space| +||Browse datastore| +||Low level file operations| +||Remove file| +||Update virtual machine files| +||Update virtual machine metadata| +|**Folder**|Create folder| +||Delete folder| +||Move folder| +||Rename folder| +|**Host**|Local operations| +||Reconfigure virtual machine| +|**vSphere Tagging**|Assign or Unassign vSphere Tag| +||Create vSphere Tag| +||Delete vSphere Tag| +||Edit vSphere Tag| +|**Network**|Assign network| +|**Resource**|Apply recommendation| +||Assign virtual machine to resource pool| +||Migrate powered off virtual machine| +||Migrate powered on virtual machine| +||Query vMotion| +|**Sessions**|Validate session| +|**Profile-driven storage**|Profile-driven storage view| +|**Storage views**|Configure service| +||View| +|**Tasks**|Create task| +||Update task| +|**vApp**|Export| +||Import| +||View OVF environment| +||vApp application configuration| +||vApp instance configuration| +|**Virtual machines**|**Change Configuration**| +||* Acquire disk lease| +||* Add existing disk| +||* Add new disk| +||* Add or remove device| +||* Advanced configuration| +||* Change CPU count| +||* Change Memory| +||* Change Settings| +||* Change Swapfile placement| +||* Change resource| +||* Configure Host USB device| +||* Configure Raw device| +||* Configure managedBy| +||* Display connection settings| +||* Extend virtual disk| +||* Modify device settings| +||* Query Fault Tolerance compatibility| +||* Query unowned files| +||* Reload from path| +||* Remove disk| +||* Rename| +||* Reset guest information| +||* Set annotation| +||* Toggle disk change tracking| +||* Toggle fork parent| +||* Upgrade virtual machine compatibility| +||**Edit Inventory**| +||* Create from existing| +||* Create new| +||* Move| +||* Register| +||* Remove| +||* Unregister| +||**Guest operations**| +||* Guest operation alias modification| +||* Guest operation alias query| +||* Guest operation modifications| +||* Guest operation program execution| +||* Guest operation queries| +||**Interaction**| +||* Console interaction| +||* Power off| +||* Power on| +||**Provisioning**| +||* Allow disk access| +||* Allow file access| +||* Allow read-only disk access| +||* Allow virtual machine download| +||* Allow virtual machine files upload| +||* Clone template| +||* Clone virtual machine| +||* Create template from virtual machine| +||* Customize guest| +||* Deploy template| +||* Mark as template| +||* Mark as virtual machine| +||* Modify customization specification| +||* Promote disks| +||* Read customization specifications| +||**Service configuration**| +||* Allow notifications| +||* Allow polling of global event 
notifications| +||* Manage service configurations| +||* Modify service configuration| +||* Query service configurations| +||* Read service configuration| +||**Snapshot management**| +||* Create snapshot| +||* Remove snapshot| +||* Rename snapshot| +||* Revert to snapshot| +||**vSphere Replication**| +||* Configure replication| +||* Manage replication| +||* Monitor replication| +|**vSAN**|Cluster| +||ShallowRekey| + + + + + + +## Root-Level Role Privileges + + +Root-level role privileges listed in the table are applied only to root object and data center objects. + + +**vSphere Object** |**Privileges**| +|---------------|----------| +|**Cns**|Searchable| +|**Datastore**|Browse datastore +|**Host**|Configuration +||* Storage partition configuration +|**vSphere** **Tagging**|Create vSphere Tag| +||Edit vSphere Tag| +|**Network**|Assign network| +|**Sessions**|Validate session| +|**Profile-driven storage**|Profile-driven storage view| +|**Storage views**|View| + + +## Spectro Role Privileges + +The Spectro role privileges listed in the table must be applied to the spectro-template folder, hosts, clusters, virtual machines, templates, datastore, and network objects. + +:::info + +Palette downloads images and Open Virtual Appliance (OVA) files to the spectro-templates folder and clones images from it to create nodes. + +::: + +|**vSphere Object** |**Privileges**| +|---------------|----------| +|**spectro-templates** |Read only| +|**Cns**|Searchable +|**Datastore**|Allocate space| +||Browse datastore| +||Low level file operations| +||Remove file| +||Update virtual machine files| +||Update virtual machine metadata| +|**Folder**|Create folder| +||Delete folder| +||Move folder| +||Rename folder| +|**Host**|Local operations| +||Reconfigure virtual machine| +|**vSphere Tagging**|Assign or Unassign vSphere Tag| +||Create vSphere Tag| +||Delete vSphere Tag| +||Edit vSphere Tag| +|**Network**|Assign network| +|**Resource**|Apply recommendation| +||Assign virtual machine to resource pool| +||Migrate powered off virtual machine| +||Migrate powered on virtual machine| +||Query vMotion| +|**Sessions**|Validate session| +|**Profile-driven storage**|Profile-driven storage view| +|**Storage views**|Configure service| +||View| +|**Tasks**|Create task| +||Update task| +|**vApp**|Export| +||Import| +||View OVF environment| +||vApp application configuration| +||vApp instance configuration| +|**Virtual machines**|**Change Configuration**| +||* Acquire disk lease| +||* Add existing disk| +||* Add new disk| +||* Add or remove device| +||* Advanced configuration| +||* Change CPU count| +||* Change Memory| +||* Change Settings| +||* Change Swapfile placement| +||* Change resource| +||* Configure Host USB device| +||* Configure Raw device| +||* Configure managedBy| +||* Display connection settings| +||* Extend virtual disk| +||* Modify device settings| +||* Query Fault Tolerance compatibility| +||* Query unowned files| +||* Reload from path| +||* Remove disk| +||* Rename| +||* Reset guest information| +||* Set annotation| +||* Toggle disk change tracking| +||* Toggle fork parent| +||* Upgrade virtual machine compatibility| +||**Edit Inventory**| +||* Create from existing| +||* Create new| +||* Move| +||* Register| +||* Remove| +||* Unregister| +||**Guest operations**| +||* Guest operation alias modification| +||* Guest operation alias query| +||* Guest operation modifications| +||* Guest operation program execution| +||* Guest operation queries| +||**Interaction**| +||* Console interaction| +||* Power off| +||* Power on| 
+||**Provisioning**| +||* Allow disk access| +||* Allow file access| +||* Allow read-only disk access| +||* Allow virtual machine download| +||* Allow virtual machine files upload| +||* Clone template| +||* Clone virtual machine| +||* Create template from virtual machine| +||* Customize guest| +||* Deploy template| +||* Mark as template| +||* Mark as virtual machine| +||* Modify customization specification| +||* Promote disks| +||* Read customization specifications| +||**Service configuration**| +||* Allow notifications| +||* Allow polling of global event notifications| +||* Manage service configurations| +||* Modify service configuration| +||* Query service configurations| +||* Read service configuration| +||**Snapshot management**| +||* Create snapshot| +||* Remove snapshot| +||* Rename snapshot| +||* Revert to snapshot| +||**vSphere Replication**| +||* Configure replication| +||* Manage replication| +||* Monitor replication| +|**vSAN**|Cluster| +||ShallowRekey| + + + + + + + +--- + +## Create VMware Cloud Gateway + + + + +You can use two different PCG installation methods for VMware vSphere. You can use the Palette CLI, or you can use an OVA/OVF template. Review the prerequisites for each option to help you identify the correct installation method. + + + + + + + +### Prerequisites + + +- Palette version 4.0.X or greater. + + +- A Palette API key. Refer to the [Create API Key](../../user-management/user-authentication.md#api-key) page for guidance. + + +- Download the Palette CLI from the [Downloads](../../spectro-downloads#palette-cli) page and install the CLI. Refer to the [Palette CLI Install](../../palette-cli/install-palette-cli.md) guide to learn more. + +- You can set up the PCG as a single or three-node cluster based on your requirements for high availability (HA). The minimum PCG resource requirements are the following. + + - Single-node cluster: 2 vCPU, 4 GB memory, 60 GB storage. + + - High-Availability (HA) three-node cluster: 6 vCPU, 12 GB memory, 70 GB storage. + + +- Sufficient available IP addresses within the configured OpenStack subnets. + + +:::info + +Self-hosted Palette installations provide a system PCG out-of-the-box and typically do not require a separate, user-installed PCG. However, you can create additional PCGs as needed to support provisioning into remote data centers that do not have a direct incoming connection from the management console. + +::: + +### Install PCG + +1. In an x86 Linux host, open up a terminal session. + + +2. Use the Palette CLI `login` command to authenticate the CLI with Palette. When prompted, enter the information listed in the following table. + +
+ + ```shell + palette login + ``` + +
+ + |**Parameter** | **Description**| + |:-----------------------------|---------------| + |**Spectro Cloud Console** |Enter the Palette endpoint URL. When using the Palette SaaS service, enter `https://console.spectrocloud.com`. When using a self-hosted instance of Palette, enter the URL for that instance. | + |**Allow Insecure Connection** |Enabling this option bypasses x509 verification. Enter `y` if you are using a self-hosted Palette instance with self-signed TLS certificates. Otherwise, enter `n`.| + |**Spectro Cloud API Key** |Enter your Palette API Key.| + |**Spectro Cloud Organization** |Enter your Palette Organization name.| + |**Spectro Cloud Project** |Enter your desired project name within the selected Organization.| + + + +3. Once you have authenticated successfully, invoke the PCG installer by issuing the following command. When prompted, enter the information listed in each of the following tables. + +
+ + ```bash + palette pcg install + ``` + +
+ + |**Parameter** | **Description**| + |:-----------------------------|---------------| + |**Cloud Type**| Choose VMware.| + |**Private Cloud Gateway Name** | Enter a custom name for the PCG. Example: ``vmware-pcg-1``.| + |**Share PCG Cloud Account across platform Projects** |Enter `y` if you want the Cloud Account associated with the PCG to be available from all projects within your organization. Enter `n` if you want the Cloud Account to only be available at the tenant admin scope.| + + +4. Next, provide environment configurations for the cluster. Refer to the following table for information about each option. + + |**Parameter**| **Description**| + |:-------------|----------------| + |**HTTPS Proxy**|Leave this blank unless you are using an HTTPS Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: `https://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| + |**HTTP Proxy**|Leave this blank unless you are using an HTTP Proxy. This setting will be propagated to all PCG nodes and all of its cluster nodes. Example: `http://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| + |**No Proxy**|The default is blank. You can add a comma-separated list of local network CIDR addresses, hostnames, and domain names that should be excluded from proxying. This setting will be propagated to all the nodes to bypass the proxy server. Example if you have a self-hosted environment: `my.company.com,10.10.0.0/16`.| + |**Proxy CA Certificate Filepath**|The default is blank. You can provide the file path of a CA certificate on the installer host. If provided, this CA certificate will be copied to each host in the PCG cluster during deployment. The provided path will be used on the PCG cluster hosts. Example: `/usr/local/share/ca-certificates/ca.crt`.| + |**Pod CIDR**|Enter the CIDR pool that will be used to assign IP addresses to pods in the PCG cluster. The pod IP addresses should be unique and not overlap with any machine IPs in the environment.| + |**Service IP Range**|Enter the IP address range that will be used to assign IP addresses to services in the PCG cluster. The service IP addresses should be unique and not overlap with any machine IPs in the environment.| + + + +5. After the environment options, the next set of prompts is for configuring the PCG cluster for the VMware environment. The following table contains information about each prompt. + 
+ + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + |**vSphere Endpoint** | vSphere endpoint: FQDN or IP address, without the HTTP scheme `https://` or `http://`.
Example: `vcenter.mycompany.com`| + |**vSphere Username** | vSphere account username.| + |**vSphere Password** | vSphere account password.| + |**Allow Insecure Connection** |Enabling this option bypasses x509 verification. Enter `y` if using a vSphere instance with self-signed TLS certificates. Otherwise, enter `n`.| + + +6. Next, fill out VMware account configurations. Specify values for the following properties. + +
+ + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + | **Datacenter** | The data center to target.| + | **Folder**| The folder to target.| + | **Fault Domains**| Specify any fault domains you would like to use.| + | **Cluster**| The compute cluster to use for the PCG deployment.| + | **Network**| The network the PCG cluster will use. | + | **Resource Pool** | The resource pool to target when deploying the PCG cluster.| + | **Storage Type**| Select the datastore and VM Storage policy to apply to the PCG cluster. | + | **NTP Servers**| Specify the IP address for any Network Time Protocol (NTP) servers the PCG cluster can reference.| + | **SSH Public Keys**| Provide the public OpenSSH key for the PCG cluster. Use this key when establishing an SSH connection with the PCG cluster. This prompt will result in the default text editor for the Operating System to open. Vi is the more common text editor used in Linux environments. | + | **Cluster Size** | The number of nodes that will make up the cluster. Available options are **1** or **3** . Use three nodes for a High Availability (HA) cluster. | + + + +7. Specify IP Pool configuration. You have the option to select a static placement or use Dynamic Domain Name Service (DDNS). With static placement, an IP pool is created and the VMs are assigned IP addresses from the selected pool. With DDNS, VMs are assigned IP addresses via DNS. Review the following tables to learn more about each parameter. + +
+ + ##### Static Placement Configuration + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + | **IP Start range** | Enter the first address in the PCG IP pool range.| + | **IP End range** | Enter the last address in the PCG IP pool range.| + | **Network Prefix** | Enter the network prefix for the IP pool range. Valid values are network CIDR subnet masks from the range `0 - 32`. Example: `18`.| + | **Gateway IP Address** | Enter the IP address of the static IP gateway.| + | **Name servers** | Comma-separated list of DNS name server IP addresses.| + | **Name server search suffixes (optional)** | Comma-separated list of DNS search domains.| + + ##### DDNS Placement Configuration + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + | **Search domains** | Comma-separated list of DNS search domains.| + + +8. Specify the cluster boot configuration. + +
+ + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + | **Patch OS on boot** | This parameter indicates whether or not to patch the OS of the PCG hosts on the first boot.| + | **Reboot nodes once OS patch is applied** | This parameter indicates whether or not to reboot PCG nodes after OS patches are complete. This only applies if the **Patch OS on boot** parameter is enabled.| + + + + +9. Enter the vSphere Machine configuration for the Private Cloud Gateway. + +
+ + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + | **CPU** | The number of CPUs in the Virtual Machine. | + | **Memory** | The amount of memory to allocate to the Virtual Machine.| + | **Storage** | The amount of storage to allocate to the Virtual Machine. | + +10. A new PCG configuration file is generated and its location is displayed on the console. You will receive an output similar to the following. + 
+ + ```bash hideClipboard + ==== PCG config saved ==== + Location: :/home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml + ``` + + :::info + + The ``CloudAccount.apiKey`` and ``Mgmt.apiKey`` values in the **pcg.yaml** are encrypted and cannot be manually updated. To change these values, restart the installation process using the `palette pcg install` command. + ::: + + +The Palette CLI will now provision a PCG cluster in your VMware environment. +If the deployment fails due to misconfiguration, update the PCG configuration file and restart the installer. Refer to the [Edit and Redeploy PCG](vmware#edit-and-redeploy-pcg) section below. For additional assistance, visit our [Customer Support](https://spectrocloud.atlassian.net/servicedesk/customer/portals) portal. + + +### Validate + +Once installed, the PCG registers itself with Palette. To verify the PCG is registered, use the following steps. + + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. Navigate to the left **Main Menu** and select **Tenant Settings** + + +3. From the **Tenant Settings Menu** click on **Private Cloud Gateways**. Verify your PCG cluster is available from the list of PCG clusters displayed. + + +### Edit and Redeploy PCG + +To change the PCG install values, restart the installation process using the `palette pcg install` command. Use the following steps to redeploy the PCG or restart the install process. + + +1. Make the necessary changes to the PCG configuration file the CLI created during the installation, if needed. Use a text editor, such as vi or nano to update the PCG install configuration file. + +
+ + ```shell hideClipboard + ==== Create PCG reference config ==== + ==== PCG config saved ==== + Location: /Users/demo/.palette/pcg/pcg-20230717114807/pcg.yaml + ``` + + ```bash hideClipboard + vi /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml + ``` + + + +2. To redeploy the PCG, use the `install` command with the flag `--config-file`. Provide the file path to the PCG configuration file that was generated and displayed in the output. + 
+ + ```bash hideClipboard + palette pcg install --config-file /home/demo/.palette/pcg/pcg-20230706150945/pcg.yaml + ``` + + +
+ + + + +## PCG Install With OVA/OVF + + +The following points give an overview of what you will do to set up the PCG: + +
+ + - Initiate the installation from the tenant portal. + + + - Deploy the gateway installer VM in VMware vSphere. + + + - Launch the cloud gateway from the tenant portal. + +:::info + +Self-hosted Palette installations provide a system gateway out-of-the-box and typically do not require a PCG. However, you can create additional gateways as needed to support provisioning into remote data centers that do not have a direct incoming connection from the management console. + +::: + + + + + +### Prerequisites + + +- Palette version 3.4.X or older. + + +- You can set up the PCG as a single- or three-node cluster based on your requirements for high availability (HA). The minimum PCG resource requirements are the following. + - Single-node cluster: 2 vCPU, 4 GB memory, 60 GB storage. + + - High-Availability (HA) three-node cluster: 6 vCPU, 12 GB memory, 70 GB storage. + +### Install PCG + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. Navigate to the left **Main Menu** and select **Tenant Settings** > **Private Cloud Gateway**. + + +3. Click the **Create Private Cloud Gateway** button and select **VMware**. Private Gateway installation instructions are displayed. + + +4. Copy the gateway-installer link. Alternatively, you can download the OVA and upload it to an accessible location and import it as a local file. + + +### vSphere - Deploy Gateway Installer + +1. Deploy a new OVF template by providing the link to the installer OVA as the URL. + + +2. Proceed through the OVF deployment wizard, selecting the desired Name, Placement, Compute, Storage, and Network options. + + +3. At the **Customize Template** step, specify Palette properties as follows: + +
+ +| **Parameter** | **Value** | **Description** | +|---|---|---| +|**Installer Name** | Desired Palette Gateway Name. | The name will be used to identify the gateway instance. Typical environments may only require a single gateway to be deployed. However, multiple gateways may be required to manage clusters across multiple vCenters. We recommend choosing a name that readily identifies the environment for which this gateway instance is being configured.| +| **Console endpoint** | URL to Palette management platform portal. | Default: https://console.spectrocloud.com | +|**Pairing Code** | PIN displayed on the Palette management platform portal's 'Create a new gateway' dialogue. | | +| **SSH Public Key** | Optional key for troubleshooting purposes. | We recommend having an SSH key, as it enables SSH access to the VM as the 'ubuntu' user. | +| **Pod CIDR** | Optional IP range exclusive to pods. | This range must not overlap with your network CIDR. | +| **Service cluster IP range** | Optional IP range in the CIDR format exclusive to the service clusters. | This range also must not overlap with either the pod CIDR or your network CIDR. | + + +Proxy environments require additional property settings. Each of the proxy properties may or may not have the same value, but all three properties are required. + + +| **Parameter** | **Value** | **Remarks** | +|---|---|---| +|HTTP PROXY | Endpoint for the HTTP proxy server. | This setting will be propagated to all the nodes launched in the proxy network. For example: `http://USERNAME:PASSWORD@PROXYIP:PROXYPORT` | +| HTTPS PROXY | Endpoint for the HTTPS proxy server. | This setting will be propagated to all the nodes launched in the proxy network. For example: `http://USERNAME:PASSWORD@PROXYIP:PROXYPORT` | +| NO Proxy | A comma-separated list of vCenter server, local network CIDR, hostnames, and domain names that should be excluded from proxying. | This setting will be propagated to all the nodes to bypass the proxy server. For example: `vcenter.company.com`, `.company.org`, and `10.10.0.0/16` | +| Certificate | The base64-encoded value of the proxy server's certificate OR the base64-encoded root and issuing certificate authority (CA) certificates used to sign the proxy server's certificate. | Depending on how the certificate is encoded, an additional `=` character may appear at the end of the value. You can use this command to properly encode the certificate: `base64 -w0 | sed "s/=$//"`. | + +4. Complete the OVF deployment wizard and wait for the OVA to be imported and the Virtual Machine (VM) to be deployed. + + +5. Power on the VM. + + +### Tenant Portal - Launch Cloud Gateway + +1. Close the **Create New Gateway** installation instructions and navigate to the Private Cloud Gateway page under **Tenant Settings** if you have navigated away or logged out. + + +2. Wait for a gateway widget to display on the page and for the **Configure** option to become available. The IP address of the installer VM will be displayed on the gateway widget. This may take a few minutes after the VM is powered on. Failure of the installer to register with Palette within 10 minutes of powering on the Virtual Machine on vSphere might indicate an error. Follow steps in [Troubleshooting](../../troubleshooting/pcg.md) to identify and resolve the issue. + + +3. Click on the **Configure** button to invoke the Palette Configuration dialogue. Provide vCenter credentials and proceed to the next configuration step. + + +4. 
Choose the desired values for the Data Center, Compute Cluster, Datastore, Network, Resource pool, and Folder. Optionally, provide one or more SSH Keys or NTP server addresses. + +
+ + Virtual machine port groups and distributed port groups are listed with their names. NSX-T distributed virtual port groups that exist in vSphere will be listed with their name and segment IDs. + + +5. Choose the IP Allocation Scheme - Static IP or DHCP. Selecting static IP enables the option to create an IP pool. To create an IP pool, provide an IP range or a subnet. The IP addresses from the IP pool will be assigned to the gateway cluster. By default, the IP pool is available for use by other tenant clusters. You can prevent this by toggling on the **Restrict to a single cluster** option. + + + + +6. Click on **Confirm** to initiate gateway cluster provisioning. Cluster status should change to **Provisioning** and eventually to **Running**, when the gateway cluster is fully provisioned. This process can take about 10 minutes. + + You can click on the Cloud Gateway widget in the UI to view a detailed provisioning sequence on the **Cluster Details** page. If gateway cluster provisioning results in errors or gets stuck, you can view the details on the **Summary** tab or the **Events** tab of the **Cluster Details** page. + + In certain cases where provisioning of the gateway cluster is stuck or failed due to invalid configuration, you can reset the process from the Cloud Gateway widget. + + +7. When the Gateway transitions to the **Running** state, it is fully provisioned and ready to bootstrap tenant cluster requests. + + +8. Power off the installer OVA that you initially imported at the start of this installation process. + +:::info + +A Gateway cluster installation automatically creates a cloud account using the credentials entered at the time the gateway cluster is deployed. You can use this account to provision clusters across all tenant projects. + +::: + +
+ +
+ +--- +## Upgrade PCG + +Palette maintains the OS image and all configurations for the cloud gateway. Periodically, the OS images, configurations, or other components need to be upgraded to resolve security or functionality issues. Palette releases such upgrades when required and displays an upgrade notification on the gateway. + +Administrators should review the changes and apply them at a suitable time. Upgrading a cloud gateway does not result in any downtime for the Tenant Clusters. During the upgrade process, the provisioning of new clusters might be temporarily unavailable. New cluster requests are queued while the gateway is being upgraded and are processed as soon as the gateway upgrade is complete. + + +### Delete a VMware Cloud Gateway + +Use the following steps to delete a cloud gateway: + +1. As a Tenant Administrator, navigate to the **Private Cloud Gateway** page under **Settings**. + + +2. Invoke the **Delete** action on the PCG instance you want to delete. + + +3. The system performs a validation to ensure there are no running tenant clusters associated with the PCG instance being deleted. If such instances are found, an error is displayed. Delete any running tenant clusters and retry deleting the PCG. + + +4. Delete the Gateway Virtual Machines from vSphere. + + +### Resize PCG + +You can set up the PCG as a single-node cluster or as a three-node cluster for high availability (HA). For production environments, we recommend three nodes. A PCG can be initially set up with one node and resized to three nodes later. Use the following steps to resize a single-node PCG cluster to a three-node PCG cluster. + +1. As a Tenant Administrator, navigate to the **Private Cloud Gateway** page under **Settings**. + + +2. Invoke the resize action for the relevant cloud gateway instance. + + +3. Update the size from one (1) to three (3). + + +4. The gateway upgrade begins shortly after the update. Two new nodes are created on vSphere and the gateway is upgraded to a 3-node cluster. + +:::info +Scaling a 3-node cluster down to a 1-node cluster is not permitted.

A load balancer instance is launched even for a 1-node gateway to support future expansion. +::: + +## IP Address Management + +Palette supports both DHCP and Static IP-based allocation strategies for the VMs that are launched during cluster creation. IP Pools can be defined using a range or a subnet. Administrators can define one or more IP pools linked to a PCG. + +Clusters created using a PCG can select from the IP pools linked to the corresponding PCG. By default, IP Pools are shared across multiple clusters but can optionally be restricted to a cluster. + +The following is a description of various IP Pool properties: + +| **Property** | **Description** | +|---|---| +| **Name** | Descriptive name for the IP Pool. This name will be displayed for IP Pool selection when static IP is chosen as the IP allocation strategy. | +| **Network Type** | Select **Range** to provide a start and an end IP address. IPs within this range will become part of this pool. Alternatively, select 'Subnet' to provide the IP range in CIDR format.| +| **Start** | First IP address for a range-based IP Pool. Example: 10.10.183.1| +| **End** | Last IP address for a range-based IP Pool. Example: 10.10.183.100 | +| **Subnet** | CIDR to allocate a set of IP addresses for a subnet-based IP Pool. Example: 10.10.183.64/26 | +| **Subnet Prefix** | Network subnet prefix. Example: /18| +| **Gateway** | Network gateway. Example: 10.128.1.1 | +| **Name server addresses** | A comma-separated list of name servers. Example: 8.8.8.8 | +| **Restrict to a Single Cluster** | Select this option to reserve the pool for the first cluster that uses this pool. By default, IP pools can be shared across clusters.| + +## Create a VMware Cloud Account + +Use the following steps to create a VMware cloud account. + + +### Prerequisites + +- A VMware cloud gateway must be configured. Refer to the [Create VMware Cloud Gateway](#create-vmware-cloud-gateway) section for guidance. + + :::info + Enterprise version users should choose the Use System Gateway option. + ::: + +In addition to the default cloud account already associated with the private cloud gateway, new user cloud accounts can be created for the different vSphere users. + +| **Property** | **Description** | +|---|---| +|**Account Name** | Custom name for the cloud account | +| **Private cloud gateway** | Reference to a running cloud gateway| +| **vCenter Server** | IP or FQDN of the vCenter server| +| **Username** | vCenter username| +| **Password** | vCenter password| + +:::caution +If you change the password for a user account in vCenter, you must also change it in Palette for the same VMware cloud account. We recommend updating the passwords immediately to avoid potentially locking Palette out of vCenter. For guidance, refer to [Change VMware Cloud Account Password in Palette](#change-vmware-cloud-account-password). +::: + + +## Change VMware Cloud Account Password + +The user account password in vCenter must match the password for the corresponding VMware cloud account in Palette. This section provides steps to change the password in Palette in the event the vCenter password changes. + +### Prerequisites + +- Access to the vCenter credentials. + +### Change the Password in Palette + +1. Log in to [Palette](https://console.spectrocloud.com/). + +2. From the left **Main Menu**, navigate to **Tenant Settings** > **Cloud Accounts**. + +3. Click the **three-dot Menu** for the VMware account you want to update, and select **Edit**. + + + + +4. 
In the window that opens, update the password in the **Password** field and click the **Validate** button. + +5. Confirm your changes. + +### Validation + +Palette validates the password. Incorrect credentials will result in an error. As an extra precaution, try scaling a cluster up or down. + +:::info +In addition to changing the password for a VMware account, Palette provides a way for you to also change the user associated with an account by entering a new username in the **Username** field. Ensure the new user account has the same permissions as the previous user account in vCenter. +::: + + +# Deploy a VMware Cluster + + + + +Use the following steps to provision a new VMware cluster. + +
+ +1. Provide the basic cluster information like Name, Description, and Tags. Tags are currently not propagated to the Virtual Machines (VMs) deployed on the cloud/data center environments. + + +2. Select a Cluster Profile created for the VMware environment. The profile definition will be used as the cluster construction template. + + +3. Review and override Pack Parameters as desired. By default, parameters for all Packs are set with values defined in the Cluster Profile. + + +4. Provide a vSphere Cloud account and placement information. + + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + | **Cloud Account** | Select the desired cloud account.
VMware cloud accounts with credentials need to be preconfigured
in the Project Settings section. An account is auto-created as
part of the cloud gateway setup and is available for
provisioning of Tenant Clusters if permitted by the administrator.| + | **Datacenter** |The vSphere data center where the cluster nodes will be launched.| + | **Deployment Folder** | The vSphere VM Folder where the cluster nodes will be launched.| + | **Image Template Folder** | The vSphere folder to which the Spectro templates are imported.| + | **SSH Keys (Optional)** | Public key to configure remote SSH access to the nodes (User: spectro).| + | **NTP Server (Optional)** | Set up time synchronization for all the running nodes.| + | **IP Allocation strategy** | DHCP or Static IP| + +5. Configure the master and worker node pools. Fill out the input fields in the **Add node pool** page. The following table contains an explanation of the available input parameters. + +### Master Pool + +|**Parameter** | **Description**| +|------------------|---------------| +|**Name** |A descriptive name for the node pool.| +|**Size** |Number of VMs to be provisioned for the node pool. For the master pool, this number can be 1, 3, or 5.| +|**Allow worker capability**|Select this option to allow workloads to be provisioned on master nodes.| +|**[Labels](../cluster-management/taints.md#labels)**| Add a label to apply placement constraints on a pod, such as a node eligible for receiving the workload.| +|**[Taints](../cluster-management/taints.md#taints)**|Set tolerations to allow (but not require) pods to schedule onto nodes with matching taints.| +|**Instance type** |Select the compute instance type to be used for all nodes in the node pool.| +|**Availability Zones**| Choose one or more availability zones. Palette provides fault tolerance to guard against hardware failures, network failures, etc., by provisioning nodes across availability zones if multiple zones are selected.| +|**Disk Size**|Provide the required storage size.| + +### Worker Pool + +|**Parameter** | **Description**| +|------------------|---------------| +|**Name** |A descriptive name for the node pool.| +|**Enable Autoscaler**|You can enable the autoscaler by toggling the **Enable Autoscaler** button. Autoscaler scales resources up and down between the defined minimum and maximum number of nodes to optimize resource utilization.| +||Set the scaling limit by setting the **Minimum Size** and **Maximum Size**. Based on the workload, the number of nodes scales up to the maximum set value and scales down to the minimum set value.| +|**Size** |Number of VMs to be provisioned for the node pool.| +|**Rolling Update**| Rolling update has two available options. Review the [Update Parameter](#update-parameter-table) table below for more details.| +|**[Labels](../cluster-management/taints.md#labels)**|Add a label to apply placement constraints on a pod, such as a node eligible for receiving the workload.| +|**[Taints](../cluster-management/taints.md#taints)**|Set tolerations to allow (but not require) pods to schedule onto nodes with matching taints.| +|**Instance type** |Select the compute instance type to be used for all nodes in the node pool.| +|**Availability Zones**| Choose one or more availability zones. Palette provides fault tolerance to guard against hardware failures, network failures, etc., by provisioning nodes across availability zones if multiple zones are selected.| +|**Disk Size**|Provide the required storage size.| + +6. Review settings and deploy the cluster. Provisioning status with details of ongoing provisioning tasks is available to track progress.
+ +:::info +New worker pools may be added if it is desired to customize certain worker nodes to run specialized workloads. As an example, the default worker pool may be configured with 4 CPUs, 8 GB of memory for general-purpose workloads, and another worker pool with 8 CPUs, 16 GB of memory for advanced workloads that demand larger resources. +::: + +# Delete a VMware Cluster + +The deletion of a VMware cluster results in the removal of all Virtual machines and associated storage disks created for the cluster. The following tasks need to be performed to delete a VMware cluster: + + +1. Select the cluster to be deleted from the **Cluster** **View** page and navigate to the **Cluster Overview** page. + + +2. Invoke the delete action available on the page: **Cluster** > **Settings** > **Cluster** **Settings** > **Delete** **Cluster**. + + +3. Click **Confirm** to delete. + + +The Cluster Status is updated to **Deleting** while the Cluster Resources are being deleted. Provisioning status is updated with the ongoing progress of the delete operation. Once all resources are successfully deleted, the Cluster Status changes to **Deleted** and is removed from the list of Clusters. + +:::info +The Delete action is only available for Clusters that are fully provisioned. For Clusters that are still in the process of being provisioned, Abort action is available to stop provisioning and delete all resources. +::: + +# Force Delete a Cluster + +A cluster stuck in the **Deletion** state can be force deleted by the user through the User Interface. The user can go for a force deletion of the cluster, only if it is stuck in a deletion state for a minimum of **15 minutes**. Palette enables cluster force delete from the Tenant Admin and Project Admin scope. + +## To force delete a cluster: + +1. Log in to the Palette Management Console. + + +2. Navigate to the **Cluster Details** page of the cluster stuck in deletion mode. + + - If the deletion status is stuck for more than 15 minutes, click the **Force Delete Cluster** button from the **Settings** dropdown. + + - If the **Force Delete Cluster** button is not enabled, wait for 15 minutes. The **Settings** dropdown will give the estimated time for the auto-enabling of the **Force Delete** button. + + +:::caution +If there are any cloud resources still on the cloud, the user should cleanup those resources before going for the force deletion. +::: \ No newline at end of file diff --git a/docs/docs-content/clusters/edge/_category_.json b/docs/docs-content/clusters/edge/_category_.json new file mode 100644 index 0000000000..455b8e4969 --- /dev/null +++ b/docs/docs-content/clusters/edge/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 20 +} diff --git a/docs/docs-content/clusters/edge/architecture.md b/docs/docs-content/clusters/edge/architecture.md new file mode 100644 index 0000000000..a12727309b --- /dev/null +++ b/docs/docs-content/clusters/edge/architecture.md @@ -0,0 +1,129 @@ +--- +sidebar_label: "Architecture" +title: "Architecture" +description: "Learn about Palette Edge and the architecture used to suppport edge clusters." +hide_table_of_contents: false +sidebar_position: 0 +tags: ["edge", "architecture"] +--- + +The following are architectural highlights of Palette-provisioned Edge native clusters. + + +* Kubernetes is natively installed on the host. + + + +* Support for AMD64 and ARM64 architectures. + + +* Support for bare metal and virtualized edge devices. + + +* Customizable site properties such as network proxies and certificates. 
+ + +* Configurable Kubernetes API servers to work with virtual IP address (VIP) or Dynamic DNS. + + +* Edge supports adding multiple devices to the site to form a multi-node Kubernetes cluster. + + +* Operating system (OS) images are derived from immutable container-based OS images provided by the [Kairos](http://kairos.io) open-source project. + + +* The installation is bootstrapped using a relatively small distribution-agnostic *Stylus* installer image. The operating system and Kubernetes version are derived from cluster profile settings associated with the edge site and dynamically downloaded and installed. + + +* Palette Edge Distribution supports use cases that require customizing OS packages, device drivers, and more. + + ![Architecture diagram of Edge](/native-edge.png "#title=An architecture diagram of Palette and all of the components.") + + +## Minimum Device Requirements + +The following minimum device requirements must be met to deploy an Edge host successfully. + +* 2 CPU + + +* 8 GB Memory + + +* 60 GB Storage + + +If Trusted Platform Module (TPM) is used, it must be TPM 2.0 or greater. + + +## Supported Architectures + +Palette supports AMD64 and ARM64 (beta) architectures for Edge installations. However, we cannot guarantee that all hardware and software configurations will work due to the various options available in the market. We recommend that you test your hardware configuration before deploying to production. + +
+ +:::caution + + ARM64 support is a preview feature and requires Palette version 4.0.0 or later. + +::: + + +## Palette Edge Distribution + +Palette provides the following distributions for edge installations. + +|Name|OS |Kubernetes Distro|CNIs|CSIs| +|----|---|----------|----|----| +|Palette Optimized K3s |openSUSE, Ubuntu |K3s |Calico, Flannel|Rook Ceph| +|Palette Optimized RKE2|openSUSE, Ubuntu |RKE2|Calico, Flannel|Rook Ceph| +|[Palette eXtended Kubernetes Edge (PXK-E)](../../glossary-all#palette-extended-kubernetes-edge-pxk-e)|openSUSE, Ubuntu|CNCF|Calico, Flannel|Rook Ceph| + + +## Supported Configurations + +Palette offers complete flexibility in deploying clusters at edge sites with various aspects you can customize. The table below describes these aspects and the available options. + +| **Parameter** | **Choices** | +|-|-| +| Cluster Mode | - Connected: The site has internet connectivity and the installation is initiated via Palette Management Console
- Air-Gapped: The site does not have internet connectivity. Installation is initiated via the Palette CLI.| +| OS | - Ubuntu
- OpenSUSE
- Bring your own OS (BYOOS) | +| K8s Flavor | - Palette eXtended K8s for Edge FIPS (PXK-E)
- Palette eXtended K8s for Edge (PXK-E)
- Palette Optimized K3s
- Palette Optimized RKE2 | +| K8s Version |- 1.24.x
- 1.25.x
- 1.26.x
- 1.27.x | +| FIPS Mode |- True: Enforce usage of FIPS packs and other required FIPS configuration to meet FIPS compliance
- False | +| Edge Host Registration Mode | - Manual: A unique Edge host ID is manually entered into the Palette Management Console
- Auto: Edge hosts automatically register with the Palette through the usage of a registration token supplied in the use-data
- QR Code: Scan a QR code that takes you to a web application that registers the Edge host with Palette. This method is considered advanced with the benefit of simplifying the Edge host registration without needing a tenant token or a manual entry. | +| Edge Host Type - Installer Format | Create an ISO image that contains all your dependencies and custom configurations. | + +
+ + +## Kubernetes Defaults + +The following items are disabled by default for RKE2 and K3s. + +* Traefik + +* SERVICE-lb + +* local-path provisioner + +* Flannel + +**Example Scenario:** + +For the Palette optimized K3s pack, the default network component flannel is disabled to allow the user to independently use any container network interface pack such as Flannel or others, as part of the network layer of a cluster profile. + +The component metrics server is disabled to avoid duplicating it because Palette installs the metrics server by default. + +``` +cluster: + config: + # disable the built in cni + flannel-backend: none + no-flannel: true + disable-network-policy: true + Disable: + - metrics-server +``` + +
\ No newline at end of file diff --git a/docs/docs-content/clusters/edge/edge-configuration/_category_.json b/docs/docs-content/clusters/edge/edge-configuration/_category_.json new file mode 100644 index 0000000000..455b8e4969 --- /dev/null +++ b/docs/docs-content/clusters/edge/edge-configuration/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 20 +} diff --git a/docs/docs-content/clusters/edge/edge-configuration/cloud-init.md b/docs/docs-content/clusters/edge/edge-configuration/cloud-init.md new file mode 100644 index 0000000000..c6bc3006fc --- /dev/null +++ b/docs/docs-content/clusters/edge/edge-configuration/cloud-init.md @@ -0,0 +1,293 @@ +--- +sidebar_label: "Cloud Init Stages" +title: "Cloud Init Stages" +description: "Learn how to use cloud-init stages when installing an Edge device with Palette." +hide_table_of_contents: false +sidebar_position: 0 +tags: ["edge"] +--- + +The installation process supports all the cloud-init stages exposed by [Kairos](https://kairos.io/docs/architecture/cloud-init/). Kairos is an open-source project that is used to create immutable images, Kairos is a container layer that enables you to specify dependencies and create resources before locking down the image. + +The following diagram displays the available cloud-init stages you can use to customize the device installation. + +![A diagram that displays all the cloud-init stages supported. The stages are listed in the markdown table below.](/clusters_edge_cloud-init_cloud-init-stages-supported.png) + +You can read more about Kairos and cloud-init by reviewing [Kairo's cloud-init](https://kairos.io/docs/architecture/cloud-init/) resource. For your convenience, all the supported cloud-init stages are listed below. + + +| Stage | Description +|-------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------| +| `rootfs` | This is the earliest stage, running before switching to root. It happens right after the root is mounted in /sysroot and before applying the immutable rootfs configuration. This stage is executed over initrd root, no chroot is applied. | | +| `initramfs` | This is still an early stage, running before switching to root. Here you can apply changes to the booting setup of Elemental. Despite executing this before switching to root, this invocation is chrooted into the target root after the immutable rootfs is set up and ready. | | +| `boot` | This stage executes after initramfs has switched to root and during the systemd boot-up process. 
| | +| `fs` | This stage is executed when fs is mounted and is guaranteed to have access to the state and persistent partitions ( COS_STATE and COS_PERSISTENT respectively). | | +| `network` | This stage executes when the network is available | | +| `reconcile` | This stage executes 5m after boot up and every 60m. | | +| `after-install` | This stage executes after the installation of the OS ends. | | +| `after-install-chroot` | This stage executes after the installation of the OS ends. | | +| `after-upgrade` | This stage executes after the OS upgrade ends. | | +| `after-upgrade-chroot` | This stage executes after the OS upgrade ends (chroot call). | | +| `after-reset` | This stage executes after the OS resets. | | +| `after-reset-chroot` | This stage executes after the OS resets (chroot call). | | +| `before-install` | This stage executes before installation. | | +| `before-upgrade` | This stage executes before the upgrade. | | +| `before-reset` | This stage executes before reset. | | + + +:::info + +Each stage has a before and after hook you can use to achieve more granular customization. For example, you can use `network.after` to verify network connectivity. + +::: + + +## Where to Apply Cloud-Init Stages? + +You may ask yourself where to use cloud-init stages, as both the Edge Installer and the OS pack support the usage of cloud-init stages. Use the following statements to help you decide. +
+ + +* If you need to apply a set of configurations to a specific site, then use the Edge Installer user data configuration file and its cloud-init stages to provide site settings to that specific site. + + +* If you have common configurations across a fleet of Edge host devices, customize the OS pack and use the cloud-init stages to apply those configurations. + +## Example Use Cases + + +To help you become familiar with the cloud-init stages and better understand how to use them to achieve your goals, check out the following use cases. + +
+ +:::caution + +Remember that the following code snippets are only intended to help you understand how cloud-init can be used to customize the edge host. +You can use countless combinations of the Edge Installer and OS cloud-init stages to achieve the desired customization. Check out the Kairos [stages](https://kairos.io/docs/reference/configuration/#stages) resource to learn more about other key terms, options, and advanced examples. + +::: + +Use the Edge Installer user data to apply specific site configurations to the edge host. + + +#### Set the User Password + +The `initramfs` stage is used to set the password for the user, `kairos`. + +```yaml +stages: + initramfs: + - users: + kairos: + passwd: kairos +``` + +#### Assign a User to a Group + +Another example of the `initramfs`, but this time the user is assigned to the `sudo` group. + +```yaml +stages: + initramfs: + - users: + kairos: + groups: + - sudo +``` + + +#### Assign an SSH Key + +An example configuration of assigning an SSH key to a user. + +```yaml +stages: + initramfs: + - users: + kairos: + ssh_authorized_keys: + - ssh-rsa AAAAB3N… +``` + + +#### Configure a Registry Mirror + +For situations where you need to configure a registry mirror, you can use the following example that uses the `initramfs` stage. + +```yaml +stages: + initramfs: + files: + - path: "/etc/rancher/k3s/registries.yaml" + permissions: 0644 + owner: 0 + group: 0 + content: | + mirrors: + "gcr.io": + endpoint: + - "https://my-mirror.example.com" + rewrite: + "(^.*)": "test/$1" + configs: + "my-mirror.example.com": + auth: + username: "user1" + password: "mysupermagicalpassword" + tls: + insecure_skip_verify: true +``` + +#### Erase Partitions + +You can use the `before-install` stage to remove partitions if needed. + +
+ +```yaml +stages: + before-install: + - name: "Erase Old Partitions on Boot Disk" + commands: + - wipefs -a /dev/nvme0n1 +``` + + +#### Install Tooling + +This is an example of installing third-party software or tooling. + +
+ +```yaml +stages: + after-install-chroot: + - name: "Install SSM" + commands: + - snap install amazon-ssm-agent --classic +``` + +#### Pass Sensitive Information + +If you need to transmit sensitive information, such as credentials, during the site installation phase, you can make the Edge installer skip copying specific stages to the edge hosts. The Edge installer will skip copying the stages that follow the `skip-copy-[string]` naming convention. Refer to the [Sensitive Information in the User Data Stages](skip-copying-stages.md) guide to learn more. +
+ +```yaml +stages: + network.after: + - name: skip-copy-subscribe + if: [ -f "/usr/sbin/subscription-manager" ] + commands: + - subscription-manager register --username "myname" --password 'mypassword' +``` + + +#### Complete Example + +This is an Edge Installer user data configuration that configures the user `kairos` and prepares the edge host by providing network settings and adding SSL certificates. + +
+ +```yaml +stages: + boot: + - users: + kairos: + groups: + - sudo + passwd: kairos +stylus: + site: + paletteEndpoint: api.spectrocloud.com + name: edge-randomid + registrationURL: https://edge-registration-app.vercel.app/ + network: + httpProxy: http://proxy.example.com + httpsProxy: https://proxy.example.com + noProxy: 10.10.128.10,10.0.0.0/8 + nameserver: 1.1.1.1 + interfaces: + enp0s3: + type: static + ipAddress: 10.0.10.25/24 + gateway: 10.0.10.1 + nameserver: 10.10.128.8 + enp0s4: + type: dhcp + caCerts: + - | + ------BEGIN CERTIFICATE------ + ***************************** + ***************************** + ------END CERTIFICATE------ + - | + ------BEGIN CERTIFICATE------ + ***************************** + ***************************** + ------END CERTIFICATE------ +``` + + +## OS User Data Stages + +You can also customize the device by using the OS cloud-init stages. As mentioned previously, use OS cloud-init stages to apply common configurations to many edge hosts. + +
+ +#### Assign User to Group + +In this example snippet, the OS pack is using the cloud-init stage `initramfs` to assign a default password to the user `kairos` and add the user to the `sudo` group. +
+ +```yaml +stages: + initramfs: + - users: + kairos: + groups: + - sudo + passwd: kairos +``` + +#### Custom Commands + +This is an example of moving files to a different location prior to another stage or boot-up process that requires the file. +
+ +```yaml +stages: + initramfs: + - name: "Move Files" + commands: + - | + mv /myCLI/customCLI /usr/local/bin/ + rm -R /myCLI +``` + + +#### Update Network Settings + +The network settings will get updated when the `network` stage takes effect. + +
+ +```yaml +stages: + network: + - name: "Configure DNS host" + commands: + - echo "10.100.45.98 example.local" >> /etc/hosts +``` + + +#### Invoke Custom Script + +An example of applying logic after the device has booted by using the `boot.after` stage. +
+ +```yaml +boot.after: + - | + sftp -i /credentials/ssh/id_rsa.pub@cv543.example.internal.abc:/inventory/2023/site-inventory.json + mv site-inventory.json /location/inventory/ +``` \ No newline at end of file diff --git a/docs/docs-content/clusters/edge/edge-configuration/edge-configuration.md b/docs/docs-content/clusters/edge/edge-configuration/edge-configuration.md new file mode 100644 index 0000000000..8cf22caaad --- /dev/null +++ b/docs/docs-content/clusters/edge/edge-configuration/edge-configuration.md @@ -0,0 +1,76 @@ +--- +sidebar_label: "Install Configuration" +title: "Install Configuration" +description: "Learn about the possible Palette Edge install configurations available." +hide_table_of_contents: false +tags: ["edge"] +--- + +The Edge Installer is responsible for preparing the Edge host to be ready for workloads. The Edge Installer supports the ability to specify a user data configuration file. You can use this configuration file to customize the installation and ensure your Edge host has all the required dependencies and settings to work properly in your environment. + +To better understand the Edge installation process, review the order of operations. + + +### Order of Operations: + +1. Boot device with Edge Installer. + + +2. Edge Installer gets copied to disk. + + +3. Device powers off or reboots based on the user data configuration. + + +4. Upon boot up or reboot, cloud-init stages that are specified in the Edge Installer configuration file take effect. + + +5. Edge Host Registers with Palette. + + +6. Device pairs with Palette. + + +7. Edge Installer identifies cloud-init stages as specified in the OS pack. + + +8. Operating System (OS) is installed on the device. + + +9. Device reboots. + + +10. OS cloud-init stages are applied in the proper order. + + +11. Edge Host is ready for use. + + +![The boot order sequence, listing 9 steps that flow in a sequential order.](/clusters_edge_cloud-init_boot-order-squence.png) + + +The Edge installation process accepts two types of configurations that you can use to customize the installation: Edge Installer Configuration and Edge OS Configuration. + + + +## Edge Installer Configuration + +The Edge installation process expects you to specify installation parameters. You can supply the install parameters in multiple stages. You can provide common installation configurations for all your sites during the manufacturing or staging phases. + +You can also specify additional location-specific configurations at the site during the installation. The install configurations provided in various stages are merged to create the edge host's final configuration. + +## Edge OS Configuration + +The Edge installation process supports the ability for you to customize your operating system (OS) through the usage of cloud-init stages. You can supply Edge configurations during the edge host installation with the Edge Installer and at the Operating System (OS) layer by customizing the OS pack. Once the edge host installation process is complete, the OS stages take effect during the boot-up process. + + +To effectively use the Edge Installer, we recommend you review the Edge [installer configuration](installer-reference.md) page so you gain an overview of all the available parameters. 
+ + + + +## Resources + +- [Edge OS Configuration: Cloud-Init Stages](cloud-init.md) + +- [Edge Install Configuration](installer-reference.md) diff --git a/docs/docs-content/clusters/edge/edge-configuration/installer-reference.md b/docs/docs-content/clusters/edge/edge-configuration/installer-reference.md new file mode 100644 index 0000000000..62c8166f6e --- /dev/null +++ b/docs/docs-content/clusters/edge/edge-configuration/installer-reference.md @@ -0,0 +1,401 @@ +--- +sidebar_label: "Installer Configuration" +title: "Edge Installer Configuration" +description: "Review the available Edge Installer configuration options." +hide_table_of_contents: false +sidebar_position: 20 +tags: ["edge"] +--- + + +The Edge Installer configuration user data accepts a parameter named `stylus`. In addition to the `stylus` parameter, the user data file also supports the use of cloud-init stages and other Kairos-supported parameters. The `stylus.site` parameter is how you primarily configure the Edge host, but you can also use cloud-init stages to customize the installation. Refer to the [Site Parameters](#site-parameters) for a list of all the parameters supported in the `stylus.site` parameter block. + + +:::info + +The `#cloud-config` value is a required cloud-init header required by the [cloud-init](https://cloudinit.readthedocs.io/en/latest/explanation/format.html) standard. + +::: + +## User Data Parameters + +### Defaults + +The Edge Installer is configured with a set of default values. + +| Parameter | Default | Description | +| --- | --- | --- | +| `PaletteEndpoint`| `api.console.spectrocloud.com`| The Palette API endpoint. | +| `Prefix`| `edge`| The default prefix to apply to the unique identifier. | +| `RegistrationURL`| `https://edge-registration-generic.vercel.app`| The URL that operators should use when registering the Edge host with Palette.| +| `disableAutoRegister`| `false` | Set to `true` if you want to disable auto registration. Refer to the [Register Edge Host](../site-deployment/site-installation/edge-host-registration.md) reference page to learn more about Edge host registrations.| + +The default values assume you are installing the Edge host in an environment without a network proxy, do not require remote access to the Edge host, and are using Palette SaaS. If you have requirements different from the default values, you must provide the Edge Installer with additional information. + +You can provide the installer with additional configuration values in the user data configuration file. The following table contains all the supported user data parameters the installer accepts. + +### Debug Parameters + +You can enable the `debug` and `trace` parameters when you need to troubleshoot Edge Installer issues. + +| Parameter | Description | +| --- | --- | +| `debug` | Enable this parameter for debug output. Allowed values are `true` or `false`. Default value is `false`. | +| `trace` | Enable this parameter to display trace output. Allowed values are `true` or `false`. Default value is `false`.| +| `imageOverride`| You can specify a different Edge Installer image versus the default image. | + +```yaml +#cloud-config +stylus: + debug: true + trace: true + imageOverride: "example.com/example-installer:v1.4.0" +``` + +### Install Mode + +You can specify the mode the Edge Installer should prepare the installation for. The Edge Installer supports two different modes. + +
+ +- Connected: The site has internet connectivity and the installation is initiated through Palette. + + +- Air-Gapped: The site does not have internet connectivity. The installation is initiated through the Palette Edge CLI. + + +| Parameter | Description | +| --- | --- | +| `installationMode` | Allowed values are `connected`. Default value is `connected`. | + 
+ +```yaml +#cloud-config +stylus: + installationMode: "connected" +``` + + + + +### External Registry + +You can point the Edge Installer to a non-default registry to load content from another source. Use the `registryCredentials` parameter object to specify the registry configurations. + + +| Parameter | Description | +|----------------------|--------------------------------------------------------| +| `domain` | The domain of the registry. You can use an IP address plus the port or a domain name. | +| `username` | The username to authenticate with the registry. | +| `password` | The password to authenticate with the registry. | +| `insecure` | Whether to allow insecure connections to the registry. Default value is `false`. | + + +
+ + +```yaml +#cloud-config +stylus: + registryCredentials: + domain: 10.10.254.254:8000/spectro-images + username: ubuntu + password: + insecure: true +``` + +### Site Parameters + +The `stylus.site` blocks accept the following parameters. + +| Parameter | Description | +| --- | --- | +| `paletteEndpoint` | The URL endpoint that points to Palette. Example: `api.spectrocloud.com` | +| `edgeHostToken` | A token created at the tenant scope that is required for auto registration. | +| `projectUid` | The id of the project the Edge host will belong to. | +| `projectName` | The name of the project. +| `name` | If you do not specify an Edge hostname, the system will generate one from the serial number of the device. If Stylus is unable to identify the serial number, it will generate a random ID instead. In cases where the hardware does not have a serial number, we suggest that you specify a value so there is minimal chance of duplication. Use the value `"$random"` to generate a random ID. You can also use the `DeviceUIDPaths` to read in a value from a system file. | +| `prefix` | The system assigns a prefix value to the device UID generated by the Edge Installer. By default, this value is set to `edge`. | +| `network` | The network configuration settings. Review the [Site Network Parameters](#site-network-parameters) below for more details. | +| `registrationURL` | The URL that operators should use to register the Edge host with Palette. | +| `insecureSkipVerify` | This controls whether or not a client verifies the server's certificate chain and host name. | +| `caCerts` | The Secure Sockets Layer (SSL) certificate authority (CA) certificates.| +| `clusterId` | The id of the host cluster the edge host belongs to. | +| `clusterName` | The name of the host cluster the edge host belongs to. | +| `tags` | A parameter object you use to provide optional key-value pairs. Refer to the [Tags](#tags) section to learn more. | +| `tagsFromFile` | Specify tags from a file. Refer to the [Tags](#tags) section to learn more. | +| `tagsFromScript` | Use a script to generate the tags. Refer to the [Tags](#tags) section to learn more. | +| `deviceUIDPaths` | Specify the file path for reading in product or board serial that can be used to set the device ID. The default file path is **/sys/class/dmi/id/product_uuid**. Refer to the [Device ID (UID) Parameters](#device-id-uid-parameters) section to learn more.| + +### Site Network Parameters + +Use the site network parameters to configure network settings so the edge host can communicate with Palette. + +| Parameter | Description | +| --- | --- | +| `siteNetwork.httpProxy` | The URL of the HTTP proxy endpoint. | +| `siteNetwork.httpSProxy` | The URL of the HTTPS proxy endpoint. | +| `siteNetwork.noProxy` | The list of IP addresses or CIDR ranges to exclude routing through the network proxy. | +| `siteNetwork.interfaces` | The network settings respective to the interfaces. Review the [Network Parameters](#network-parameters) table below for more details. | +| `siteNetwork.nameserver` | The IP address of the global DNS nameserver that requests should be routed to. | + +### Network Parameters + +Network settings specific to the network interface of the edge host. You can configure multiple interfaces. + +| Parameter | Description | +| --- | --- | +| `networkInterface.ipAddress` | The assigned IP address to the network interface. | +| `networkInterface.mask` | The network mask for the assigned IP address. 
+| `networkInterface.type` | Defines how the IP address is assigned. Allowed values are `dhcp` or `static`. Defaults to `dhcp`. |
+| `networkInterface.gateway` | The network gateway IP address. |
+| `networkInterface.nameserver` | The IP address of the DNS nameserver this interface should route requests to. |
+
+### Device ID (UID) Parameters
+
+The device ID is generated by a specific priority sequence. The table below outlines the priority order, from top to bottom, used when generating a UID for the Edge host. The UID generation starts with priority one, the device `name`, followed by the attributes within `deviceUIDPaths`, and lastly, a random UUID is generated if all other methods are unsuccessful.
+
+| Priority | Method | Description |
+|----------|------------------|-----------------------------------------------------------------------------|
+| 1 | `name` | The device name is used as the primary identifier for the Edge host. |
+| 2 | `deviceUIDPaths` | Specifies the paths and associated regular expressions to extract the UID. |
+| 3 | `"$random"` | Assigns a random UUID as the Edge host ID. |
+
+
+By default, the product UID path is set to `/sys/class/dmi/id/product_uuid`. To modify this path and use other attributes within the same folder, such as the product or board serial number, use the `regex` parameter. For example, instead of the default path **/sys/class/dmi/id/product_uuid**, you can use the board serial number path **/sys/class/dmi/id/board_serial** by applying a `regex` parameter. Refer to the [regex syntax](https://github.com/google/re2/wiki/Syntax) reference guide to learn more.
+
+| Parameter | Description |
+|-------------------|-------------------------------------------------------|
+| `name` | The path of the file containing the UID. |
+| `regex` | The regular expression pattern to match the UID. |
+
+You can also use the `regex` parameter to remove unsupported characters from attributes. Refer to the caution box below for a list of unsupported characters.
+
+<br />
+ +```yaml +#cloud-config +stylus: + site: + deviceUIDPaths: + - name: /etc/palette/metadata-regex + regex: "edge.*" +``` + +
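+For example, here is a minimal sketch that reads the board serial number instead of the default product UUID. The `regex` value is only an illustration that keeps alphanumeric characters and hyphens; adjust it to match your hardware's serial number format.
+
+```yaml
+#cloud-config
+stylus:
+  site:
+    deviceUIDPaths:
+      - name: /sys/class/dmi/id/board_serial
+        regex: "[a-zA-Z0-9-]+"
+```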
+
+:::caution
+
+The UID is truncated to a maximum length of 128 characters. The following characters are unsupported:
+
+`/ ? # & + % , $ ~ ! @ * () {} | = ; : <> ' . ^ "`
+
+
+:::
+
+### Tags
+
+You can assign tags to the Edge host by specifying them manually in the configuration file. The tags object accepts key-value pairs. The following example shows how to assign tags manually to the Edge host.
+
+```yaml
+#cloud-config
+stylus:
+  site:
+    tags:
+      env: prod
+      department: engineering
+```
+
+
+You can also provide tags through more dynamic methods, such as reading them in from a file or from a script that returns a JSON object, and you can combine the different methods. The following sections describe each method you can use to provide tags dynamically to the Edge host.
+
+<br />
+
+:::info
+
+The order of precedence for tags is as follows:
+
+1. Manually provided tags - `tags`.
+
+2. Tags from a script - `tagsFromScript`.
+
+3. Tags from a file - `tagsFromFile`.
+
+Tags from a higher-priority source override tags from a lower-priority source. For example, if you specify a tag manually and also specify the same tag in `tagsFromFile`, the tag from the `tags` object is what the Edge Installer will use.
+
+:::
+
+<br />
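+For example, here is a short sketch of how this precedence plays out. Assuming the file below also defines an `env` tag, the manually provided `env: prod` value wins because manual tags have the highest priority. The file path is only an illustration.
+
+```yaml
+#cloud-config
+stylus:
+  site:
+    tags:
+      env: prod
+    tagsFromFile:
+      fileName: "/etc/palette/tags.txt"
+```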
+ +### Tags From a File + +You can specify tags from a file by using the `tagsFromFile` parameter object. The `tagsFromFile` parameter object accepts the following parameters. + +| Parameter | Description | Default Value | +| --- | --- | --- | +| `fileName` | The path to the file containing the tags. | `''` | +| `delimiter` | The delimiter used to separate the key-value pairs. | `\n` | +| `separator` | The separator used to separate the key from the value. | `:` | + +
+ +```yaml +#cloud-config +stylus: + site: + tags: + department: 'sales' + tagsFromFile: + fileName: "/etc/palette/tags.txt" + delimiter: ";" + separator: ":" +``` + +Example: + +You can specify different delimiters and separators to parse the content depending on how the content is formatted. Assume the file **/etc/palette/tags.txt** contains the following content. + +
+ +```text hideClipboard +Location:Mumbai,India; Latitude:48.856614; Longitude:2.352221; owner:p78125d +``` + +
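+With the `;` delimiter and `:` separator specified in the previous example, and assuming the surrounding whitespace is trimmed, the parsed result is roughly equivalent to defining the following tags manually.
+
+```yaml
+#cloud-config
+stylus:
+  site:
+    tags:
+      department: sales
+      Location: Mumbai,India
+      Latitude: "48.856614"
+      Longitude: "2.352221"
+      owner: p78125d
+```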
+ +### Tags From a Script + +You can specify tags from a script by using the `tagsFromScript` parameter object. The script must be executable and return a JSON object that contains the tags in the following format. + +
+
+```json hideClipboard
+{
+  "key": "value"
+}
+```
+
+Example:
+
+<br />
+ +```json +{ + "department": "sales", + "owner": "p78125d" +} +``` + + + +The `tagsFromScript` parameter object accepts the following parameters. + +| Parameter | Description | Default Value | +| --- | --- | --- | +| `scriptName` | The path to the script that returns a JSON object. | `''` | +| `timeout` | The timeout value in seconds. | `60` | + +
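+For reference, the snippet below is a minimal sketch of such a script written in Bash. It assumes the Edge Installer reads the JSON object from the script's standard output; the tag keys and the use of `hostname` are illustrative only.
+
+```bash
+#!/bin/bash
+# Example tag script - print a JSON object containing the desired tags to stdout.
+cat << EOF
+{
+  "department": "sales",
+  "owner": "p78125d",
+  "hostname": "$(hostname)"
+}
+EOF
+```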
+
+```yaml
+#cloud-config
+stylus:
+  site:
+    tags:
+      department: 'sales'
+    tagsFromScript:
+      scriptName: "/etc/palette/tags.py"
+      timeout: 60
+```
+
+
+## Installer Example Configuration
+
+The following example shows how a user data configuration file is used to customize the Edge host installation process.
+<br />
+ +```yaml +#cloud-config +stylus: + site: + paletteEndpoint: api.spectrocloud.com + edgeHostToken: yourEdgeRegistrationTokenHere + projectUid: 12345677788 + tags: + env: east + terraform_managed: true + os: ubuntu + name: edge-59d3f182-35fe-4e10-b0a0-d7f761f1a142 + + network: + httpProxy: http://proxy.example.com + httpsProxy: https://proxy.example.com + noProxy: 10.10.128.10,10.0.0.0/8 + nameserver: 1.1.1.1 + interfaces: + enp0s3: + type: static + ipAddress: 10.0.10.25/24 + gateway: 10.0.10.1 + nameserver: 10.10.128.8 + enp0s4: + type: dhcp + caCerts: + - | + ------BEGIN CERTIFICATE------ + ***************************** + ***************************** + ------END CERTIFICATE------ + - | + ------BEGIN CERTIFICATE------ + ***************************** + ***************************** + ------END CERTIFICATE------ +``` + +
+
+:::info
+
+Check out the [Prepare User Data](../edgeforge-workflow/prepare-user-data.md) resource for more examples.
+
+:::
+
+
+## Additional Configurations
+
+The Edge Installer honors other Kairos parameters, such as `install` and `options`. To learn more about Kairos parameters, refer to the [Kairos configuration](https://kairos.io/docs/reference/configuration/) page.
+
+The following is an example Edge Installer configuration that uses the `install` parameter block to power off the device upon completion of the installation process.
+
+<br />
+ +```yaml +#cloud-config +stylus: + site: + paletteEndpoint: api.spectrocloud.com + registrationURL: https://edge-registration.vercel.app + projectUid: yourProjectIdHere + edgeHostToken: yourEdgeRegistrationTokenHere + tags: + myTag: myValue + myOtherTag: myOtherValue + tagsFromScript: + scriptName: /etc/palette/tags.sh + timeout: 30 + reboot: false + +stages: + initramfs: + - users: + palette: + groups: + - sudo + passwd: palette + +install: + poweroff: true +``` \ No newline at end of file diff --git a/docs/docs-content/clusters/edge/edge-configuration/skip-copying-stages.md b/docs/docs-content/clusters/edge/edge-configuration/skip-copying-stages.md new file mode 100644 index 0000000000..d7c8996e74 --- /dev/null +++ b/docs/docs-content/clusters/edge/edge-configuration/skip-copying-stages.md @@ -0,0 +1,82 @@ +--- +sidebar_label: "Sensitive User Data Handling" +title: "Sensitive User Data Handling" +description: "Learn how to make the Edge installer skip copying the specific user data stages to the edge hosts so that you can use sensitive information in the user data stages." +hide_table_of_contents: false +sidebar_position: 10 +tags: ["edge"] +--- + + +Suppose you must add sensitive information, such as credentials, in your user data configuration file. In the Edge deployment lifecycle, you have two opportunities to apply user data to edge hosts. The first is during the staging phase, where you add the Edge installer to the Edge host. The second opportunity is during the site installation phase, where you can provide supplementary user-data configurations if needed. The diagram below highlights the two mentioned phases in the Edge lifecycle. + + + ![A diagram highlighting the two stages in the edge deployment lifecycle where you can apply user data.](/edge_edge-configuration_cloud-init_user-data.png) + + + +- **Staging Phase** - In the staging phase, you prepare your edge hosts using the organization-level configurations. The configurations include the Edge Installer, the user data, and, optionally, a content bundle. You boot the edge hosts using the Edge Installer and apply the configurations. All the configurations, including the user data, are copied to the edge host during installation. + + Once the edge hosts are prepared with the initial installation, you ship your devices to the site for installation. This step is also called the *installer handoff* step. Refer to the [Prepare Edge Host](../site-deployment/stage.md#prepare-edge-host) guide to learn more about driving the installer handoff step. + + +- **Site Installation Phase** - In the site installation phase, you use supplementary user data to apply site-specific configurations to the edge hosts. The user data is copied to the edge host during the installation unless you follow the specific naming convention for your user data stages as described below. + + Refer to the [Multiple User Data Use Case](../edgeforge-workflow/prepare-user-data.md#multiple-user-data-use-case) guide to understand the use cases for applying supplementary user data. If you need to apply a supplementary user data, refer to the [Perform Site Install](../site-deployment/site-installation/site-installation.md) guide to learn the site installation process in detail. + + +In both steps mentioned above, the Edge Installer copies the user data configuration file provided to the **/run/stylus/userdata** file or the **/oem/userdata** file on the edge hosts. 
If you want to prevent specific user data stages from being copied to the edge host's storage, you can use a specific naming convention to disable the default copy behavior. However, be aware that the persistence behavior differs depending on which phase of the Edge deployment lifecycle you provide the sensitive user data in. Refer to the [Sensitive Information in the Site Installation](#sensitive-information-in-the-site-installation) section below to learn more.
+<br />
+ +## Sensitive Information in the Installer Handoff + + +:::caution + +We do not recommend inserting sensitive information in the user data configuration file provided in the installer handoff phase. Use a supplementary user data configuration file and apply it at the site installation phase. + +::: + + +In the installer handoff step, the Edge Installer copies and persists *all* your user data stages into the configuration files on the edge hosts. Copying sensitive information to the edge hosts may pose security risks. Therefore, we recommend you avoid inserting sensitive information in the user data configuration file provided in the installer handoff phase. Use a supplementary user data configuration file and apply it at the site installation phase. + + +
+
+## Sensitive Information in the Site Installation
+
+You may want to use sensitive information, such as credentials for patching the OS on your edge hosts, in a user data stage during the site installation phase. In such scenarios, you must use the `skip-copy-[string]` naming convention for your user data stages. Replace the `[string]` placeholder with any meaningful string per your requirements. The Edge Installer will skip copying the stages whose name matches the regular expression `skip-copy-*` to the edge host. The stages will execute as long as the drive containing the user data configuration file is mounted to the edge host. In most cases, the drive will be a bootable USB flash drive.
+
+For example, the `skip-copy-subscribe` stage below follows the `skip-copy-[string]` naming convention. Therefore, the Edge Installer will skip copying the stage to the **/run/stylus/userdata** file or the **/oem/userdata** file on the edge host. The stage and the sensitive information below are marked with the points of interest 1 and 2, respectively.
+<br />
+ + + +```yaml +stages: + network.after: + - name: skip-copy-subscribe + if: [ -f "/usr/sbin/subscription-manager" ] + commands: + - subscription-manager register --username "myname" --password 'mypassword' +``` + + + +The stage will execute as long as you have mounted the drive containing the user data configuration file. You must unmount the drive from the edge host after the device registers with Palette and before you deploy a Kubernetes cluster on the device. \ No newline at end of file diff --git a/docs/docs-content/clusters/edge/edge-native-lifecycle.md b/docs/docs-content/clusters/edge/edge-native-lifecycle.md new file mode 100644 index 0000000000..6d12ca91e1 --- /dev/null +++ b/docs/docs-content/clusters/edge/edge-native-lifecycle.md @@ -0,0 +1,39 @@ +--- +sidebar_label: "Deployment Lifecycle" +title: "Edge Deployment Lifecycle" +description: "Learn about the Edge Deployment Lifecycle" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["edge"] +--- + + +The typical end-to-end lifecycle of deploying clusters at edge locations involves several distinct phases in which different organizations or teams need to perform specific tasks. + + + ![A flow of the lifecycle, starting with model, staging, install, and finally register. Once all phases are complete the cluster provision occurs.](/native-edge-deployment-lifecycle.png) + +* **Modeling** - App owners build and test the applications in test environments and model application profiles in Palette for a cluster installation. + + +* **Staging** - IT/Ops teams prepare an Edge host installer variant from Palette's base installer. In this phase, available customizations that are common to all edge locations are applied to the base installer. This includes specifying or overriding properties such as Palette endpoints, app URL for QR code-based registration, default OS users, and default network settings. The installer variant is then exported to all the site locations. + + +* **Installation** - Site operators provision one or more Edge hosts at edge locations using the image prepared in the staging phase. In this phase, the site operator applies site-specific properties such as static IP address, network proxy, and certificate. + + +* **Registration** - Edge hosts need to be registered with Palette or through registration tokens. Each cluster requires a cluster profile. Clusters are configured with infrastructure and add-on profiles that application architects applied in the modeling phase. + +IT Ops teams can perform this step in two different ways: + + * Deploy an application without having to manage a server to automate edge device registration. We provide a sample application you can customize to fit your needs. + + * Register Edge hosts and configure clusters using the Palette UI, API or Terraform. + +The Palette Edge Management agent inside the Edge host waits for the configuration to be available in Palette. Once registration and configuration are complete, the agent installs the Kubernetes cluster. The agent reads the Kubernetes distribution, version, and configuration properties from the cluster profile. Additional add-ons, if any, are deployed after the Kubernetes installation. You can install a single or multi-node cluster using this process. You can scale up your cluster at a later time after deployment. + +If the edge location configuration is known and predictable, then the IT/Ops team can combine staging, installation, and registration into one step and ship the fully configured Edge hosts to the edge location. 
The site operator at the edge location only needs to hook up the power and network cables without further device configuration. The Edge cluster will be ready to be centrally managed for future upgrades. + +## Next Steps + +Now that you have an understanding of the deployment lifecycle, start the deployment of your Edge host by reviewing the [Site Deployment](site-deployment/site-deployment.md) instructions. diff --git a/docs/docs-content/clusters/edge/edge.md b/docs/docs-content/clusters/edge/edge.md new file mode 100644 index 0000000000..8e09c89a44 --- /dev/null +++ b/docs/docs-content/clusters/edge/edge.md @@ -0,0 +1,89 @@ +--- +sidebar_label: "Edge" +title: "Edge" +description: "The methods of creating clusters for a speedy deployment on any CSP" +hide_table_of_contents: false +sidebar_custom_props: + icon: "hdd" +tags: ["edge"] +--- + + +Edge clusters are Kubernetes clusters set up on Edge hosts installed in isolated locations like grocery stores and restaurants versus a data center or cloud environment. These Edge hosts can be bare metal machines or virtual machines and are managed by operators at remote sites. + +Palette provisions workload clusters on Edge hosts from the Palette management console. Palette also provides end-to-end cluster management through scaling, upgrades, and reconfiguration operations. + + +Edge computing brings computing and data storage closer to the source, reducing latency and bandwidth issues that result from central computing and improving overall application performance. Industries such as retail, restaurants, manufacturing, oil and gas, cruise ships, healthcare, and 5G telecommunication providers typically have use cases that require content data and processing to be closer to their applications. + + +
+ +![A drawing of Edge architecture with humans interacting](/clusters_edge_edge-arch-drawing.png) + +
+ + + +The following are some highlights of the comprehensive Palette Edge Solution: + +* Centralized Full Stack Management + + +* Low touch, plug-and-play setup + + +* Support for AMD64 and ARM64 architectures + + +* Immutable update for Kubernetes and operating system (OS) with zero downtime + + +* Distro-agnostic Kubernetes and OS + + +* Secured remote troubleshooting + + +* Scalable from tens to thousands of locations + + +* Support for pre-provisioned and on-site device registration + + +Palette's Edge solution is designed for sites that typically have one or more small devices, such as [Intel NUC](https://www.intel.com/content/www/us/en/products/docs/boards-kits/nuc/what-is-nuc-article.html). An instance of Palette optimized for edge computing is installed in the device along with the operating system and Kubernetes. + + +:::info + +Edge is built on top of the open-source project [Kairos](https://kairos.io), which provides a tamper-proof immutable operating system with zero downtime rolling upgrade. + +::: + +Palette manages the installation and all the Day-2 activities, such as scaling, upgrades, and reconfiguration. + + + +## Get Started With Edge + + +To start with Edge, review the [architecture](architecture.md) and the [lifecycle](edge-native-lifecycle.md) resource to gain a high-level understanding of the Edge components and installation process. Next, become familiar with the [EdgeForge workflow](edgeforge-workflow/edgeforge-workflow.md). EdgeForge is the workflow you will use to customize the Edge host installation to match your environment and organizational needs - this includes creating the Edge artifacts for Edge hosts. The last step of the Edge deployment lifecycle is the deployment step. Review the [Deployment](site-deployment/site-deployment.md) guide to understand what it takes to deploy an Edge host. + + + +## Resources + +- [Edge Native Architecture](architecture.md) + + +- [Deployment Lifecycle](edge-native-lifecycle.md) + + +- [Install Configuration](edge-configuration/edge-configuration.md) + + +- [EdgeForge Workflow](edgeforge-workflow/edgeforge-workflow.md) + + +- [Site Deployment](site-deployment/site-deployment.md) + diff --git a/docs/docs-content/clusters/edge/edgeforge-workflow/_category_.json b/docs/docs-content/clusters/edge/edgeforge-workflow/_category_.json new file mode 100644 index 0000000000..c3460c6dbd --- /dev/null +++ b/docs/docs-content/clusters/edge/edgeforge-workflow/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 30 +} diff --git a/docs/docs-content/clusters/edge/edgeforge-workflow/build-artifacts.md b/docs/docs-content/clusters/edge/edgeforge-workflow/build-artifacts.md new file mode 100644 index 0000000000..93ee864cf3 --- /dev/null +++ b/docs/docs-content/clusters/edge/edgeforge-workflow/build-artifacts.md @@ -0,0 +1,268 @@ +--- +sidebar_label: "Build Edge Artifacts using a Content Bundle" +title: "Build Edge Artifacts using a Content Bundle" +description: "Learn how to build an Edge installer ISO using the Palette Edge CLI and the CanvOS utilities." +icon: "" +hide_table_of_contents: false +sidebar_position: 30 +tags: ["edge"] +--- + + +Palette's Edge solution supports creating Edge artifacts for edge devices deployed in a low internet bandwidth environment or an *air-gapped* environment. An air-gapped environment is a deployment site with no direct internet access. Using a content bundle, you can build Edge artifacts for installation in such environments. 
+
+
+A content bundle is an archive that includes the Operating System (OS) image, the Kubernetes distribution, the Container Network Interface (CNI), and all other dependencies specified in the cluster profiles you want to deploy to the Edge cluster. A content bundle provides several benefits, such as:
+
+- Pre-loaded software dependencies in the installer image.
+
+
+- An optimized deployment process for bandwidth-constrained or air-gapped environments.
+
+
+- The ability to manage the software dependencies available to Edge clusters more granularly.
+
+
+This how-to guide provides instructions for creating and using a content bundle to build the Edge artifacts. You will begin by installing a necessary tool, the Palette Edge CLI, on your development machine. The Palette Edge CLI is a command-line utility to interact with Palette and perform specific tasks in your development environment, such as creating a content bundle. Next, you will download all the software dependencies mentioned in your cluster profile using the Palette Edge CLI and create a content bundle. Lastly, when your content bundle is ready, you will use the CanvOS utility to embed the content bundle and user data into the Edge installer ISO image.
+
+The diagram below displays the overarching steps to build the Edge installer ISO using a content bundle. The diagram also highlights the primary prerequisites to create a content bundle.
+
+![An overarching diagram displaying the workflow in the current guide.](/clusters_edge_edge-forge-workflow_build-images_build-artifacts_overarching.png)
+
+
+## Prerequisites
+
+:::caution
+
+This how-to guide extends the [Build Edge Artifacts](palette-canvos.md) workflow. Therefore, you must complete it before proceeding with the current guide.
+
+:::
+
+
+To complete this guide, you will need the following items:
+
+
+* A physical or virtual Linux machine with *AMD64* (also known as *x86_64*) processor architecture to build the Edge installer ISO image. You can issue the following command in the terminal to check your processor architecture.
+
+  ```bash
+  uname -m
+  ```
+
+* The Linux machine should have the following minimum hardware configuration:
+  - 4 CPU
+  - 8 GB memory
+  - 100 GB storage. The actual storage will depend on the size of the content bundle you will use to build the Edge installer ISO image.
+
+
+* You must have completed the [Build Edge Artifacts](palette-canvos.md) guide to build the provider images and create a cluster profile referencing one of the provider images.
+
+
+* A Spectro Cloud API key. Later in this guide, you will use this API key to authenticate the Palette Edge CLI utility and allow it to interact with Palette. Refer to the [User Authentication](../../../user-management/user-authentication.md/#api-key) guide to create a new API key.
+
+
+## Instructions
+
+Use the following instructions on your Linux machine, which this guide refers to as the development environment.
+
+
+1. Visit the [Downloads](../../../spectro-downloads#palette-edge-cli) page and download the latest Palette Edge CLI. You can download the Palette Edge CLI by clicking on the available URL or using the download URL in the following command. Replace the `[PALETTE-EDGE-BINARY-URL]` placeholder with the download URL.
+<br />
+ + ```bash + curl [PALETTE-EDGE-BINARY-URL] --output palette-edge + ``` + + +2. Open a terminal session and navigate to the folder where you have downloaded the palette-edge binary. Set the executable permissions for the palette-edge binary by issuing the following command. +
+ + ```bash + chmod 755 palette-edge + ``` + + +3. Use the following command to move the palette-edge binary to the **/usr/local/bin** directory to make the binary available in your system $PATH. This will allow you to issue the `palette-edge` command from any directory in your development environment. +
+ + ```bash + mv palette-edge /usr/local/bin + ``` + + +4. Verify the installation of the Palette Edge CLI by issuing the following command. The output will display information about the currently supported OS and Kubernetes distributions. +
+ + ```bash + palette-edge show + ``` + + ```hideClipboard bash + # Sample output + ┌────────────────────────────────────────────────────────────────────────┐ + | OS Flavor | Description | Base Image URI | + | opensuse-leap | Opensuse Leap 15.4 | quay.io/kairos/core-opensuse-leap | + | ubuntu-20 | Ubuntu 20.4 LTS | quay.io/kairos/core-ubuntu-20-lts | + | ubuntu-22 | Ubuntu 22.4 LTS | quay.io/kairos/core-ubuntu-22-lts | + └────────────────────────────────────────────────────────────────────────┘ + ┌─────────────────────────────────────────────────────────────────────────────────────────────┐ + | K8S Flavor | Description | Supported Versions | + | k3s | Rancher K3s | 1.25.2-k3s1,1.24.6-k3s1,1.23.12-k3s1,1.22.15-k3s1 | + | kubeadm | Kubernetes kubeadm | 1.25.2,1.24.6,1.23.12,1.22.15 | + | rke2 | Rancher RK2 | 1.25.2-rke2r1,1.24.6-rke2r1,1.23.12-rke2r1,1.22.15-rke2r1 | + └─────────────────────────────────────────────────────────────────────────────────────────────┘ + ┌─────────────────────────────────┐ + | Component | Version | + | Spectro Agent Version | v3.4.3 | + | Kairos Version | v2.0.3 | + └─────────────────────────────────┘ + ``` +
+ + + +5. Set the Spectro Cloud API key as an environment variable by issuing the following command. Replace the `[USE-YOUR-API-KEY_HERE]` placeholder with your API key. The Palette Edge CLI will use this API key to authenticate with Palette. Once authenticated, the Palette Edge CLI can interact with your Palette account. +
+ + ```bash + export API_KEY=[USE-YOUR-API-KEY_HERE] + ``` + + +6. Log in to [Palette](https://console.spectrocloud.com). + + +7. Copy your Palette project ID. You will use this ID in a later step. The project ID is on the top-right corner of your Palette project overview page. Use the following screenshot to help you find your project ID. + + ![A screenshot highlighting the project ID in Palette project overview page](/clusters_edge_edge-forge-workflow_build-images_build-project_id.png) + + +8. Navigate to the left **Main Menu** and select **Profiles**. + + +9. Select the cluster profile you want to include in the content bundle. Click on the target cluster profile to access its details page. + + +10. Examine the cluster details page URL. The cluster details page URL follows the `[Palette-URL]/projects/[PROJECT-ID]/profiles/cluster/[CLUSTER-PROFILE-ID]` syntax. The cluster details page URL has your project ID and the cluster profile ID. For example, the screenshot below highlights the project ID and the cluster profile ID in a cluster details page URL. + + ![A screenshot highlighting the cluster profile ID and project ID in the URL of the cluster details page.](/clusters_edge_edge-forge-workflow_build-images_build-artifacts_url.png) + + + +11. Copy the cluster profile ID from the cluster details page URL for the next step. + + +12. Switch back to your development environment, and set the project ID as an environment variable by issuing the following command. Replace the `[USE-YOUR-PROJECT-ID_HERE]` placeholder with your project ID. +
+ + ```bash + export PROJECT_ID=[USE-YOUR-PROJECT-ID_HERE] + ``` + + +13. Set the cluster profile ID as an environment variable using the following command. Replace the `[USE-YOUR-PROFILE-ID_HERE]` placeholder with your cluster profile ID. The Palette Edge CLI uses the cluster profile ID to reference the correct cluster profile and download all its software dependencies. +
+ + ```bash + export PROFILE_ID=[USE-YOUR-PROFILE-ID_HERE] + ``` + + +14. Issue the command below to create the content bundle. The `build` command uses the following flags: + + |**Command Flag**|**Value**| + |---|---| + |`--api-key`|Spectro Cloud API key| + |`--project-id`|Palette project ID| + |`--cluster-profile-ids`|Cluster profile IDs. If you want to include multiple cluster profiles in the content bundle, add multiple cluster profile IDs separated by a comma.| + |`--palette-endpoint`|Palette API endpoint. The default Palette API endpoint is `api.spectrocloud.com`| + |`--outfile`|Path to write the final content bundle. | + You can issue `palette-edge build --help` to know about other available flags. +
+ + ```bash + palette-edge build --api-key $API_KEY \ + --project-id $PROJECT_ID \ + --cluster-profile-ids $PROFILE_ID \ + --palette-endpoint api.spectrocloud.com \ + --outfile content + ``` + + + +15. Use the command below to list all files in the current directory to verify that you created the content bundle successfully. The content bundle will have the following naming convention, `content-[random-string]`, for example, **content-8e61a9e5**. +
+ + ```bash + ls -al + ``` + + +16. List the files in the content bundle folder using the following command. The output will display the compressed core and app content files. +
+ + ```bash + ls -al content-*/ + ``` + ```hideClipboard bash + # Sample output + total 3981104 + -rw-rw-r-- 1 jb jb 1598552722 Jul 26 18:20 app-content-8e61a9e5.zst + -rw-rw-r-- 1 jb jb 2478086360 Jul 26 18:20 core-content-8e61a9e5.zst + ``` + + +17. Issue the following command to build the Edge artifacts with your content bundle. The `+iso` option specifies the build target. This command will generate an ISO image from the content bundle and other configurations you have specified in the **.arg** and **user-data** files. +
+ + ```bash + sudo ./earthly.sh +iso + ``` + This command may take up to 15-20 minutes to finish depending on the resources of the host machine. + + +## Validate + +List the Edge installer ISO and checksum by issuing the following command from the **CanvOS/** directory. +
+ +```shell +ls build/ +``` + +```hideClipboard shell +palette-edge-installer.iso +palette-edge-installer.iso.sha256 +``` +
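+You can also compare the ISO against the generated checksum file. The commands below are a minimal sketch; confirm that the two values match.
+
+```shell
+sha256sum build/palette-edge-installer.iso
+cat build/palette-edge-installer.iso.sha256
+```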
+ +To validate, you can prepare an edge device using the Edge installer ISO. You can follow the [Prepare Edge Host for Installation](../site-deployment/stage.md) guide if you prepare a bare metal machine or a VMware VM as a host. Below are the high-level steps for your reference: +
+
+1. Create a bootable USB flash drive using any third-party software. Most software that creates a bootable USB drive will validate the ISO image. A command-line example follows this list.
+
+
+2. Select a physical or virtual host machine to emulate as an edge device. Enable Dynamic Host Configuration Protocol (DHCP) on the host before proceeding with the installation process. Enabling DHCP is necessary for the device to obtain an IP address automatically from the network.
+
+
+3. Flash the edge device using the bootable USB drive.
+
+
+4. The last step is to power on the edge device and start the installation process. For more information, refer to the [Perform Site Install](../site-deployment/site-installation/site-installation.md) documentation.
+<br />
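+For reference, the command below is a minimal sketch of step 1 on a Linux workstation using `dd`. The device name `/dev/sdX` is a placeholder. Verify it carefully before running the command, because `dd` overwrites the target device.
+
+```bash
+# Write the Edge installer ISO to the USB drive at /dev/sdX (placeholder).
+sudo dd if=build/palette-edge-installer.iso of=/dev/sdX bs=4M status=progress conv=fsync
+```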
+
+## Next Steps
+
+Palette's Edge solution allows you to create Edge artifacts using a content bundle for edge devices deployed in low internet bandwidth or air-gapped environments. You created a content bundle using the Palette Edge CLI in this guide. Next, you used the CanvOS utility to embed the content bundle and user data into an Edge installer ISO.
+
+
+
+As the next step, we recommend you check out the end-to-end tutorial, [Deploy an Edge Cluster on VMware](../site-deployment/deploy-cluster.md). The tutorial provides a detailed walkthrough on deploying an Edge cluster in a VMware environment.
+
+
+Check out the reference resources below to learn more about preparing an Edge host.
+
+
+- [Prepare Edge Host for Installation](../site-deployment/stage.md)
+
+
+- [Perform Site Install](../site-deployment/site-installation/site-installation.md)
\ No newline at end of file
diff --git a/docs/docs-content/clusters/edge/edgeforge-workflow/build-content-bundle.md b/docs/docs-content/clusters/edge/edgeforge-workflow/build-content-bundle.md
new file mode 100644
index 0000000000..6afa346aee
--- /dev/null
+++ b/docs/docs-content/clusters/edge/edgeforge-workflow/build-content-bundle.md
@@ -0,0 +1,138 @@
+---
+sidebar_label: "Build Content Bundle"
+title: "Build Content Bundle"
+description: "Learn about building your edge content bundles in order to optimize cluster deployments"
+hide_table_of_contents: false
+sidebar_position: 20
+tags: ["edge"]
+---
+
+
+Content bundles are archives of all the container images required for one or more cluster profiles. The content bundle includes Helm charts, Packs, and manifest files needed to deploy your Edge host cluster. In addition to core container images, the content bundle can include artifacts from your applications that you wish to deploy to the Edge cluster. [Cluster Profiles](../../../cluster-profiles/cluster-profiles.md) are the primary source for building these content bundles.
+
+
+:::caution
+
+Currently, the content bundles include Helm charts and Packs. However, keep in mind that the container images of the Helm Charts and Packs are extracted and predeployed into the container runtime [containerd](https://containerd.io/) for optimization. In the future, Palette will include a built-in OCI registry to host Helm Charts and other artifacts to avoid downloading these from the internet if included in a content bundle.
+
+:::
+
+
+## Benefits of Content Bundle
+
+Creating a content bundle provides several benefits that may address common use cases related to deploying Edge hosts.
+
+
+* Preloading required software dependencies removes the need to download assets during cluster deployment.
+
+
+* If connectivity to a container registry is unstable or bandwidth limited, preloading the software dependencies can address these concerns.
+
+
+* Preloading required software dependencies optimizes the Edge host deployment process when the Edge host is in an internet bandwidth-constrained environment.
+
+
+* Organizations that want better control over the software used by their Edge hosts can use content bundles to ensure that only approved software is consumed.
+
+
+## Prerequisites
+
+- A Linux machine (physical or virtual) with an AMD64 architecture.
+
+
+- A Palette API key. Refer to the [User Authentication](../../../user-management/user-authentication.md/#api-key) resource to learn how to create a Palette API key.
+
+
+- An Edge Native cluster profile.
Refer to the [Create Edge Native Cluster Profile](../site-deployment/model-profile.md) guide to learn how to create an Edge Native cluster profile. You may also have other add-on profiles that you wish to attach to your cluster.
+
+
+- Content tags in your profiles highlight the exact location of the container images to be downloaded.
+
+## Create Content Bundle
+
+1. Download the Palette Edge Content CLI and make the binary executable.
+<br />
+ + ```shell + VERSION=4.0.2 + wget https://software.spectrocloud.com/stylus/v$VERSION/cli/linux/palette-edge + chmod +x palette-edge + ``` + +2. Log in to [Palette](https://console.spectrocloud.com). + + +3. Select the project you want to deploy the Edge host to and copy down the **Project ID**. +You can find the project id at the top right side corner of the landing page below the **User drop-down Menu**. + + +4. Navigate to the left **Main Menu** and select **Profiles**. + + +5. Use the **Cloud Types drop-down Menu** and select **Edge Native**. + + +6. Click on the cluster profile you want to include in the content bundle. + + +7. You can find the cluster profile ID by reviewing the URL of the current page. The cluster profile ID is the last value in the URL. Repeat this step for all the cluster profiles you want to specify in the content bundle. + +
+ + ```text + https://console.spectrocloud.com/projects/yourProjectId/profiles/cluster/ + ``` + +8. Navigate back to your terminal window and issue the following command to create the content bundle. Replace the placeholder values with your actual values. + +
+ + :::info + + There are several Spectro Cloud CLI flags that you can use to customize the content bundle. Use the command `./palette-edge build --help` to learn more about the available flags. + + ::: + +
+
+  ```shell
+  ./palette-edge build --api-key [your-api-key] \
+  --project-id [your-project-id] \
+  --cluster-profile-ids [your-cluster-profile-ids] \
+  --palette-endpoint [palette-api-endpoint] \
+  --outfile [output-file-name].tar \
+  --iso
+  ```
+
+  ```hideClipboard shell
+  # Output
+  INFO[0000] getting hubble export for build
+  INFO[0000] Fetching latest version for service 'stylus'
+  INFO[0000] stylus version: 3.4.3
+  INFO[0000] Fetching manifest for service stylus and version 3.4.3 for action resources
+  INFO[0000] Fetching manifest of service stylus and version '3.4.3' for action resources
+  INFO[0000] Fetching manifest from service stylus and version '3.4.3' for action resources with file name images.yaml
+  INFO[0000] Get manifest with file name: images.yaml
+  INFO[0000] Get manifest with file content: image: gcr.io/spectro-images-public/stylus:v3.4.3
+  INFO[0002] successfully pulled image : gcr.io/spectro-images-public/calico/cni:v3.25.0
+  ...
+  ...
+  INFO[0143] Total translation table size: 0
+  INFO[0143] Total rockridge attributes bytes: 272
+  INFO[0143] Total directory bytes: 0
+  INFO[0143] Path table size(bytes): 10
+  INFO[0143] Max brk space used 0
+  INFO[0143] 872027 extents written (1703 MB)
+  INFO[0144] ISO file created successfully
+  ```
+
+The result is a content bundle that you can preload into your installer. Alternatively, you can use the ISO version of the content bundle and transfer it to a USB drive to be used separately at the time of Edge host installation.
+
+## Validate
+
+You can validate that the ISO image is not corrupted by attempting to flash a bootable device. Most software that creates a bootable device will validate the ISO image before the flash process.
+
+
+## Next Steps
+
+Your next step is to build the Edge artifacts so that you can deploy an Edge host. To create Edge artifacts, check out the [Build Images](../edgeforge-workflow/palette-canvos.md) guide.
diff --git a/docs/docs-content/clusters/edge/edgeforge-workflow/edgeforge-workflow.md b/docs/docs-content/clusters/edge/edgeforge-workflow/edgeforge-workflow.md
new file mode 100644
index 0000000000..b7da0fd25c
--- /dev/null
+++ b/docs/docs-content/clusters/edge/edgeforge-workflow/edgeforge-workflow.md
@@ -0,0 +1,108 @@
+---
+sidebar_label: "EdgeForge Workflow"
+title: "EdgeForge Workflow"
+description: "Learn how to build your own Edge artifacts customized to your specific needs."
+hide_table_of_contents: false
+tags: ["edge"]
+---
+
+
+*EdgeForge* is the process or workflow of preparing an Edge host with all the required components and dependencies. The EdgeForge workflow contains several steps and key elements that you must complete to ensure the Edge host is ready for a successful site deployment.
+
+EdgeForge contains three critical components.
+
+* Edge Installer ISO.
+
+
+* Edge Host Agent Container Image.
+
+
+* Edge Provider Container Images.
+
+
+
+Each component plays a critical role in the [lifecycle](../edge-native-lifecycle.md) of an Edge deployment. Review the [Edge Artifacts](../edgeforge-workflow/edgeforge-workflow.md#edge-artifacts) section to learn more about each component.
+
+![A diagram that displays the relationship between the three components and how they relate to an Edge host](/clusters_edge-forge-workflow_edgeforge-workflow_components-diagram.png)
+
+
+## Get Started
+
+
+To start building a custom Edge artifact, use the [Build Edge Artifacts](palette-canvos.md) guide.
+
+<br />
+ +## Edge Artifacts + +### Edge Installer ISO + +An ISO file that bootstraps the installation is created in the EdgeForge process. The ISO image contains the Edge Installer that installs the Palette Edge host agent and metadata to perform the initial installation. + +
+ +![A diagram breaking up the internal components of the ISO image](/clusters_edge_edgeforge-workflow_iso-diagram.png) + +### Edge Host Agent Container Image + +The Edge host agent container image contains the Palette Edge host agent. The agent is responsible for Day-2 operations and management of the Edge host. The Edge host agent also provides ongoing support during cluster runtime. + +
+ +### Edge Provider Container Images + +These are [Kairos](https://kairos.io/)-based container images for each supported Operating System (OS) and Kubernetes combination. These container images are downloaded during the installation by the Edge Installer and converted to disk images for the system to boot into. + +Palette provides these artifacts out-of-the-box. All the container images are hosted in Palette's public container registries, or a private self-hosted OCI registry and automatically downloaded during installation. You can use the default Palette container registries to familiarize yourself with the installation process. However, in a typical production scenario, you would need to customize these artifacts to suit your specific needs or perform some [content bundle](../edgeforge-workflow/build-content-bundle.md) optimization. + +
+ + ![A diagram breaking up the internal components of the Edge Provider container images](/clusters_edge_edgeforge-workflow_provider-diagram.png) + + +:::info + +You can specify a custom registry for the Edge Installer to use during installation with the user data parameter `registryCredentials`. Refer to the [Installer Configuration](../edge-configuration/installer-reference.md#external-registry) reference resource for more details. + +::: + +## Deployment Scenarios + +The Edge Installer supports various deployment scenarios. You can customize your Edge host deployment by using the Edge Installer configuration user data, creating content bundles, and creating a custom Edge artifact. Below are a few common scenarios that organizations encounter when deploying an Edge host that requires customization. If you have a similar scenario, use the CLIs to help you with the customization. + +
+ +- **Additional Packages**: +You may need to install additional OS packages for your specific needs, such as an NVIDIA driver or a network package essential for your hardware to establish an outgoing network connection. These additional OS packages would need to be added to the Edge Installer and the Edge Provider images. + + +- **Installer OS Restriction**: +Palette's out-of-the-box Edge Installer is based on the OpenSUSE OS. If you want to install an Ubuntu or an RHEL-based Edge cluster, you may need an Edge Installer based on another OS. + + +- **Optimize Bandwidth**: +In your Edge environments, you may have internet connectivity but limited bandwidth. You can optimize the installation process by embedding all the required components such as the Edge Host Container Image, the Edge Provider Container Images, and content bundles into the Edge Installer. By embedding the required components in the Edge Installer, you remove the need to download the components during installation. + + +- **Bootstrap Install Configuration**: +You can embed the Edge Installer configuration user data into the Edge Installer. This removes the need to create separate user data uploaded as an ISO through a USB drive. Check out the [Prepare User Data](/clusters/edge/edgeforge-workflow/prepare-user-data) guide to learn more about user data and when to use multiple user data files. + + +- **Bring Your Own OS (BYOOS)**: +For environments that require a different runtime OS, you can specify another OS through the [BYOOS](/integrations/byoos) option. Follow the instructions in the [Build Edge Artifacts](/clusters/edge/edgeforge-workflow/palette-canvos) guide to learn more about how you can customize the OS used in an Edge deployment. + +
+ +## Resources + + +- [Build Edge Artifacts](palette-canvos.md) + + +- [Build Preloaded Content Bundles](build-content-bundle.md) + + +- [Build Edge Artifacts using a Content Bundle](build-artifacts.md) + + +- [Prepare User Data](prepare-user-data.md) \ No newline at end of file diff --git a/docs/docs-content/clusters/edge/edgeforge-workflow/palette-canvos.md b/docs/docs-content/clusters/edge/edgeforge-workflow/palette-canvos.md new file mode 100644 index 0000000000..4f1615b799 --- /dev/null +++ b/docs/docs-content/clusters/edge/edgeforge-workflow/palette-canvos.md @@ -0,0 +1,803 @@ +--- +sidebar_label: "Build Edge Artifacts" +title: "Build Edge Artifacts" +description: "Learn how to build Edge artifacts, such as the Edge Installer ISO and provider images using Spectro Cloud's CanvOS utility." +icon: "" +sidebar_position: 10 +toc_min_heading_level: 2 +toc_max_heading_level: 2 +hide_table_of_contents: false +tags: ["edge"] +--- + + +Palette's Edge solution requires Edge hosts to be ready with the required dependencies and [user data](../edge-configuration/installer-reference.md) configurations before deploying a Kubernetes cluster. An Edge host requires the following artifacts to prepare for successful cluster deployment: + +* **Edge installer ISO image** - This bootable ISO image installs the necessary dependencies and configurations on a bare host machine. During installation, the host machine will boot from the Edge installer ISO, partition the disk, copy the image content to the disk, install the Palette Edge host agent and metadata, and perform several configuration steps. These configuration steps include registering the host with Palette, setting user privileges, and configuring network or security settings. + +* **Provider Images** - These are [Kairos](https://kairos.io/)-based images containing the OS and the desired Kubernetes versions. These images install an immutable Operating System (OS) and software dependencies compatible with a specific Kubernetes version at runtime, i.e., during the cluster deployment. A provider image is used in the OS and the Kubernetes layer when creating a cluster profile. + + +In this guide, you will use the utility, [CanvOS](https://github.com/spectrocloud/CanvOS/blob/main/README.md), to build an Edge installer ISO image and provider images for all the Palette-supported Kubernetes versions. The utility builds multiple provider images, so you can use either one that matches the desired Kubernetes version you want to use with your cluster profile. + +:::info + +CanvOS is a utility that helps you build Edge artifacts. CanvOS is part of the EdgeForge workflow. + +::: + + +The diagram below shows the high-level steps to building the Edge artifacts and pushing the provider images to an image registry. + + +![Overarching diagram showing the workflow in the current guide.](/tutorials/palette-canvos/clusters_edge_palette-canvos_artifacts.png) + + +This guide presents two workflows - Basic and Advanced. + +The basic workflow has minimal customizations and offers a quick start to build Edge artifacts. This workflow builds an Ubuntu based Edge installer ISO and provider images. You will also push the provider images to the default image registry, [ttl.sh](https://ttl.sh/). + +The advanced workflow uses more customization options. This workflow builds an openSUSE based Edge installer ISO and provider images. You will push the provider images to your Docker Hub image registry. + +You can follow either of the workflows below that suits your use case. 
+ + + + + + + +### Prerequisites + +To complete this basic guide, you will need the following items: + + +* A physical or virtual Linux machine with *AMD64* (also known as *x86_64*) processor architecture to build the Edge artifacts. You can issue the following command in the terminal to check your processor architecture. + + ```bash + uname -m + ``` + +* Minimum hardware configuration of the Linux machine: + - 4 CPU + - 8 GB memory + - 50 GB storage + + +* [Git](https://cli.github.com/manual/installation). You can ensure git installation by issuing the `git --version` command. + + +* [Docker Engine](https://docs.docker.com/engine/install/) version 18.09.x or later. You can use the `docker --version` command to view the existing Docker version. You should have root-level or `sudo` privileges on your Linux machine to create privileged containers. + + +* A [Spectro Cloud](https://console.spectrocloud.com) account. If you have not signed up, you can sign up for a [free trial](https://www.spectrocloud.com/free-tier/). + + +* Palette registration token for pairing Edge hosts with Palette. You will need tenant admin access to Palette to generate a new registration token. For detailed instructions, refer to the [Create Registration Token](/clusters/edge/site-deployment/site-installation/create-registration-token) guide. + + + +### Instructions + +Use the following instructions on your Linux machine to create all the required Edge artifacts with minimal customization. +
+ +1. Check out the [CanvOS](https://github.com/spectrocloud/CanvOS) GitHub repository containing the starter code. + +
+ + ```bash + git clone https://github.com/spectrocloud/CanvOS.git + ``` + + + + + +2. Change to the **CanvOS/** directory. +
+ + ```bash + cd CanvOS + ``` + + +3. View the available [git tag](https://github.com/spectrocloud/CanvOS/tags). +
+ + ```bash + git tag + ``` + + +4. Check out the newest available tag. This guide uses **v3.4.3** tag as an example. +
+ + ```shell + git checkout v3.4.3 + ``` + + +5. Review the files relevant for this guide. + - **.arg.template** - A sample **.arg** file that defines arguments to use during the build process. + - **Dockerfile** - Embeds the arguments and other configurations in the image. + - **Earthfile** - Contains a series of commands to create target artifacts. + - **earthly.sh** - Script to invoke the Earthfile, and generate target artifacts. + - **user-data.template** - A sample user-data file. +
+ + +6. Issue the command below to assign an image tag value that will be used when creating the provider images. This guide uses the value `palette-learn` as an example. However, you can assign any lowercase and alphanumeric string to the `CUSTOM_TAG` argument. + + ```bash + export CUSTOM_TAG=palette-learn + ``` +
+ +7. Issue the command below to create the **.arg** file containing the custom tag. The remaining arguments in the **.arg** file will use the default values. For example, `ubuntu` is the default operating system, `demo` is the default tag, and [ttl.sh](https://ttl.sh/) is the default image registry. Refer to the existing **.arg.template** file in the current directory or the [README](https://github.com/spectrocloud/CanvOS#readme) to learn more about the available customizable arguments. + + :::info + + The default ttl.sh image registry is free and does not require a sign-up. Images pushed to ttl.sh are ephemeral and will expire after the 24 hrs time limit. Should you need to use a different image registry, refer to the Advanced workflow in the [Build Edge Artifacts](palette-canvos.md) guide. + + ::: + + Using the arguments defined in the **.arg** file, the final provider images you generate will have the following naming convention, `[IMAGE_REGISTRY]/[IMAGE_REPO]:[CUSTOM_TAG]`. For example, one of the provider images will be `ttl.sh/ubuntu:k3s-1.25.2-v3.4.3-demo`. + + ```bash + cat << EOF > .arg + CUSTOM_TAG=$CUSTOM_TAG + IMAGE_REGISTRY=ttl.sh + OS_DISTRIBUTION=ubuntu + IMAGE_REPO=ubuntu + OS_VERSION=22 + K8S_DISTRIBUTION=k3s + ISO_NAME=palette-edge-installer + PE_VERSION=$(git describe --abbrev=0 --tags) + ARCH=amd64 + EOF + ``` + + View the newly created file to ensure the customized arguments are set correctly. + + ```bash + cat .arg + ``` + + +8. Issue the command below to save your tenant registration token to an environment variable. Replace `[your_token_here]` with your actual registration token. + + ```bash + export token=[your_token_here] + ``` + + +9. Use the following command to create the **user-data** file containing the tenant registration token. Also, you can click on the *Points of Interest* numbers below to learn more about the main attributes relevant to this example. + + + + ```shell + cat << EOF > user-data + #cloud-config + stylus: + site: + edgeHostToken: $token + install: + poweroff: true + users: + - name: kairos + passwd: kairos + EOF + ``` + + + +
+ + View the newly created user data file to ensure the token is set correctly. + + ```bash + cat user-data + ``` +
+ +10. The CanvOS utility uses [Earthly](https://earthly.dev/) to build the target artifacts. Issue the following command to start the build process. + + ```bash + sudo ./earthly.sh +build-all-images + ``` + + ```bash hideClipboard + ===================== Earthly Build SUCCESS ===================== + Share your logs with an Earthly account (experimental)! Register for one at https://ci.earthly.dev. + ``` + + :::info + + If you plan to build Edge artifacts using a content bundle, use the `+build-provider-images` option instead of the `+build-all-images` option in the command above. The command, `sudo ./earthly.sh +build-provider-images`, will build the provider images but not the Edge installer ISO. + + ::: + + This command may take up to 15-20 minutes to finish depending on the resources of the host machine. Upon completion, the command will display the manifest, as shown in the example below, that you will use in your cluster profile later in this tutorial. Note that the `system.xxxxx` attribute values in the manifest example are the same as what you defined earlier in the **.arg** file. + + Copy and save the output attributes in a notepad or clipboard to use later in your cluster profile. + + ```bash + pack: + content: + images: + - image: "{{.spectro.pack.edge-native-byoi.options.system.uri}}" + options: + system.uri: "{{ .spectro.pack.edge-native-byoi.options.system.registry }}/{{ .spectro.pack.edge-native-byoi.options.system.repo }}:{{ .spectro.pack.edge-native-byoi.options.system.k8sDistribution }}-{{ .spectro.system.kubernetes.version }}-{{ .spectro.pack.edge-native-byoi.options.system.peVersion }}-{{ .spectro.pack.edge-native-byoi.options.system.customTag }}" + system.registry: ttl.sh + system.repo: ubuntu + system.k8sDistribution: k3s + system.osName: ubuntu + system.peVersion: v3.4.3 + system.customTag: demo + system.osVersion: 22 + ``` +
+ + +11. List the Docker images to review the provider images created. By default, provider images for all the Palette's Edge-supported Kubernetes versions are created. You can identify the provider images by reviewing the image tag value you used in the **.arg** file's `CUSTOM_TAG` argument. +
+ + ```shell + docker images --filter=reference='*/*:*palette-learn' + ``` + + ```hideClipboard bash {3,4} + # Output + REPOSITORY TAG IMAGE ID CREATED SIZE + ttl.sh/ubuntu k3s-1.25.2-v3.4.3-palette-learn b3c4956ccc0a 6 minutes ago 2.49GB + ttl.sh/ubuntu k3s-1.24.6-v3.4.3-palette-learn fe1486da25df 6 minutes ago 2.49GB + ``` +
+ + +12. To use the provider images in your cluster profile, push them to the image registry mentioned in the **.arg** file. The current example uses the [ttl.sh](https://ttl.sh/) image registry. This image registry is free to use and does not require a sign-up. Images pushed to *ttl.sh* are ephemeral and will expire after the 24 hrs time limit. Use the following commands to push the provider images to the *ttl.sh* image registry. +
+ + ```bash + docker push ttl.sh/ubuntu:k3s-1.25.2-v3.4.3-palette-learn + docker push ttl.sh/ubuntu:k3s-1.24.6-v3.4.3-palette-learn + ``` +
+ + :::caution + + As a reminder, [ttl.sh](https://ttl.sh/) is a short-lived image registry. If you do not use these provider images in your cluster profile within 24 hours of pushing to *ttl.sh*, they will expire and must be re-pushed. Refer to the Advanced workflow in the current guide to learn how to use another registry, such as Docker Hub, and tag the docker images accordingly. + + ::: +
+ + +13. After pushing the provider images to the image registry, open a web browser and log in to [Palette](https://console.spectrocloud.com). Ensure you are in the **Default** project scope before creating a cluster profile. + + +14. Navigate to the left **Main Menu** and select **Profiles**. Click on the **Add Cluster Profile** button, and fill out the required basic information fields to create a cluster profile for Edge. + + +15. Add the following [BYOS Edge OS](../../../integrations/byoos.md) pack to the OS layer in the **Profile Layers** section. + + |**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| + |---|---|---|---| + |OS|Public Repo|BYOS Edge OS|`1.0.0`| + + +16. Replace the cluster profile's BYOOS pack manifest with the following custom manifest so that the cluster profile can pull the provider image from the ttl.sh image registry. + + The `system.xxxxx` attribute values below refer to the arguments defined in the **.arg** file. If you modified the arguments in the **.arg** file, you must modify the attribute values below accordingly. +
+ + ```yaml + pack: + content: + images: + - image: "{{.spectro.pack.edge-native-byoi.options.system.uri}}" + options: + system.uri: "{{ .spectro.pack.edge-native-byoi.options.system.registry }}/{{ .spectro.pack.edge-native-byoi.options.system.repo }}:{{ .spectro.pack.edge-native-byoi.options.system.k8sDistribution }}-{{ .spectro.system.kubernetes.version }}-{{ .spectro.pack.edge-native-byoi.options.system.peVersion }}-{{ .spectro.pack.edge-native-byoi.options.system.customTag }}" + system.registry: ttl.sh + system.repo: ubuntu + system.k8sDistribution: k3s + system.osName: ubuntu + system.peVersion: v3.4.3 + system.customTag: demo + system.osVersion: 22 + ``` + The screenshot below displays how to reference a provider image in the BYOOS pack of your cluster profile. + + ![Screenshot of a sample cluster profile's OS layer ](/tutorials/palette-canvos/clusters_edge_palette-canvos_edit_profile.png) +
+ + :::info + + The BYOOS pack's `system.uri` attribute references the Kubernetes version selected in the cluster profile by using the `{{ .spectro.system.kubernetes.version }}` [macro](../../cluster-management/macros.md). This is how the provider images you created and pushed to a registry are tied to the OS and Kubernetes version you selected in the **.arg** file. + + ::: + +17. Add the following **Palette Optimized K3s** pack to the Kubernetes layer of your cluster profile. Select the k3s version 1.25.x because earlier in this how-to guide, you pushed a provider image compatible with k3s v1.25.2 to the ttl.sh image registry. + + |**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| + |---|---|---|---| + |Kubernetes|Public Repo|Palette Optimized k3s|`1.25.x`| + + +18. Add the network layer to your cluster profile, and choose a Container Network Interface (CNI) pack that best fits your needs, such as Calico, Flannel, Cilium, or Custom CNI. For example, you can add the following network layer. This step completes the core infrastructure layers in the cluster profile. + + |**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| + |---|---|---|---| + |Network|Public Repo|Calico|`3.25.x`| + + +19. Add add-on layers and manifests to your cluster profile per your requirements. + + +20. If there are no errors or compatibility issues, Palette displays the newly created complete cluster profile for review. Verify the layers you added, and finish creating the cluster profile. +
+ + +### Validate +List the Edge installer ISO image and checksum by issuing the following command from the **CanvOS/** directory. +
+ +```shell +ls build/ +``` + +```hideClipboard shell +# Output +palette-edge-installer.iso +palette-edge-installer.iso.sha256 +``` + +You can validate the ISO image by creating a bootable USB flash drive using any third-party software and attempting to flash a bare host machine. Most software that creates a bootable USB drive will validate the ISO image. In this context, flashing refers to installing the necessary tools and configurations on a host machine. + +
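+
+Optionally, you can also check the ISO against the checksum file that the build generated. The commands below are a minimal sketch issued from the **CanvOS/** directory; they assume the **.sha256** file stores the SHA-256 digest of the ISO, so the computed hash should match the stored value.
+
+```shell
+# Compute the ISO checksum and compare it with the value saved by the build.
+sha256sum build/palette-edge-installer.iso
+cat build/palette-edge-installer.iso.sha256
+```
+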
+ + + + +### Prerequisites + +To complete this advanced guide, you will need the following items: +
+ +* A physical or virtual Linux machine with *AMD64* (also known as *x86_64*) processor architecture to build the Edge artifacts. You can issue the following command in the terminal to check your processor architecture. +
+ + ```bash + uname -m + ``` + +* Minimum hardware configuration of the Linux machine: + - 4 CPU + - 8 GB memory + - 50 GB storage + + +* [Git](https://cli.github.com/manual/installation). You can verify Git is installed by issuing the `git --version` command. + + +* [Docker Engine](https://docs.docker.com/engine/install/) version 18.09.x or later. You can use the `docker --version` command to view the existing Docker version. You should have root-level or `sudo` privileges on your Linux machine to create privileged containers. + + +* A [Spectro Cloud](https://console.spectrocloud.com) account. If you have not signed up, you can sign up for a [free trial](https://www.spectrocloud.com/free-tier/). + + +* A Palette registration token for pairing Edge hosts with Palette. You will need tenant admin access to Palette to generate a new registration token. For detailed instructions, refer to the [Create Registration Token](/clusters/edge/site-deployment/site-installation/create-registration-token) guide. + + +* An account with [Docker Hub](https://hub.docker.com/). If you do not have an account with Docker Hub already, refer to the [Create an account](https://docs.docker.com/docker-id/) page for sign-up instructions. +
+ + :::info + + This guide uses Docker Hub as an example. You can use any other image registry that suits your requirements. + + ::: +
+ +* A public repository named `opensuse-leap` in your image registry. Refer to the [Create a repository](https://docs.docker.com/docker-hub/repos/create/#create-a-repository) instructions for creating a Docker Hub repository and setting the repository's visibility to `public`. + + +### Instructions + +Use the following instructions on your Linux machine to customize the arguments and Dockerfile and then create all the required Edge artifacts. + +
+ +1. Check out the [CanvOS](https://github.com/spectrocloud/CanvOS.git) GitHub repository containing the starter code. +
+ + ```bash + git clone https://github.com/spectrocloud/CanvOS.git + ``` + + +2. Change to the **CanvOS/** directory. +
+ + ```bash + cd CanvOS + ``` + + +3. View the available [git tag](https://github.com/spectrocloud/CanvOS/tags). +
+ + ```bash + git tag + ``` + + +4. Check out the newest available tag. This guide uses the **v3.4.3** tag as an example. +
+ + ```shell + git checkout v3.4.3 + ``` +
+ +5. Review the files relevant for this guide. + - **.arg.template** - A sample **.arg** file that defines arguments to use during the build process. + - **Dockerfile** - Embeds the arguments and other configurations in the image. + - **Earthfile** - Contains a series of commands to create target artifacts. + - **earthly.sh** - Script to invoke the Earthfile, and generate target artifacts. + - **user-data.template** - A sample user-data file. +
+ + +6. Review the **.arg** file containing the customizable arguments, such as image tag, image registry, image repository, and OS distribution. The table below shows all arguments, their default value, and allowed values. + + |**Argument**|**Description**|**Default Value**| **Allowed Values** | + |---|---|---|---| + |`CUSTOM_TAG`|Tag for the provider images|demo|Lowercase alphanumeric string without spaces.| + |`IMAGE_REGISTRY`|Image registry name|ttl.sh|Your image registry hostname, without `http` or `https`. <br /> Example: docker.io/spectrocloud| + |`OS_DISTRIBUTION`|OS Distribution |ubuntu | ubuntu, opensuse-leap| + |`IMAGE_REPO`|Image repository name. <br /> It is the same as the OS distribution.|`$OS_DISTRIBUTION`|Your image repository name.| + |`OS_VERSION`|OS version, only applies to Ubuntu |22| 20, 22| + |`K8S_DISTRIBUTION`|Kubernetes Distribution |k3s| k3s, rke2, kubeadm | + |`ISO_NAME`|Name of the Installer ISO|palette-edge-installer|Lowercase alphanumeric string without spaces. The characters `-` and `_` are allowed. | + | `PE_VERSION` | The Palette Edge installer version. This should match the tag checked out from Git. This is an advanced setting. Do not modify unless told to do so. | String | Git tags. | + | `platform` | Type of platform to use for the build. Used for cross-platform builds (for example, arm64 to amd64). | string | `linux/amd64` | + + Next, you will customize these arguments to use during the build process. +
+ +7. Issue the command below to assign an image tag value that will be used when creating the provider images. This guide uses the value `palette-learn` as an example. However, you can assign any lowercase and alphanumeric string to the `CUSTOM_TAG` argument. +
+ + ```bash + export CUSTOM_TAG=palette-learn + ``` +
+ +8. Use the command below to save the Docker Hub image registry hostname in the `IMAGE_REGISTRY` argument. Before you execute the command, replace `[DOCKER-ID]` in the declaration below with your Docker ID. Your image registry hostname must comply with standard DNS rules and may not contain underscores. +
+ + ```bash + export IMAGE_REGISTRY=docker.io/[DOCKER-ID] # Follows [HOST]/[DOCKER-ID] syntax. Example: docker.io/spectrocloud + ``` +
+ +9. Issue the following command to use the openSUSE Leap OS distribution. +
+ + ```bash + export OS_DISTRIBUTION=opensuse-leap + ``` +
+ +10. Issue the command below to create the **.arg** file containing the custom tag, Docker Hub image registry hostname, and openSUSE Leap OS distribution. The **.arg** file uses the default values for the remaining arguments. You can refer to the existing **.arg.template** file to learn more about the available customizable arguments. +
+ + ```bash + cat << EOF > .arg + IMAGE_REGISTRY=$IMAGE_REGISTRY + OS_DISTRIBUTION=$OS_DISTRIBUTION + IMAGE_REPO=$OS_DISTRIBUTION + CUSTOM_TAG=$CUSTOM_TAG + K8S_DISTRIBUTION=k3s + ISO_NAME=palette-edge-installer + PE_VERSION=$(git describe --abbrev=0 --tags) + ARCH=amd64 + EOF + ``` + + View the newly created file to ensure the customized arguments are set correctly. +
+ + ```bash + cat .arg + ``` +
+ + :::caution + + Using the arguments defined in the **.arg** file, the final provider image name will have the following naming pattern, `[IMAGE_REGISTRY]/[IMAGE_REPO]:[CUSTOM_TAG]`. Ensure the final artifact name conforms to the Docker Hub image name syntax - `[HOST]/[DOCKER-ID]/[REPOSITORY]:[TAG]`. + + ::: +
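+
+   For reference, with the values used in this guide, the provider image for K3s v1.25.2 resolves to the name below. It is shown only as an illustration; your name will differ if you changed any arguments.
+
+   ```hideClipboard bash
+   docker.io/[DOCKER-ID]/opensuse-leap:k3s-1.25.2-v3.4.3-palette-learn
+   ```
+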
+ +11. Use the following command to append the [WireGuard](https://www.wireguard.com/install/) installation instructions to the Dockerfile. You can install more tools and dependencies and configure the image to meet your needs. Add your customizations below the line tagged with the `Add any other image customizations here` comment in the Dockerfile. Do not edit or add any lines before this tagged comment. +
+ + ```bash + echo 'RUN sudo zypper refresh && sudo zypper install -y wireguard-tools' >> Dockerfile + ``` + + View the newly created file to ensure the instruction to install WireGuard is appended correctly. +
+ + ```bash + cat Dockerfile + ``` +
+ :::caution + + Using the `-y` option with the `sudo zypper install` command is critical to successfully build the images. The default behavior for package installations is to prompt the user for permission to install the package. A user prompt will cause the image creation process to fail. This guidance applies to all dependencies you add through the **Dockerfile**. + + ::: +
+ +12. Issue the command below to save your tenant registration token to a local variable. Replace `[your_token_here]` with your actual registration token. +
+ + ```bash + export token=[your_token_here] + ``` +
+ +13. Use the following command to create the **user-data** file containing the tenant registration token. Also, you can click on the *Points of Interest* numbers below to learn more about the main attributes relevant to this example. +
+ + + + ```shell + cat << EOF > user-data + #cloud-config + stylus: + site: + edgeHostToken: $token + install: + poweroff: true + users: + - name: kairos + passwd: kairos + EOF + ``` + + + + View the newly created user data file to ensure the token is set correctly. +
+ + ```bash + cat user-data + ``` + + If you want further customization, check the existing **user-data.template** file, and refer to the [Edge Configuration Stages](../edge-configuration/cloud-init.md) and [User Data Parameters](../edge-configuration/installer-reference.md) documents to learn more. +
+ +14. The CanvOS utility uses [Earthly](https://earthly.dev/) to build the target artifacts. Issue the following command to start the build process. +
+ + ```bash + sudo ./earthly.sh +build-all-images + ``` + + ```hideClipboard bash {2} + # Output condensed for readability + ===================== Earthly Build SUCCESS ===================== + Share your logs with an Earthly account (experimental)! Register for one at https://ci.earthly.dev. + ``` +
+ + :::info + + If you plan to build Edge artifacts using a content bundle, use the `+build-provider-images` option instead of the `+build-all-images` option in the command above. The command, `sudo ./earthly.sh +build-provider-images`, will build the provider images but not the Edge installer ISO. + + ::: + + + This command may take up to 15-20 minutes to finish depending on the resources of the host machine. Upon completion, the command will display the manifest, as shown in the example below, that you will use in your cluster profile later in this tutorial. Note that the `system.xxxxx` attribute values in the manifest example are the same as what you defined earlier in the **.arg** file. + + Copy and save the output attributes in a notepad or clipboard to use later in your cluster profile. +
+ + ```bash + pack: + content: + images: + - image: "{{.spectro.pack.edge-native-byoi.options.system.uri}}" + options: + system.uri: "{{ .spectro.pack.edge-native-byoi.options.system.registry }}/{{ .spectro.pack.edge-native-byoi.options.system.repo }}:{{ .spectro.pack.edge-native-byoi.options.system.k8sDistribution }}-{{ .spectro.system.kubernetes.version }}-{{ .spectro.pack.edge-native-byoi.options.system.peVersion }}-{{ .spectro.pack.edge-native-byoi.options.system.customTag }}" + system.registry: docker.io/spectrocloud + system.repo: opensuse-leap + system.k8sDistribution: k3s + system.osName: opensuse-leap + system.peVersion: v3.4.3 + system.customTag: palette-learn + ``` +
+
+ +15. List the Docker images to review the provider images created. By default, provider images are created for all the Kubernetes versions that Palette supports for Edge. You can identify the provider images by reviewing the image tag value you used in the **.arg** file's `CUSTOM_TAG` argument. +
+ + ```shell + docker images --filter=reference='*/*:*palette-learn' + ``` + + ```hideClipboard bash {3,4} + # Output + REPOSITORY TAG IMAGE ID CREATED SIZE + spectrocloud/opensuse-leap k3s-1.25.2-v3.4.3-palette-learn 2427e3667b2f 24 minutes ago 2.22GB + spectrocloud/opensuse-leap k3s-1.24.6-v3.4.3-palette-learn 0f2efd533a33 24 minutes ago 2.22GB + ``` +
+ +16. To use the provider images in your cluster profile, push them to the image registry you specified in the **.arg** file. Issue the following command to log in to Docker Hub. Provide your Docker ID and password when prompted. +
+ + ```bash + docker login + ``` + + ```hideClipboard bash + # Output + Login Succeeded + ``` +
+ +17. Use the following commands to push the provider images to the Docker Hub image registry you specified. Replace the `[DOCKER-ID]` and version numbers in the commands below with your Docker ID and the respective Kubernetes versions of the provider images the utility created. +
+ + ```bash + docker push docker.io/[DOCKER-ID]/opensuse-leap:k3s-1.25.2-v3.4.3-palette-learn + docker push docker.io/[DOCKER-ID]/opensuse-leap:k3s-1.24.6-v3.4.3-palette-learn + ``` +
+ +18. After pushing the provider images to the image registry, open a web browser and log in to [Palette](https://console.spectrocloud.com). Ensure you are in the **Default** project scope before creating a cluster profile. + + +19. Navigate to the left **Main Menu** and select **Profiles**. Click on the **Add Cluster Profile** button, and fill out the required basic information fields to create a cluster profile for Edge. + + +20. Add the following [BYOS Edge OS](../../../integrations/byoos.md) pack to the OS layer in the **Profile Layers** section. + + |**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| + |---|---|---|---| + |OS|Public Repo|BYOS Edge OS|`1.0.0`| + + +21. Replace the cluster profile's BYOOS pack manifest with the following custom manifest so that the cluster profile can pull the provider image from your Docker Hub image registry. + + The `system.xxxxx` attribute values below refer to the arguments defined in the **.arg** file. If you modified the arguments in the **.arg** file, you must modify the attribute values below accordingly. +
+ + ```yaml + pack: + content: + images: + - image: "{{.spectro.pack.edge-native-byoi.options.system.uri}}" + options: + system.uri: "{{ .spectro.pack.edge-native-byoi.options.system.registry }}/{{ .spectro.pack.edge-native-byoi.options.system.repo }}:{{ .spectro.pack.edge-native-byoi.options.system.k8sDistribution }}-{{ .spectro.system.kubernetes.version }}-{{ .spectro.pack.edge-native-byoi.options.system.peVersion }}-{{ .spectro.pack.edge-native-byoi.options.system.customTag }}" + system.registry: docker.io/spectrocloud + system.repo: opensuse-leap + system.k8sDistribution: k3s + system.osName: opensuse-leap + system.peVersion: v3.4.3 + system.customTag: palette-learn + ``` + The screenshot below displays how to reference a provider image in the BYOOS pack of your cluster profile. + + ![Screenshot of a sample cluster profile's OS layer ](/tutorials/palette-canvos/clusters_edge_palette-canvos_edit_profile.png) +
+ + :::info + + The BYOOS pack's `system.uri` attribute references the Kubernetes version selected in the cluster profile by using the `{{ .spectro.system.kubernetes.version }}` [macro](../../cluster-management/macros.md). This is how the provider images you created and pushed to a registry are tied to the OS and Kubernetes version you selected in the **.arg** file. + + ::: + +22. Add the following **Palette Optimized K3s** pack to the Kubernetes layer of your cluster profile. Select the k3s version 1.25.x because earlier in this how-to guide, you pushed a provider image compatible with k3s v1.25.2 to your Docker Hub image registry. + + |**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| + |---|---|---|---| + |Kubernetes|Public Repo|Palette Optimized k3s|`1.25.x`| + + +23. Add the network layer to your cluster profile, and choose a Container Network Interface (CNI) pack that best fits your needs, such as Calico, Flannel, Cilium, or Custom CNI. For example, you can add the following network layer. This step completes the core infrastructure layers in the cluster profile. + + |**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| + |---|---|---|---| + |Network|Public Repo|Calico|`3.25.x`| + + +24. Add add-on layers and manifests to your cluster profile per your requirements. + + +25. If there are no errors or compatibility issues, Palette displays the newly created complete cluster profile for review. Verify the layers you added, and finish creating the cluster profile. +
+ + +### Validate +List the Edge installer ISO image and checksum by issuing the following command from the **CanvOS/** directory. +
+ +```shell +ls build/ +``` + +```hideClipboard shell +# Output +palette-edge-installer.iso +palette-edge-installer.iso.sha256 +``` + +You can validate the ISO image by creating a bootable USB flash drive using any third-party software and attempting to flash a bare host machine. Most software that creates a bootable USB drive will validate the ISO image. In this context, flashing refers to installing the necessary tools and configurations on a host machine. + +
+ +
+ +## Next Steps + +After building the Edge artifacts and creating an Edge cluster profile, the next step is to use the Edge installer ISO image to prepare your Edge host. To learn more about utilizing Edge artifacts to prepare Edge hosts and deploy Palette-managed Edge clusters, we encourage you to check out the reference resources below. +
+ +- [Deploy an Edge Cluster on VMware](../site-deployment/deploy-cluster.md) + + +- [Prepare Edge Host for Installation](../site-deployment/stage.md) \ No newline at end of file diff --git a/docs/docs-content/clusters/edge/edgeforge-workflow/prepare-user-data.md b/docs/docs-content/clusters/edge/edgeforge-workflow/prepare-user-data.md new file mode 100644 index 0000000000..6d3581b9b0 --- /dev/null +++ b/docs/docs-content/clusters/edge/edgeforge-workflow/prepare-user-data.md @@ -0,0 +1,293 @@ +--- +sidebar_label: "Prepare User Data" +title: "Prepare User Data" +description: "Learn about building your staging user data" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["edge"] +--- + + + +The Edge Installer supports using a custom configuration file in the format of a YAML that you can use to customize the installation process. You can provide the customized configuration to the Edge Installer as a user data file. + +:::info + +Review the Edge [Install Configuration](../edge-configuration/installer-reference.md) resource to learn more about all the supported configuration parameters you can use in the configuration user data. + +::: + +You can also use the Operating System (OS) pack to apply additional customization using cloud-init stages. Both the Edge Installer configuration file and the OS pack support the usage of cloud-init stages. Refer to the [Cloud-Init Stages](../edge-configuration/cloud-init.md) to learn more. + +## User Data Samples + +You may encounter the following scenarios when creating an Edge Installer configuration user data file. Use these examples as a starting point to help you create user data configurations that fit your needs. + +```yaml +#cloud-config +stylus: + site: + # The Palette API endpoint to use. The default value is api.spectrocloud.com. + paletteEndpoint: api.spectrocloud.com + + # The edgeHostToken is an auto-registration token to register the Edge host with Palette upon bootup. + # This token can be generated by navigating to the Tenant Settings -> Registration Token. + # Specify a name and select the project id with which the Edge host should register itself. + edgeHostToken: aUAxxxxxxxxx0ChYCrO + + # The Palette project ID the Edge host should pair with. This is an optional field if an edgeHostToken is used and the token was assigned to a project. + projectUid: 12345677788 + # Tags which that will be assigned to the device as labels. + tags: + key1: value1 + key2: value2 + key3: value3 + + # The device's name, may also be referred to as the Edge ID or Edge host ID. If no Edge hostname is specified, + # a hostname will be generated from the device serial number. If the Edge Installer cannot identify the device serial number, then a random ID will + # be generated and used instead. In the case of hardware that does not have a serial number, we recommended specifying a + # random name, with minimal chances of being re-used by a different Edge host. + name: edge-appliance-1 + + # Optional + # If the Edge host requires a proxy to connect to Palette or to pull images, then specify the proxy information in this section + network: + # configures http_proxy + httpProxy: http://proxy.example.com + # configures https_proxy + httpsProxy: https://proxy.example.com + # configures no_proxy + noProxy: 10.10.128.10,10.0.0.0/8 + + # Optional: configures the global nameserver for the system. + nameserver: 1.1.1.1 + # configure interface specific info. 
If omitted all interfaces will default to dhcp + interfaces: + enp0s3: + # type of network dhcp or static + type: static + # Ip address including the mask bits + ipAddress: 10.0.10.25/24 + # Gateway for the static ip. + gateway: 10.0.10.1 + # interface specific nameserver + nameserver: 10.10.128.8 + enp0s4: + type: dhcp + caCerts: + - | + ------BEGIN CERTIFICATE------ + ***************************** + ***************************** + ------END CERTIFICATE------ + - | + ------BEGIN CERTIFICATE------ + ***************************** + ***************************** + ------END CERTIFICATE------ + +# There is no password specified to the default kairos user. You must specify authorized keys or passwords to access the Edge host console. +stages: + initramfs: + - users: + kairos: + groups: + - sudo + passwd: kairos +``` + +### Connected Sites - Multiple User Data Configuration + +In this example, two configuration user user data files are used. The first one is used in the staging phase and is included with the Edge Installer image. Note how the first user data contains the registration information and creates a user group. A bootable USB stick applies the second user data at the physical site. The secondary user data includes network configurations specific to the edge location. + +**Staging** - included with the Edge Installer. + +```yaml +#cloud-config +stylus: + site: + paletteEndpoint: api.spectrocloud.com + edgeHostToken: + tags: + city: chicago + building: building-1 + +install: + poweroff: true + +stages: + initramfs: + - users: + kairos: + groups: + - sudo + passwd: kairos +``` + +**Site** - supplied at the edge location through a bootable USB drive. If specified, the `projectName` value overrides project information specified in the `edgeHostToken` parameter. You can add optional tags to identify the city, building, and zip-code. If the edge site requires a proxy for an outbound connection, provide it in the network section of the site user data. + +```yaml +#cloud-config +stylus: + site: + projectName: edge-sites + tags: + zip-code: 95135 +``` + +### Connected Sites - Single User Data + +This example configuration is for a *connected site*. +In this scenario, only a single Edge Installer configuration user data is used for the entire deployment process. + +
+ +```yaml +#cloud-config +stylus: + site: + paletteEndpoint: api.spectrocloud.com + edgeHostToken: + projectName: edge-sites + tags: + city: chicago + building: building-1 + zip-code: 95135 + +install: + poweroff: true + +stages: + initramfs: + - users: + kairos: + groups: + - sudo + passwd: kairos +``` + +### Apply Proxy & Certificate Settings + +This example showcases how you can include network settings in a user data configuration. + +```yaml +#cloud-config +stylus: + site: + paletteEndpoint: api.spectrocloud.com + edgeHostToken: + projectName: edge-sites + tags: + city: chicago + building: building-1 + zip-code: 95135 + network: + httpProxy: http://proxy.example.com + httpsProxy: https://proxy.example.com + noProxy: 10.10.128.10,10.0.0.0/8 + nameserver: 1.1.1.1 + # configure interface specific info. If omitted all interfaces will default to dhcp + interfaces: + enp0s3: + # type of network dhcp or static + type: static + # Ip address including the mask bits + ipAddress: 10.0.10.25/24 + # Gateway for the static ip. + gateway: 10.0.10.1 + # interface specific nameserver + nameserver: 10.10.128.8 + enp0s4: + type: dhcp + caCerts: + - | + ------BEGIN CERTIFICATE------ + ***************************** + ***************************** + ------END CERTIFICATE------ + - | + ------BEGIN CERTIFICATE------ + ***************************** + ***************************** + ------END CERTIFICATE------ + +install: + poweroff: true + +stages: + initramfs: + - users: + kairos: + groups: + - sudo + passwd: kairos +``` + +### Load Content From External Registry + +In this example, content is downloaded from an external registry. + +```yaml +#cloud-config +stylus: + registryCredentials: + domain: 10.10.254.254:8000/spectro-images + username: ubuntu + password: + insecure: true + site: + debug: true + insecureSkipVerify: false + paletteEndpoint: api.console.spectrocloud.com + name: edge-appliance-1 + caCerts: + - | + -----BEGIN CERTIFICATE----- + + -----END CERTIFICATE----- + +install: + poweroff: false + +stages: + initramfs: + - users: + kairos: + groups: + - sudo + passwd: kairos +``` + + +## Multiple User Data Use Case + +If you don't need to apply any unique configurations on the device once it arrives at the physical site, then your site deployment flow would look like the following. + +![The flow of an install process not requiring additional customization](/clusters_site-deployment_prepare-edge-configuration_install-flow.png) + +Should you need to apply different configurations once the device arrives at the physical site, you can use a secondary user data to support this use case. + +Use the additional user data to override configurations from the previous user data that was flashed into the device or to inject new configuration settings. Using secondary user data at the physical site is a common pattern for organizations that need to change settings after powering on the Edge host at the physical location. + +To use additional user data, create a bootable device, such as a USB stick, that contains the user data in the form of an ISO image. The Edge Installer will consume the additional user data during the installation process. + +![The flow of an install process with an additional customization occurring at the physical site. 
The additional customization is using a USB stick to upload the new user data.](/clusters_site-deployment_prepare-edge-configuration_install-flow-with-more-user-data.png) + +When creating your Edge Installer, you can embed the user data into the installer image to eliminate providing it via a USB drive. + +In the staging phase, you may identify user data parameter values that apply uniformly to all your edge sites. But you may also have some edge locations that require different configurations such as site network proxy, site certs, users and groups, etc. +Site-specific configurations are typically not included in the Edge installer image. For the latter scenario, you can use a secondary user data configuration. Refer to the [Apply Site User Data](../site-deployment/site-installation/site-user-data.md) guide to learn more about applying secondary site-specific user data. + + + +:::info + +For your initial testing, your user data may include global settings and site-specific properties in a single user data. As you gain more experience, you should evaluate whether secondary site-specific user data is a better design for your use case. + +::: + + + +## Next Steps + +The last step of the EdgeForce workflow is to build the Edge artifacts. Check out the [Build Edge Artifacts](palette-canvos.md) guide to learn how to create the Edge artifacts. diff --git a/docs/docs-content/clusters/edge/site-deployment/_category_.json b/docs/docs-content/clusters/edge/site-deployment/_category_.json new file mode 100644 index 0000000000..ae9ddb024d --- /dev/null +++ b/docs/docs-content/clusters/edge/site-deployment/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 50 +} diff --git a/docs/docs-content/clusters/edge/site-deployment/deploy-cluster.md b/docs/docs-content/clusters/edge/site-deployment/deploy-cluster.md new file mode 100644 index 0000000000..f10ff5b8da --- /dev/null +++ b/docs/docs-content/clusters/edge/site-deployment/deploy-cluster.md @@ -0,0 +1,1000 @@ +--- +sidebar_label: "Deploy an Edge Cluster on VMware" +title: "Deploy an Edge Cluster on VMware" +description: "Learn how to deploy an Edge host using VMware as the deployment platform. You will learn how to use the Edge Installer ISO, create a cluster profile, and deploy a Kubernetes cluster to the Edge host on VMware." +icon: "" +hide_table_of_contents: false +sidebar_position: 40 +tags: ["edge", "tutorial"] +--- + + +Palette supports deploying Kubernetes clusters in remote locations to support edge computing workloads. Palette's Edge solution enables you to deploy your edge devices, also called Edge hosts, which contain all the required software dependencies to support Palette-managed Kubernetes cluster deployment. + +Maintaining consistency while preparing edge devices at scale can be challenging for operation teams. For example, imagine you are an IT administrator for a retail company that has decided to expand to 1000 new stores this year. The company needs you to deploy Kubernetes clusters in each new store using edge devices, such as Intel NUC, and ensure each device has the same software and security configurations. Your job is to prepare each device so the development team can deploy Kubernetes clusters on each device. You have decided to use Palette's Edge solution to help you meet the organizational requirements. You will prepare a small set of Edge devices and deploy a Kubernetes cluster to verify readiness for consistent deployment across all physical sites. 
+ +The following points summarize the primary stages of Edge cluster deployment to a production environment: + +- Create Edge artifacts such as the Edge Installer ISO, provider images, and content bundles. + +- Initialize the Edge device with the Edge installer ISO. The ISO includes a base Operating System (OS) and other configurations such as networking, proxy, security, tooling, and user privileges. + +- Create a cluster profile to ensure consistency in all the Edge hosts. The cluster profile lets you declare the desired software dependencies for each Kubernetes cluster. + + +Following the primary stages outlined above, this tutorial will guide you to build the Edge artifacts (Edge installer ISO image and provider images) and use the Edge installer ISO image to prepare Edge hosts. Next, you will use the provider image to create a cluster profile and then deploy a cluster on those Edge hosts. You will use VMware to deploy the Edge hosts to simulate a bare metal environment. + + +For learning purposes, you will set up Virtual Machines (VMs) as Edge hosts and deploy a cluster on Edge host VMs. VMs provide a more accessible Edge learning experience, as you do not require connecting to physical Edge devices. The diagram below shows the main steps to prepare Edge hosts and deploy a cluster. + + +![An overarching diagram showing the tutorial workflow.](/tutorials/edge/clusters_edge_deploy-cluster_overarching.png) + + +## Prerequisites + +To complete this tutorial, you will need the following: + +* Access to a VMware vCenter environment where you will provision VMs as Edge hosts. You will need the server URL, login credentials, and names of the data center, data store, resource pool, folder, cluster, and DHCP-enabled network. + + +* The VMs you will prepare as Edge hosts must be attached to a DHCP-enabled network. To ensure DHCP is enabled on the network, review the network settings on your ESXi Host. +You can refer to the [Prepare the DHCP Server for vSphere](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.esxi.install.doc/GUID-9D8333F5-5F5B-4658-8166-119B44895098.html) guide from VMware to configure a DHCP server on the network. + + +* A physical or virtual Linux machine with *AMD64* (also known as *x86_64*) processor architecture to build the Edge artifacts. You can issue the following command in the terminal to check your processor architecture. +
+ + ```bash + uname -m + ``` +
+ + :::caution + + The Linux machine must have network connectivity to your VMware vCenter environment. + + ::: + +* The following minimum hardware configuration: + - 4 CPU + - 8 GB memory + - 50 GB storage + + +* [Git](https://cli.github.com/manual/installation). Ensure git installation by issuing the `git --version` command. + + +* [Docker Engine](https://docs.docker.com/engine/install/) version 18.09.x or later. You can use the `docker --version` command to view the existing Docker version. You should have root-level or `sudo` privileges on your Linux machine to create privileged containers. + + +* A [Spectro Cloud](https://console.spectrocloud.com) account. If you have not signed up, you can sign up for a [free trial](https://www.spectrocloud.com/free-tier/). + + +* A Palette registration token for pairing Edge hosts with Palette. You will need tenant admin access to Palette to generate a new registration token. For detailed instructions, refer to the [Create Registration Token](../site-deployment/site-installation/create-registration-token.md) guide. Copy the newly created token to a clipboard or notepad file to use later in this tutorial. + + The screenshot below shows a sample registration token in the **Tenant Settings** > **Registration Tokens** section in Palette. + + ![A screenshot of a registration token in Palette](/tutorials/edge/clusters_edge_deploy-cluster_registration-token.png) + + +## Build Edge Artifacts + +In this section, you will use the [CanvOS](https://github.com/spectrocloud/CanvOS/blob/main/README.md) utility to build an Edge installer ISO image and provider images for all the Palette-supported Kubernetes versions. The utility builds multiple provider images, so you can use either one that matches the desired Kubernetes version you want to use with your cluster profile. + +This tutorial builds and uses the provider image compatible with K3s v1.25.2. +
+ +### Check Out Starter Code + +Issue the following and subsequent command-line instructions on your Linux machine, which this tutorial refers to as the development environment. + +Clone the [CanvOS](https://github.com/spectrocloud/CanvOS) GitHub repository containing the starter code to build Edge artifacts. +
+ +```bash +git clone https://github.com/spectrocloud/CanvOS.git +``` + +Change to the **CanvOS** directory. +
+ +```bash +cd CanvOS +``` + +View the available [git tag](https://github.com/spectrocloud/CanvOS/tags). +
+ +```bash +git tag +``` + +Check out the newest available tag. This guide uses the **v3.4.3** tag as an example. +
+ +```shell +git checkout v3.4.3 +``` +
+ +## Define Arguments + +CanvOS requires arguments such as image tag, registry, repository, and OS distribution. The arguments are defined in the **.arg** file. In this step, you will create the **.arg** file and define all the required arguments. + + +Issue the command below to assign an image tag value for the provider images. This guide uses the default value `demo` as an example. However, you can assign any lowercase and alphanumeric string to the `CUSTOM_TAG` variable. +
+ +```bash +export CUSTOM_TAG=demo +``` +
+ +Issue the command below to create the **.arg** file with the custom tag. The remaining arguments will use the default values. For example, `ubuntu` is the default operating system, `demo` is the default tag, and [ttl.sh](https://ttl.sh/) is the default image registry. The default ttl.sh image registry is free and does not require a sign-up. Images pushed to ttl.sh are ephemeral and will expire after 24 hours. + +Using the arguments defined in the **.arg** file, the final provider images you generate will follow the naming convention `[IMAGE_REGISTRY]/[IMAGE_REPO]:[CUSTOM_TAG]`. In this example, the provider images will be `ttl.sh/ubuntu:k3s-1.25.2-v3.4.3-demo`. Refer to the **.arg.template** sample file in the current directory or the [README](https://github.com/spectrocloud/CanvOS#readme) to learn more about the default values. +
+ +```bash +cat << EOF > .arg +CUSTOM_TAG=$CUSTOM_TAG +IMAGE_REGISTRY=ttl.sh +OS_DISTRIBUTION=ubuntu +IMAGE_REPO=ubuntu +OS_VERSION=22 +K8S_DISTRIBUTION=k3s +ISO_NAME=palette-edge-installer +PE_VERSION=$(git describe --abbrev=0 --tags) +ARCH=amd64 +EOF +``` + +View the newly created file to ensure the arguments are defined per your requirements. +
+ +```bash +cat .arg +``` +
+ + +Refer to the [Build Edge Artifacts](../edgeforge-workflow/palette-canvos.md) guide to learn more about customizing arguments. +
+ +## Create User Data + +Next, you will create a **user-data** file that embeds the tenant registration token and Edge host's login credentials in the Edge Installer ISO image. + + +Issue the command below to save your tenant registration token to a local variable. Replace `[your_token_here]` placeholder with your actual registration token. +
+ +```bash +export token=[your_token_here] +``` + + +Use the following command to create the **user-data** file containing the tenant registration token. You can click on the *Points of Interest* numbers below to learn more about the main attributes relevant to this example. +
+ + + + +```shell +cat << EOF > user-data +#cloud-config +stylus: + site: + edgeHostToken: $token +install: + poweroff: true +users: + - name: kairos + passwd: kairos +EOF +``` + + + +Review the newly created user data file. +
+ +```bash +cat user-data +``` +The expected output should show that the `edgeHostToken` and login credentials for Edge hosts are set correctly. The `edgeHostToken` value must match your Palette registration token. Otherwise, your Edge hosts will not register themselves with Palette automatically. Below is a sample output with a dummy token value. +
+ +```hideClipboard bash +#cloud-config +stylus: + site: + edgeHostToken: 62ElvdMeX5MdOESgTleBjjKQg8YkaIN3 +install: + poweroff: true +users: + - name: kairos + passwd: kairos +``` + +
+ +## Build Artifacts + +The CanvOS utility uses [Earthly](https://earthly.dev/) to build the target artifacts. Issue the following command to start the build process. +
+ +```bash +sudo ./earthly.sh +build-all-images +``` + +```hideClipboard bash {2} +# Output condensed for readability +===================== Earthly Build SUCCESS ===================== +Share your logs with an Earthly account (experimental)! Register for one at https://ci.earthly.dev. +``` + +This command may take 15-20 minutes to finish depending on the hardware resources of the host machine. Upon completion, the command will display the manifest, as shown in the example below, that you will use in your cluster profile later in this tutorial. Note that the `system.xxxxx` attribute values in the manifest example are the same as what you defined earlier in the **.arg** file. + +Copy and save the output attributes in a notepad or clipboard to use later in your cluster profile. +
+ +```bash +pack: + content: + images: + - image: "{{.spectro.pack.edge-native-byoi.options.system.uri}}" +options: + system.uri: "{{ .spectro.pack.edge-native-byoi.options.system.registry }}/{{ .spectro.pack.edge-native-byoi.options.system.repo }}:{{ .spectro.pack.edge-native-byoi.options.system.k8sDistribution }}-{{ .spectro.system.kubernetes.version }}-{{ .spectro.pack.edge-native-byoi.options.system.peVersion }}-{{ .spectro.pack.edge-native-byoi.options.system.customTag }}" + system.registry: ttl.sh + system.repo: ubuntu + system.k8sDistribution: k3s + system.osName: ubuntu + system.peVersion: v3.4.3 + system.customTag: demo + system.osVersion: 22 +``` +
+ +## View Artifacts + +After completing the build process, list the edge installer ISO image and checksum by issuing the following command from the **CanvOS** directory. +
+ +```bash +ls build/ +``` + +```hideClipboard bash +# Output +palette-edge-installer.iso +palette-edge-installer.iso.sha256 +``` + + +Export the path to the ISO file, the **build** directory, in the `ISOFILEPATH` local variable. Later in the tutorial, you will use this local variable to mount the **build** directory to a Docker container. +
+ +```bash +export ISOFILEPATH=$PWD/build +echo $ISOFILEPATH +``` + + +List the Docker images to review the created provider images. By default, provider images are created for all the Palette-supported Kubernetes versions. You can identify the provider images by the image tag value you used in the **.arg** file's `CUSTOM_TAG` variable. +
+ +```shell +docker images --filter=reference='*/*:*demo' +``` + +```hideClipboard bash {3,4} +# Output +REPOSITORY TAG IMAGE ID CREATED SIZE +ttl.sh/ubuntu k3s-1.24.6-v3.4.3-demo 3a672a023bd3 45 minutes ago 4.61GB +ttl.sh/ubuntu k3s-1.25.2-v3.4.3-demo 0217de3b9e7c 45 minutes ago 4.61GB +``` +
+ +## Push Provider Images + +Push the provider images to the image registry indicated in the **.arg** file so that you can reference the provider image later in your cluster profile. + +This tutorial uses the provider image compatible with K3s v1.25 in the cluster profile, so use the following command to push that image to the image registry. If you want to use the other provider image, compatible with K3s v1.24, push that version instead. The example below and the default behavior use the [ttl.sh](https://ttl.sh/) image registry. This image registry is free and does not require you to sign up to use it. Images pushed to ttl.sh are ephemeral and will expire after 24 hours. +
+ +```bash +docker push ttl.sh/ubuntu:k3s-1.25.2-v3.4.3-demo +``` + +:::caution + +As a reminder, [ttl.sh](https://ttl.sh/) is a short-lived image registry. If you do not use these provider images in your cluster profile within 24 hours of pushing to *ttl.sh*, they will expire and must be re-pushed. If you want to use a different image registry, refer to the Advanced workflow in the [Build Edge Artifacts](../edgeforge-workflow/palette-canvos.md) guide to learn how to use another registry. + +::: +
+ + +## Provision Virtual Machines + +In this section, you will create a VM template in VMware vCenter from the Edge installer ISO image and clone that VM template to provision three VMs. Think of a VM template as a snapshot that can be used to provision new VMs. You cannot modify templates after you create them, so cloning the VM template will ensure all VMs have *consistent* guest OS, dependencies, and user data configurations installed. + +This tutorial example will use [Packer](https://www.packer.io/) to create a VM template from the Edge installer ISO image. Later, it will use [GOVC](https://github.com/vmware/govmomi/tree/main/govc#govc) to clone the VM template to provision three VMs. You do not have to install Packer or GOVC in your Linux development environment. You will use our official tutorials container that already contains the required tools.
+ +### Create a VM Template + +You will use the **heredoc** script to create a VM template. The script prompts you to enter your VMware vCenter environment details and saves them as environment variables in a file named **.packerenv**. Packer reads the environment variables during the build process. + +Before you invoke the **heredoc** script, have values handy in a notepad for the VMware vCenter environment variables listed in the table. +
+ +|**Variable**|**Description**| **How to find its value?**| +|---|---|---| +| `PKR_VAR_vcenter_server` | vCenter server URL |Check with your VMware data center administrator. Omit `http://` or `https://` in the URL. Example, use `vcenter.spectrocloud.dev`. | +|`PKR_VAR_vcenter_username`| vSphere client username |Request credentials from your VMware data center administrator. Example: `myusername@vsphere.local`| +|`PKR_VAR_vcenter_password`|vSphere client password|--| +|`PKR_VAR_vcenter_datacenter`|Data center name |Expand your vSphere client's main menu and select **Inventory** > **Hosts and Clusters**. The data center name is displayed in the left navigation tree.| +|`PKR_VAR_vcenter_cluster`|Cluster name | Expand the data center inventory to view the cluster name in the left navigation tree. | +|`PKR_VAR_vcenter_resource_pool`|Resource pool name | Expand the cluster inventory to view the resource pool name. | +|`PKR_VAR_vcenter_folder`|Folder name | Switch to the **VMs and Templates** view in your vSphere client. The folder name is displayed in the left navigation tree.| +|`PKR_VAR_vcenter_datastore`|Datastore name | Switch to the **Storage** view in your vSphere client. The datastore name is displayed in the left navigation tree.| +|`PKR_VAR_vcenter_network`| Network name | Switch to the **Networking** view in your vSphere client. The network name is displayed in the left navigation tree.| + + +Use the **heredoc** script to create the **.packerenv** file shown below that contains the VMware vCenter details as environment variables. +
+ +```bash +cat << EOF > .packerenv +PKR_VAR_vcenter_server=$(read -ep 'Enter vCenter Server URL without http:// or https://, for example: vcenter.spectrocloud.dev ' vcenter_server && echo $vcenter_server) +PKR_VAR_vcenter_username=$(read -ep 'Enter vCenter Username value: ' vcenter_username && echo $vcenter_username) +PKR_VAR_vcenter_password=$(read -ep 'Enter vCenter Password value: ' vcenter_password && echo $vcenter_password) +PKR_VAR_vcenter_datacenter=$(read -ep 'Enter vCenter Datacenter name: ' vcenter_datacenter && echo $vcenter_datacenter) +PKR_VAR_vcenter_cluster=$(read -ep 'Enter vCenter Cluster name: ' vcenter_cluster && echo $vcenter_cluster) +PKR_VAR_vcenter_resource_pool=$(read -ep 'Enter vCenter Resource Pool name: ' vcenter_resource_pool && echo $vcenter_resource_pool) +PKR_VAR_vcenter_folder=$(read -ep 'Enter vCenter Folder name: ' vcenter_folder && echo $vcenter_folder) +PKR_VAR_vcenter_datastore=$(read -ep 'Enter vCenter Datastore name: ' vcenter_datastore && echo $vcenter_datastore) +PKR_VAR_vcenter_network=$(read -ep 'Enter vCenter Network name: ' vcenter_network && echo $vcenter_network) +EOF +``` +View the file to ensure you have filled in the details correctly. +
+ +```bash +cat .packerenv +``` + +You will use the **.packerenv** file later in the tutorial when you start Packer. + +Next, verify the `ISOFILEPATH` local variable has the path to the ISO file. The `docker run` command uses this variable to bind mount the host's **build** directory to the container. +
+ +```bash +echo $ISOFILEPATH +``` + +:::info + +The environment variable you set using `export [var-name]=[var-value]` will not persist across terminal sessions. If you opened a new terminal session in your development environment, you will lose the `ISOFILEPATH` variable and will need to reset it. + +::: +
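+
+If that happens, you can reset the variable by issuing the following commands from the **CanvOS** directory before continuing.
+
+```bash
+# Run these from the CanvOS directory in the new terminal session.
+export ISOFILEPATH=$PWD/build
+echo $ISOFILEPATH
+```
+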
+ +The next step is to use the following `docker run` command to trigger the Packer build process to create a VM template. Here is an explanation of the options and sub-command used below: +
+ +- The `--env-file` option reads the **.packerenv** file. + + +- The `--volume` option mounts a local directory to our official tutorials container, `ghcr.io/spectrocloud/tutorials:1.0.7`. + + +- The `sh -c "cd edge/vmware/packer/ && packer build -force --var-file=vsphere.hcl build.pkr.hcl"` shell sub-command changes to the container's **edge/vmware/packer/** directory and invokes `packer build` to create the VM template. The `packer build` command has the following options: + + - The `-force` flag destroys any existing template. + - The `--var-file` option reads the **vsphere.hcl** file from the container. This file contains the VM template name, VM configuration, and ISO file name to use. The VM configuration conforms to the [minimum device requirements](../architecture/#minimum-device-requirements). + +The **vsphere.hcl** file content is shown below for your reference. This tutorial does not require you to modify these configurations. +
+ +```bash hideClipboard +# VM Template Name +vm_name = "palette-edge-template" +# VM Settings +vm_guest_os_type = "ubuntu64Guest" +vm_version = 14 +vm_firmware = "bios" +vm_cdrom_type = "sata" +vm_cpu_sockets = 4 +vm_cpu_cores = 1 +vm_mem_size = 8192 +vm_disk_size = 51200 +thin_provision = true +disk_eagerly_scrub = false +vm_disk_controller_type = ["pvscsi"] +vm_network_card = "vmxnet3" +vm_boot_wait = "5s" +# ISO Objects +iso = "build/palette-edge-installer.iso" +iso_checksum = "build/palette-edge-installer.iso.sha256" +``` +
+ +:::info + +Should you need to change the VM template name or VM settings defined in the **vsphere.hcl** file, or review the Packer script, you must open a bash session into the container using the `docker run -it --env-file .packerenv --volume "${ISOFILEPATH}:/edge/vmware/packer/build" ghcr.io/spectrocloud/tutorials:1.0.7 bash` command, and change to the **edge/vmware/packer/** directory to make the modifications. After you finish the modifications, issue the `packer build -force --var-file=vsphere.hcl build.pkr.hcl` command to trigger the Packer build process. + +::: +
+ +Issue the following command to trigger the Packer build process to create a VM template in the VMware vCenter. It will also upload and keep a copy of the **palette-edge-installer.iso** to the **packer_cache/** directory in the specified datastore. + +
+ +```bash +docker run --interactive --tty --rm \ + --env-file .packerenv \ + --volume "${ISOFILEPATH}:/edge/vmware/packer/build" \ + ghcr.io/spectrocloud/tutorials:1.0.7 \ + sh -c "cd edge/vmware/packer/ && packer build -force --var-file=vsphere.hcl build.pkr.hcl" +``` + +Depending on your machine and network, the build process can take 7-10 minutes to finish. +
+ +```hideClipboard bash {10,11} +# Sample output +==> vsphere-iso.edge-template: Power on VM... + vsphere-iso.edge-template: Please shutdown virtual machine within 10m0s. +==> vsphere-iso.edge-template: Deleting Floppy drives... +==> vsphere-iso.edge-template: Eject CD-ROM drives... +==> vsphere-iso.edge-template: Deleting CD-ROM drives... +==> vsphere-iso.edge-template: Convert VM into template... +Build 'vsphere-iso.edge-template' finished after 7 minutes 13 seconds. +==> Wait completed after 7 minutes 13 seconds +==> Builds finished. The artifacts of successful builds are: +--> vsphere-iso.edge-template: palette-edge-template +``` +
+ + +### Provision VMs + +Once Packer creates the VM template, you can use the template when provisioning VMs. In the next steps, you will use the [GOVC](https://github.com/vmware/govmomi/tree/main/govc#govc) tool to deploy a VM and reference the VM template that Packer created. Remember that the VM instances you are deploying simulate bare metal devices. + + +GOVC requires the same VMware vCenter details as the environment variables you defined earlier in the **.packerenv** file. Use the following command to source the **.packerenv** file and echo one of the variables to ensure the variables are accessible on your host machine. +
+ +```bash +source .packerenv +echo $PKR_VAR_vcenter_server +``` + +Use the following command to create a **.goenv** environment file. The **.goenv** file contains the VMware vCenter credentials and information required to deploy VMs in your VMware environment. +
+ +```bash +cat << EOF > .goenv +vcenter_server=$PKR_VAR_vcenter_server +vcenter_username=$PKR_VAR_vcenter_username +vcenter_password=$PKR_VAR_vcenter_password +vcenter_datacenter=$PKR_VAR_vcenter_datacenter +vcenter_datastore=$PKR_VAR_vcenter_datastore +vcenter_resource_pool=$PKR_VAR_vcenter_resource_pool +vcenter_folder=$PKR_VAR_vcenter_folder +vcenter_cluster=$PKR_VAR_vcenter_cluster +vcenter_network=$PKR_VAR_vcenter_network +EOF +``` +View the file to ensure variable values are set correctly. +
+ +```bash +cat .goenv +``` + + +The next step is to use the following `docker run` command to clone the VM template and provision three VMs. Here is an explanation of the options and sub-command used below: +
+ +- The `--env-file` option reads the **.goenv** file in our official `ghcr.io/spectrocloud/tutorials:1.0.7` tutorials container. + + +- The `sh -c "cd edge/vmware/clone_vm_template/ && ./deploy-edge-host.sh"` shell sub-command changes to the container's **edge/vmware/clone_vm_template/** directory and invokes the **deploy-edge-host.sh** shell script. + + +The **edge/vmware/clone_vm_template/** directory in the container has the following files: +
+ +- **deploy-edge-host.sh** - Provisions the VMs. + + +- **delete-edge-host.sh** - Deletes the VMs. + + +- **setenv.sh** - Defines the GOVC environment variables, the number of VMs, a prefix string for the VM name, and the VM template name. Most of the GOVC environment variables refer to the variables you have defined in the **.goenv** file. + + +Below is the **setenv.sh** file content for your reference. This tutorial does not require you to modify these configurations. +
+ +```bash hideClipboard +#!/bin/bash +# Number of VMs to provision +export NO_OF_VMS=3 +export VM_PREFIX="demo" +export INSTALLER_TEMPLATE="palette-edge-template" + +#### DO NOT MODIFY BELOW HERE #################### +# GOVC Properties +export GOVC_URL="https://${vcenter_server}" # Use HTTPS. For example, https://vcenter.company.com +export GOVC_USERNAME="${vcenter_username}" +export GOVC_PASSWORD="${vcenter_password}" +export GOVC_INSECURE=1 #1 if insecure +export GOVC_DATACENTER="${vcenter_datacenter}" +export GOVC_DATASTORE="${vcenter_datastore}" +export GOVC_NETWORK="${vcenter_network}" +export GOVC_RESOURCE_POOL="${vcenter_resource_pool}" +export GOVC_FOLDER="${vcenter_folder}" +``` +
+ +:::info + +Suppose you have changed the VM template name in the previous step or need to change the number of VMs to provision. In that case, you must modify the **setenv.sh** script. To do so, you can reuse the container bash session from the previous step if it is still active, or you can open another bash session into the container using the `docker run -it --env-file .goenv ghcr.io/spectrocloud/tutorials:1.0.7 bash` command. If you use an existing container bash session, create the **.goenv** file described above and source it in your container environment. Next, change to the **edge/vmware/clone_vm_template/** directory to modify the **setenv.sh** script, and issue the `./deploy-edge-host.sh` command to deploy the VMs. + +::: +
+ +Issue the following command to clone the VM template and provision three VMs. +
+ +```bash +docker run -it --rm \ + --env-file .goenv \ + ghcr.io/spectrocloud/tutorials:1.0.7 \ + sh -c "cd edge/vmware/clone_vm_template/ && ./deploy-edge-host.sh" +``` + +The cloning process can take 3-4 minutes to finish and displays output similar to that shown below. The output displays the Edge host ID for each VM, as highlighted in the sample output below. VMs use this host ID to auto-register themselves with Palette. +
+
+```bash hideClipboard {7}
+# Sample output for one VM
+Cloning /Datacenter/vm/sp-sudhanshu/palette-edge-template to demo-1...OK
+Cloned VM demo-1
+Powering on VM demo-1
+Powering on VirtualMachine:vm-13436... OK
+Getting UUID demo-1
+Edge Host ID VM demo-1 : edge-97f2384233b498f6aa8dec90c3437c28
+```
+
+For each of the three VMs, copy the Edge host ID. An Edge host ID looks similar to `edge-97f2384233b498f6aa8dec90c3437c28`. 
+
+:::caution
+
+You must copy the Edge host IDs for future reference. In addition, if auto-registration fails, you will need the Edge host IDs to manually register Edge hosts in Palette. 
+
+:::
+
+
+## Verify Host Registration
+
+Before deploying a cluster, you must verify Edge host registration status in Palette. 
+
+Open a web browser and log in to [Palette](https://console.spectrocloud.com). Navigate to the left **Main Menu** and select **Clusters**. Click on the **Edge Hosts** tab and verify the three VMs you created are registered with Palette. 
+
+![A screenshot showing the VMs automatically registered with Palette. ](/tutorials/edge/clusters_edge_deploy-cluster_edge-hosts.png)
+
+
+If the three Edge hosts are not displayed in the **Edge Hosts** tab, the automatic registration failed. If this happens, you can manually register hosts by clicking the **Add Edge Hosts** button and pasting the Edge host ID. Repeat this host registration process for each of the three VMs. 
+If you need help, the detailed instructions are available in the [Register Edge Host](../site-deployment/site-installation/edge-host-registration.md) guide. 
+
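+
+If you misplace an Edge host ID before you register the hosts, you can re-derive it from the VM's UUID, because the deploy script builds the ID by prefixing `edge-` to the UUID with the dashes removed. The following is a minimal sketch that assumes this convention, that the GOVC environment variables from **setenv.sh** are exported in your shell, and that `demo-1` is one of your VM names.
+
+```bash
+# Sketch: recover an Edge host ID from a VM's UUID with GOVC.
+# Assumes the GOVC_* variables are exported and that the Edge host ID
+# follows the edge-<UUID without dashes> convention used by the deploy script.
+VM_NAME="demo-1"
+UUID=$(govc vm.info "$VM_NAME" | awk '/UUID:/ {print $2}')
+echo "edge-$(echo "$UUID" | tr -d '-')"
+```
+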
+ +## Deploy a Cluster + +Once you verify the host registration, the next step is to deploy a cluster. In this section, you will use the Palette User Interface (UI) to deploy a cluster that is made up of the three Edge hosts you deployed. +
+ +## Create a Cluster Profile + +Validate you are in the **Default** project scope before creating a cluster profile. +
+ +![A screenshot of Palette's Default scope selected.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_default-scope.png) + +
+ + + +Next, create a cluster profile with the core infrastructure layers and a manifest of a sample application, [Hello Universe](https://github.com/spectrocloud/hello-universe#hello-universe). +Navigate to the left **Main Menu** and select **Profiles**. Click on the **Add Cluster Profile** button, and fill out the required input fields. The cluster profile wizard contains the following sections. +
+ +### Basic Information + +Use the following values when filling out the **Basic Information** section. + +|**Field**|**Value**| +|---|---| +|Name|docs-ubuntu-k3s| +|Version|`1.0.0`| +|Description|Cluster profile as part of the edge cluster deployment tutorial.| +|Type|Full| +|Tags|`spectro-cloud-education, app:hello-universe, terraform_managed:false`| + +Click on **Next** to continue. +
+ + +### Cloud Type + +In the **Cloud Type** section, choose **Edge Native** and click on **Next** at the bottom to proceed to the next section. +
+
+### Profile Layers
+
+In the **Profile Layers** section, add the following [BYOS Edge OS](../../../integrations/byoos.md) pack to the OS layer. 
+
+|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**|
+|---|---|---|---|
+|OS|Public Repo|BYOS Edge OS|`1.0.0`|
+
+Replace the OS layer manifest with the following custom manifest so that the cluster profile can pull the provider image from the *ttl.sh* image registry. You may recall that the CanvOS script returned an output containing a custom manifest after building the Edge artifacts. You will copy the CanvOS output into the cluster profile's BYOOS pack YAML file. 
+
+ The `system.xxxxx` attribute values in the manifest below are the same as those you defined in the **.arg** file while building the Edge artifacts. Copy the code snippet below into the YAML editor for the BYOOS pack. 
+
+ +```yaml +pack: + content: + images: + - image: "{{.spectro.pack.edge-native-byoi.options.system.uri}}" +options: + system.uri: "{{ .spectro.pack.edge-native-byoi.options.system.registry }}/{{ .spectro.pack.edge-native-byoi.options.system.repo }}:{{ .spectro.pack.edge-native-byoi.options.system.k8sDistribution }}-{{ .spectro.system.kubernetes.version }}-{{ .spectro.pack.edge-native-byoi.options.system.peVersion }}-{{ .spectro.pack.edge-native-byoi.options.system.customTag }}" + system.registry: ttl.sh + system.repo: ubuntu + system.k8sDistribution: k3s + system.osName: ubuntu + system.peVersion: v3.4.3 + system.customTag: demo + system.osVersion: 22 +``` +
+ + +The screenshot below shows you how to reference your provider OS image in a cluster profile by using the utility build output with the BYOOS pack. +
+
+![A screenshot of k3s OS layer in a cluster profile.](/tutorials/edge/clusters_edge_deploy-cluster_edit-profile.png)
+
+
+:::caution
+
+ *ttl.sh* is a short-lived image registry. If you do not use the provider image in your cluster profile within 24 hours of pushing it to *ttl.sh*, the image will expire and must be re-pushed. In a production environment, use a custom registry for hosting provider images. 
+
+:::
+
+
+Click on the **Next layer** button to add the following Kubernetes layer to your cluster profile. 
+
+
+|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**|
+|---|---|---|---|
+|Kubernetes|Public Repo|Palette Optimized K3s|`1.25.x`|
+
+
+Select the K3s version 1.25.x because earlier in this tutorial, you pushed a provider image compatible with K3s v1.25.2 to the *ttl.sh* image registry. The `system.uri` attribute of the BYOOS pack will reference the Kubernetes version you select using the `{{ .spectro.system.kubernetes.version }}` [macro](../../cluster-management/macros.md). 
+
+
+Click on the **Next layer** button, and add the following network layer. This example uses the Calico Container Network Interface (CNI). However, you can choose a different CNI pack that fits your needs, such as Flannel, Cilium, or Custom CNI. 
+
+
+|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**|
+|---|---|---|---|
+|Network|Public Repo|Calico|`3.25.x`|
+
+
+Click on the **Confirm** button to complete the core infrastructure stack. Palette displays the newly created infrastructure profile as a layered diagram. 
+
+Finally, click on the **Add Manifest** button to add the [Hello Universe](https://github.com/spectrocloud/hello-universe#readme) application manifest. 
+
+![A screenshot of the add Manifest button.](/tutorials/edge/clusters_edge_deploy-cluster_add-manifest.png)
+
+Use the following values to add the Hello Universe manifest metadata. 
+
+|**Field** |**Value**|
+|---|---|
+|Layer name| hello-universe|
+|Layer values (Optional)|Leave default|
+|Install order (Optional)|Leave default|
+|Manifests|Add new manifest, and name it `hello-universe`|
+
+When you provide the `hello-universe` value in the **Manifest** field, a blank text editor opens on the right. Copy the following manifest and paste it into the text editor. 
+
+ +```yaml +apiVersion: v1 +kind: Service +metadata: + name: hello-universe-service +spec: + type: NodePort + selector: + app: hello-universe + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hello-universe-deployment +spec: + replicas: 2 + selector: + matchLabels: + app: hello-universe + template: + metadata: + labels: + app: hello-universe + spec: + containers: + - name: hello-universe + image: ghcr.io/spectrocloud/hello-universe:1.0.12 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 +``` + +The screenshot below shows the manifest pasted into the text editor. Click on the **Confirm & Create** button to finish adding the manifest. + + ![A screenshot of Hello Universe application manifest.](/tutorials/edge/clusters_edge_deploy-cluster_add-manifest-file.png) + + +If there are no errors or compatibility issues, Palette displays the newly created full cluster profile for review. Verify the layers you added, and click on the **Next** button. +
+ +Review all layers and click **Finish Configuration** to create the cluster profile. +
+ +## Create a Cluster + +Click on the newly created cluster profile to view its details page. Click the **Deploy** button to deploy a new Edge cluster. +
+ +![Screenshot of the Profile Layers success.](/tutorials/edge/clusters_edge_deploy-cluster_profile-success.png) + +The cluster deployment wizard displays the following sections. +
+ +### Basic Information + +Use the following values in the **Basic Information** section. + +|**Field**|**Value**| +|---|---| +|Cluster name| docs-tutorial-cluster | +|Description| Cluster as part of the Edge tutorial.| +|Tags|`spectro-cloud-education, app:hello-universe, terraform_managed:false`| + +Click **Next** to continue. +
+ +### Parameters + +The **Parameters** section offers you another opportunity to change the profile configuration. For example, clicking on the **BYOS Edge OS 1.0.0** layer allows you to configure the `system.registry`, `system.repo`, and other available attributes. + +Use the default values for all attributes across all layers and click **Next**. +
+ +### Cluster configuration + +Provide the Virtual IP (VIP) address for the host cluster to use during the cluster configuration process. An Edge cluster virtual IP represents the entire cluster, and external clients or applications can use it to access services the Edge cluster provides. Ask your system administrator for an IP address you can use. It must be unique and not conflict with any other IP addresses in the network. + +If available, you can optionally select an SSH key to remote into the host cluster and provide a Network Time Protocol (NTP) server list. + +Click **Next** to continue. +
+ +### Nodes configuration + +In this section, you will use the Edge hosts to create the cluster nodes. Use one of the Edge hosts as the control plane node and the remaining two as worker nodes. In this example, the control plane node is called the master pool, and the set of worker nodes is the worker pool. + +Provide the following details for the master pool. + +|**Field** | **Value for the master-pool**| +|---| --- | +|Node pool name| master-pool | +|Allow worker capability| Checked | +|Additional Labels (Optional) | None | +|[Taints](../../cluster-management/taints.md#taints)|Off| +|Pool Configuration > Edge Hosts | Choose one of the registered Edge hosts.
Palette will automatically display the Nic Name for the selected host. | + +The screenshot below shows an Edge host added to the master pool. + +![Screenshot of an Edge host added to the master pool.](/tutorials/edge/clusters_edge_deploy-cluster_add-master-node.png) + + +Similarly, provide details for the worker pool, and add the remaining two Edge hosts to the worker pool. + +|**Field** | **Value for the worker-pool**| +|---| --- | +|Node pool name| worker-pool | +|Additional Labels (Optional) | None | +|Taints|Off| +|Pool Configuration > Edge Hosts | Choose one or more registered Edge hosts. | + +The screenshot below shows two Edge hosts added to the worker pool. + +![Screenshot of Edge hosts added to the worker pool.](/tutorials/edge/clusters_edge_deploy-cluster_add-worker-node.png) + +Click **Next** to continue. +
+ +### Settings + +This section displays options for OS patching, scheduled scans, scheduled backups, cluster role binding, and location. Use the default values, and click on the **Validate** button. +
+ +### Review + +Review all configurations in this section. The **Review** page displays the cluster name, tags, node pools, and layers. If everything looks good, click on the **Finish Configuration** button to finish deploying the cluster. Deployment may take up to *20 minutes* to finish. + +While deployment is in progress, Palette displays the cluster status as **Provisioning**. While you wait for the cluster to finish deploying, you can explore the various tabs on the cluster details page, such as **Overview**, **Workloads**, and **Events**. +
+ +## Validate + +In Palette, navigate to the left **Main Menu** and select **Clusters**. Select your cluster to display the cluster **Overview** page and monitor cluster provisioning progress. + + +When cluster status displays **Running** and **Healthy**, you can access the application from the exposed service URL with the port number displayed. One random port between 30000-32767 is exposed for the Hello Universe application. Click on the port number to access the application. + +The screenshot below highlights the NodePort to access the application. + +![Screenshot of highlighted NodePort to access the application.](/tutorials/edge/clusters_edge_deploy-cluster_access-service.png) + + + +Clicking on the exposed NodePort displays the Hello Universe application. +
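+
+If you prefer the command line, you can also look up the assigned NodePort with `kubectl`. The following is a minimal sketch that assumes you have already downloaded the cluster's kubeconfig and configured `kubectl` access, as described in the Access Cluster with CLI guide.
+
+```bash
+# Sketch: print the NodePort that the cluster assigned to the Hello Universe service.
+# Assumes kubectl is pointed at this cluster through its kubeconfig file.
+kubectl get service hello-universe-service \
+  --output jsonpath='{.spec.ports[0].nodePort}'
+```
+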
+ +:::caution + +We recommend waiting to click on the service URL, as it takes one to three minutes for DNS to properly resolve the public NodePort URL. This prevents the browser from caching an unresolved DNS request. + +::: + + +![Screenshot of successfully accessing the Hello Universe application.](/tutorials/edge/clusters_edge_deploy-cluster_hello-universe.png) + + +You have successfully provisioned an Edge cluster and deployed the Hello Universe application on it. + +
+ +## Cleanup + +The following steps will guide you in cleaning up your environment, including the cluster, cluster profile, and Edge hosts. +
+ +### Delete Cluster and Profile + +In Palette, display the cluster details page. Click on the **Settings** button to expand the **drop-down Menu**, and select the **Delete Cluster** option, as shown in the screenshot below. + + +![Screenshot of deleting a cluster.](/tutorials/edge/clusters_edge_deploy-cluster_delete-cluster.png) + + +Palette prompts you to enter the cluster name and confirm the delete action. Type the cluster name to delete the cluster. The cluster status changes to **Deleting**. Deletion takes up to 10 minutes. + + +After you delete the cluster, click **Profiles** on the left **Main Menu**, and select the profile to delete. Choose the **Delete** option in the **three-dot Menu**, as shown in the screenshot below. + + +![Screenshot of deleting a cluster profile.](/tutorials/edge/clusters_edge_deploy-cluster_delete-profile.png) + + +Wait for Palette to successfully delete the resources. +
+ +### Delete Edge Hosts + +Switch back to the **CanvOS** directory in the Linux development environment containing the **.goenv** file, and use the following command to delete the Edge hosts. +
+ +```bash +docker run --interactive --tty --rm --env-file .goenv \ + ghcr.io/spectrocloud/tutorials:1.0.7 \ + sh -c "cd edge/vmware/clone_vm_template/ && ./delete-edge-host.sh" +``` + +
+ +### Delete Edge Artifacts + +If you want to delete Edge artifacts from your Linux development environment, delete the Edge installer ISO image and its checksum by issuing the following commands from the **CanvOS/** directory. +
+ +```bash +rm build/palette-edge-installer.iso +rm build/palette-edge-installer.iso.sha256 +``` + +Issue the following command to list all images in your current development environment. +
+ +```bash +docker images +``` + +Note the provider image name and tags, and use the following command syntax to remove all provider images. + +
+ +```bash +docker image rm --force ttl.sh/ubuntu:k3s-1.25.2-v3.4.3-demo +docker image rm --force ttl.sh/ubuntu:k3s-1.24.6-v3.4.3-demo +``` +
+
+### Delete VMware vSphere Resources
+
+Navigate to **Inventory** > **VMs and Templates** in your vSphere client. To delete the **palette-edge-template** VM template, right-click on it and choose the **Delete** option from the **drop-down Menu**. 
+
+Switch to the **Storage** view in your vSphere client. To delete the **palette-edge-installer.iso** file from the **packer_cache/** directory in the VMware vCenter datastore, right-click on it and choose the **Delete** option from the **drop-down Menu**. 
+
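+
+Alternatively, if you have the GOVC tool available with the `GOVC_URL`, `GOVC_USERNAME`, `GOVC_PASSWORD`, and `GOVC_DATASTORE` variables exported, as in the **setenv.sh** file shown earlier, you can remove the same resources from the command line. The following is a sketch that uses the default names from this tutorial; adjust the template name and datastore path to match your environment.
+
+```bash
+# Sketch: delete the VM template and the cached installer ISO with GOVC
+# instead of the vSphere client. The names and paths are tutorial defaults.
+govc vm.destroy palette-edge-template
+govc datastore.rm packer_cache/palette-edge-installer.iso
+```
+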
+
+## Wrap-Up
+
+Building Edge artifacts allows you to prepare Edge hosts and deploy Palette-managed Edge clusters. Edge artifacts consist of an Edge installer ISO and provider images for all the Palette-supported Kubernetes versions. An Edge installer ISO assists in preparing the Edge hosts, and the provider image is used in the cluster profile. 
+
+In this tutorial, you learned how to build Edge artifacts, prepare VMware VMs as Edge hosts using the Edge installer ISO, create a cluster profile referencing a provider image, and deploy a cluster. 
+
+Palette's Edge solution allows you to prepare your Edge hosts with the desired OS, dependencies, and user data configurations. It supports multiple Kubernetes versions while building the Edge artifacts and creating cluster profiles, enabling you to choose the desired Kubernetes version for your cluster deployment. 
+
+Before you plan a production-level deployment at scale, you can prepare a small set of Edge devices for development testing and to validate the devices' state and installed applications. Once the validation is satisfactory and meets your requirements, you can roll out Edge artifacts and cluster profiles for deployment in production. This approach maintains consistency when you deploy Kubernetes clusters at scale across all physical sites, whether you manage 1,000 sites or more. In addition, you can use Palette to manage the entire lifecycle of Edge clusters. 
+
+To learn more about Edge, check out the resources below. 
+
+- [Build Edge Artifacts](../edgeforge-workflow/palette-canvos.md)
+
+
+- [Build Content Bundle](../edgeforge-workflow/build-content-bundle.md)
+
+
+- [Model Edge Native Cluster Profile](../site-deployment/model-profile.md)
+
+
+- [Prepare Edge Hosts for Installation](../site-deployment/stage.md)
+
+
+- [Perform Site Install](../site-deployment/site-installation/site-installation.md)
\ No newline at end of file
diff --git a/docs/docs-content/clusters/edge/site-deployment/model-profile.md b/docs/docs-content/clusters/edge/site-deployment/model-profile.md
new file mode 100644
index 0000000000..39a735a2c1
--- /dev/null
+++ b/docs/docs-content/clusters/edge/site-deployment/model-profile.md
@@ -0,0 +1,220 @@
+---
+sidebar_label: "Model Edge Native Cluster Profile"
+title: "Model Edge Native Cluster Profile"
+description: "Instructions for creating an Edge Native Cluster Profile"
+hide_table_of_contents: false
+sidebar_position: 0
+tags: ["edge"]
+---
+
+
+[Cluster profiles](../../../cluster-profiles/cluster-profiles.md) contain the desired specifications for the Kubernetes cluster that the Edge hosts make up. The cluster profile defines the following components. 
+
+- Kubernetes flavor and version
+
+- Operating system (OS)
+
+- Container network interface (CNI)
+
+- Container storage interface (CSI)
+
+You define these components in an Edge Native Infrastructure profile. As with any other environment in Palette, you can define additional add-on cluster profiles. You can use add-on profiles to define integrations or applications that must be included when Palette deploys the cluster. 
+
+
+
+The following steps will guide you on how to create a cluster profile for Edge. Choose the workflow that best fits your needs. 
+
+- [Custom OS](#custom-os)
+
+- [Without Custom OS](#without-custom-os)
+
+
+## Custom OS
+
+### Prerequisites
+
+- Ensure all required provider images are created and uploaded to the respective registry. Refer to the EdgeForge [Build Edge Artifacts](../edgeforge-workflow/palette-canvos.md) guide for details. 
+
+
+### Enablement
+
+
+1. 
Log in to [Palette](https://console.spectrocloud.com).
+
+
+2. Choose the desired scope, project or **Tenant Admin**. 
+
+
+3. Navigate to the left **Main Menu** and select **Profiles**. 
+
+
+4. Click on **Add Cluster Profile**. 
+
+
+5. Provide **Basic Information**, such as profile name, description, and tags. Select **Full** and click on **Next**. 
+
+
+6. Select **Edge Native** as the **Cloud Type** and click on **Next**. 
+
+
+7. Select **Public Repo** in the **Registry** field. 
+
+
+8. Select **BYOS Edge OS** in the **Pack Name** field and select the pack version. 
+
+
+9. Click on the code editor button to open up the editor.
+ 
+ + ![A view of the Kubernetes pack editor with a YAML configuration](/clusters_site-deployment_model-profile_byoos-pack-yaml.png) + + +10. Update the `system.uri` parameter in the pack editor. Use the custom OS image you created in the EdgeForge process. Refer to the EdgeForge [Build Images](../edgeforge-workflow/palette-canvos.md) guide if you are missing a custom OS image. The following is an example configuration using a custom OS image. + + ```yaml + pack: + content: + images: + - image: '{{.spectro.pack.edge-native-byoi.options.system.uri}}' + # - image: example.io/my-other-images/example:v1.0.0 + # - image: example.io/my-super-other-images/example:v1.0.0 + #drain: + #cordon: true + #timeout: 60 # The length of time to wait before giving up, zero means infinite + #gracePeriod: 60 # Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used + #ignoreDaemonSets: true + #deleteLocalData: true # Continue even if there are pods using emptyDir (local data that will be deleted when the node is drained) + #force: true # Continue even if there are pods that do not declare a controller + #disableEviction: false # Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, use with caution + #skipWaitForDeleteTimeout: 60 # If pod DeletionTimestamp older than N seconds, skip waiting for the pod. Seconds must be greater than 0 to skip. + + + options: + system.uri: example.io/my-images/example-custom-os:v1.4.5 + ``` + +
+ + :::info + + You can customize the node drainage behavior and specify additional images that you may have created that are part of the content bundle. Specify any additional image required by the cluster profile in the `images` section. Add an `- image: ` entry for each image you need to specify. Refer to the [BYOOS Pack](../../../integrations/byoos.md) resource to learn more about the pack details. + + ::: + + +11. Click on the **Next layer** button to continue. + + + +12. Complete the cluster profile creation process by filling out the remaining layers. + + +You have successfully created a cluster profile that you can use to deploy Edge clusters. + +### Validate + +Verify you created a cluster profile for Edge hosts by using the following steps. + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Choose the desired scope, project or **Tenant Admin**. + + +3. Navigate to the left **Main Menu** and select **Profiles**. + + +4. Use the **Cloud Types** **drop-down Menu** and select **Edge Native**. + + +5. Your newly created cluster profile is displayed along with other cluster profiles of the same type. + + + + +## Without Custom OS + +:::caution + +This workflow is unavailable for new Edge clusters. Use the **Custom OS** tab to learn how to use a custom OS with your cluster profile. + +::: + + +### Prerequisites + +No prerequisites. + +### Enablement + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Choose the desired scope, project or **Tenant Admin**. + + +3. Navigate to the left **Main Menu** and select **Profiles**. + + +4. Click the **Add New Profile** button. + + +5. Provide the profile with a name, description, version, and tags. Select **Full** for the profile type. Click on **Next**. + + +6. Select **Edge Native** as the cloud type and click on **Next**. + + +7. In the profile layers screen, for the OS layer, choose the desired OS type and OS version. Click on **Next layer**. + + :::info + + You can select **Bring Your Own OS (BYOOS)** if you build your enterprise Edge artifacts. Specify the registry that hosts your provider images as the system URI. You can also provide additional cloud-init configurations in the OS pack YAML file to set up Edge host users, install other OS packages, install certificates, and more. Refer to the [Cloud-Init Stages](../edge-configuration/cloud-init.md) resource to learn more about the cloud-init stages. + + ::: + + +8. Choose the desired Kubernetes distribution and version. Click on **Next layer**. + + +9. Choose the desired CNI type and version. Click on **Next layer**. + + +10. Review and save your cluster profile. + +You now have a cluster profile you can use for deploying Edge hosts. + +Consider creating additional profiles with out-of-the-box packs for monitoring, security, authentication, or other capabilities. If you need remote access to the cluster, consider adding the [Spectro Proxy](../../../integrations/frp.md) pack to one of the add-on profiles. + +Optionally, add additional Helm or OCI registries and include applications hosted in those registries in add-on profiles. Check out the guide for adding a [Helm](../../../registries-and-packs/helm-charts.md) or [OCI](../../../registries-and-packs/oci-registry.md) registry to learn more. + +### Validate + +Verify you created a cluster profile for Edge hosts by using the following steps. + + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Choose the desired scope, project or **Tenant Admin**. + + +3. 
Navigate to the left **Main Menu** and select **Profiles**. + + +4. Select **Edge Native** as the cloud type. + + +You can view your newly created cluster profile on the **Cluster Profiles** page. + + + + + + +## Next Steps + +Your next step in the deployment lifecycle is to prepare the Edge host for the installation. Use the [Prepare Edge Hosts for Installation](../site-deployment/stage.md) guide to continue. + +
diff --git a/docs/docs-content/clusters/edge/site-deployment/site-deployment.md b/docs/docs-content/clusters/edge/site-deployment/site-deployment.md
new file mode 100644
index 0000000000..a241b24f41
--- /dev/null
+++ b/docs/docs-content/clusters/edge/site-deployment/site-deployment.md
@@ -0,0 +1,53 @@
+---
+sidebar_label: "Deployment"
+title: "Deployment"
+description: "Learn about the Palette Edge installation process."
+hide_table_of_contents: false
+sidebar_position: 20
+tags: ["edge"]
+---
+
+The Edge host deployment process consists of four phases, described in the following table. 
+
+| Phase| Description|
+| ---| ---|
+| Model Cluster Profile | The process of creating a [cluster profile](../../../cluster-profiles/cluster-profiles.md) for the host cluster that will be made up of Edge hosts. |
+| Install Handoff | The Edge Installer is copied over from a portable storage device to the Edge host's hard disk. This is typically performed during the preparation phase. Refer to [Prepare Edge Hosts for Installation](../site-deployment/stage.md) to learn more.|
+| Registration | The Edge host is registered with Palette. The Edge host will remain in this phase until the registration process is complete.|
+|Cluster Provisioning | The Edge host boots into the specified provider OS and proceeds with the cluster deployment.|
+
+
+Review the following guides in sequential order to successfully deploy an Edge host. 
+
+ +1. [Model Edge Native Cluster Profile](../site-deployment/model-profile.md) + + +2. [Prepare Edge Hosts for Installation](../site-deployment/stage.md) + + +3. [Perform Site Install](../site-deployment/site-installation/site-installation.md) + + :::info + + In a lab environment, you must perform all the steps. In a non-learning environment, these steps are typically performed by people with different roles. The Palette Edge lifecycle is explained in detail in the [lifecycle](../edge-native-lifecycle.md) resource, highlighting the various roles involved. + + ::: + + +## Resources + +- [Model Cluster Profile](model-profile.md) + + +- [Prepare Edge Hosts for Installation](stage.md) + + +- [Perform Site Install](site-installation/site-installation.md) + + +- [Register Edge Host](site-installation/edge-host-registration.md) + + +- [Create Cluster Definition](site-installation/cluster-deployment.md) diff --git a/docs/docs-content/clusters/edge/site-deployment/site-installation/_category_.json b/docs/docs-content/clusters/edge/site-deployment/site-installation/_category_.json new file mode 100644 index 0000000000..c3460c6dbd --- /dev/null +++ b/docs/docs-content/clusters/edge/site-deployment/site-installation/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 30 +} diff --git a/docs/docs-content/clusters/edge/site-deployment/site-installation/cluster-deployment.md b/docs/docs-content/clusters/edge/site-deployment/site-installation/cluster-deployment.md new file mode 100644 index 0000000000..8802a28945 --- /dev/null +++ b/docs/docs-content/clusters/edge/site-deployment/site-installation/cluster-deployment.md @@ -0,0 +1,146 @@ +--- +sidebar_label: "Create Cluster Definition" +title: "Create Cluster Definition" +description: "Define your Edge cluster using the Edge hosts that are registered and available." +hide_table_of_contents: false +sidebar_position: 30 +tags: ["edge"] +--- + + + +To complete the Edge Installation process, an Edge host must become a member of a host cluster. You can add an Edge host to an existing host cluster of type Edge Native, or you can create a new host cluster for Edge hosts and make the Edge host a member. + +Select the workflow that best fits your needs. + +- [Create an Edge Native Host Cluster](#create-an-edge-native-host-cluster) + +- [Add an Edge Host to a Host Cluster](#add-an-edge-host-to-a-host-cluster) + + +## Create an Edge Native Host Cluster + + + +Use the following steps to create a new host cluster so that you can add Edge hosts to the node pools. + +### Prerequisites + +- A registered Edge host. + +### Create Cluster + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Click on **Add New Cluster**. + + +4. Choose **Edge Native** for the cluster type and click **Start Edge Native Configuration**. + + +5. Give the cluster a name, description, and tags. Click on **Next**. + + +6. Select a cluster profile. If you don't have a cluster profile for Edge Native, refer to the [Create Edge Native Cluster Profile](../model-profile.md) guide. Click on **Next** after you have selected a cluster profile. + +7. Review your cluster profile values and make changes as needed. Click on **Next**. + + +8. Provide the host cluster with the Virtual IP (VIP) address used by the physical site. You can also select any SSH keys in case you need to remote into the host cluster. You can also provide a list of Network Time Protocol (NTP) servers. Click on **Next**. + + +9. 
The node configuration page is where you can specify what Edge hosts make up the host cluster. Assign Edge hosts to the **master-pool** and the **worker-pool**. When you have completed configuring the node pools, click on **Next**. + + +10. The Settings page is where you can configure a patching schedule, security scans, backup settings, and set up Role-Based Access Control (RBAC). Review the settings and make changes if needed. Click on **Validate**. + + +11. Review the settings summary and click on **Finish Configuration** to deploy the cluster. + +After you create the cluster, the Palette Edge Host agent will start the installation process. You can track the installation progress in Palette. The cluster overview page displays a summary of the progress. Use the *Events* tab to review detailed logs. + +### Validate + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select the host cluster you created to view its details page. + + +4. Review the **Cluster Status**. Ensure the **Cluster Status** field displays **Running**. + +You can also use the command `kubectl get nodes` to review the status of all nodes in the cluster. Check out the [Access Cluster with CLI](../../../cluster-management/palette-webctl.md) guide to learn how to use `kubectl` with a host cluster. + + + +## Add an Edge Host to a Host Cluster + +You can add Edge hosts to the node pool of an existing host cluster. Use the following steps to add the Edge host to the node pool. + +### Prerequisites + +- A registered Edge host. + +- A host cluster of type Edge Native. + +:::caution + +When adding a new Edge host to an existing cluster, ensure you are not creating a scenario where [etcd](https://etcd.io/) could fail in establishing a quorum. Quorum failures typically result when there is an even number of nodes. +To learn more, check out the resource from the etcd documentation titled [Why an odd number of cluster members](https://etcd.io/docs/v3.3/faq/#why-an-odd-number-of-cluster-members). + +::: + +### Add Edge Host to Node Pool + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Use the **Cloud Types drop-down Menu** and select **Edge Native**. + + +4. Select the host cluster to add the registered Edge host. + + +5. Click on the **Nodes** tab. + + +6. Select the node pool to add the Edge host and click the **Edit** button. + + +7. Navigate to the **Edge Hosts drop-down Menu** and select your Edge host. + + +8. Confirm your changes. + +The Palette Edge Host agent will start the installation process. You can track the installation progress in Palette. The cluster overview page displays a summary of the progress. Use the **Events** tab to review detailed logs. + +### Validate + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select the host cluster you created to view its details page. + + +4. Review the **Cluster Status**. Ensure the **Cluster Status** field displays **Running**. + +You can also use the command `kubectl get nodes` to review the status of all nodes in the cluster. Check out the [Access Cluster with CLI](../../../cluster-management/palette-webctl.md) to learn how to use `kubectl` with a host cluster. 
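+
+For example, a quick check that every Edge host joined the cluster and reports a **Ready** status might look like the following sketch, which assumes `kubectl` is already configured with the cluster's kubeconfig.
+
+```shell
+# Sketch: list all cluster nodes with their roles, versions, and IP addresses.
+# Assumes kubectl is configured with this cluster's kubeconfig.
+kubectl get nodes --output wide
+```
+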
+ + + + + + diff --git a/docs/docs-content/clusters/edge/site-deployment/site-installation/create-registration-token.md b/docs/docs-content/clusters/edge/site-deployment/site-installation/create-registration-token.md new file mode 100644 index 0000000000..15a9d5d77c --- /dev/null +++ b/docs/docs-content/clusters/edge/site-deployment/site-installation/create-registration-token.md @@ -0,0 +1,75 @@ +--- +sidebar_label: "Create a Registration Token" +title: "Create a Registration Token" +description: "Learn how to create a tenant registration token for Edge host registrations." +hide_table_of_contents: false +sidebar_position: 10 +tags: ["edge"] +--- + + +To successfully register an Edge host with Palette you must provide the Edge Installer with a tenant registration token. To create a registration token, use the following steps. + + +## Prerequisites + +- Tenant admin access. + +
+
+## Create Token
+
+1. Log into [Palette](https://console.spectrocloud.com) as a tenant admin. 
+
+
+2. Switch to the tenant scope. 
+
+
+3. Navigate to the left **Main Menu** and select **Settings**. 
+
+
+4. Select **Registration Tokens** in the **Tenant Settings Menu**. 
+
+
+5. Click **Add New Registration Token**. 
+
+
+6. Fill out the input fields and **Confirm** your changes. 
+
+    - **Registration Token Name** - Used to name the token. 
+
+    - **Description** - An optional value used to explain the intended use of the token. 
+
+    - **Default Project** - Set a default project for Edge host registration. 
+
+    - **Expiration Date** - Set an expiration date for the token. 
+
+
+
+
+7. Save the **Token** value. 
+
+
+## Validate
+
+
+1. Log into [Palette](https://console.spectrocloud.com) as a tenant admin. 
+
+
+2. Switch to the tenant scope. 
+
+
+3. Navigate to the left **Main Menu** and select **Settings**. 
+
+
+4. Select **Registration Tokens** in the **Tenant Settings Menu**. 
+
+
+5. Verify that the tenant registration token is available. 
+
+ +## Next Steps + +The next stage in the Edge host site installation process is registering the Edge host. Go ahead and review the instructions in the [Register Edge Host](edge-host-registration.md) guide. \ No newline at end of file diff --git a/docs/docs-content/clusters/edge/site-deployment/site-installation/edge-host-registration.md b/docs/docs-content/clusters/edge/site-deployment/site-installation/edge-host-registration.md new file mode 100644 index 0000000000..6334530978 --- /dev/null +++ b/docs/docs-content/clusters/edge/site-deployment/site-installation/edge-host-registration.md @@ -0,0 +1,288 @@ +--- +sidebar_label: "Register Edge Host" +title: "Register Edge Host" +description: "Learn how to register your edge hosts with the Palette Management Console" +hide_table_of_contents: false +sidebar_position: 20 +tags: ["edge"] +--- + + +To use an Edge host with a host cluster, you must first register it with Palette. A registration token in the user data is required to complete the registration process. You have three options to register the Edge host with Palette. + +| **Method** | **Description** | **Set up Effort** | +|---|---|---| +| Auto Registration | Edge hosts can automatically register with Palette by using a *Registration Token*. This method requires you to specify the registration token in the user data. | Low | +| Manual Registration | You can manually enter a unique Edge host ID into Palette. | Low | +| QR Code | Scan a QR code that takes you to a web application that registers the Edge host with Palette. This method is considered advanced with the benefit of simplifying the Edge host registration without needing a tenant token or a manual entry.| High | + + +:::caution + +A registration token is required for the Edge host registration process. Without the registration token, the registration process will be unable to complete. Review the [Create Registration Token](create-registration-token.md) guide for steps on how to create a tenant registration token. + +::: + + +## Registration Method + +To register the Edge host, you are required to use a registration token with all three registration options. Edge hosts are registered under the default project chosen for the registration token. You can override the default project by specifying the project in the Edge Installer [user data](../../edge-configuration/installer-reference.md) configuration file. + +By default, devices automatically register during the site installation process when a tenant registration token value is present. Set the parameter `disableAutoRegister` to `true` in the Edge Installer configuration to disable auto registration and require manual device registration. + + +
+ + +```yaml +stylus: + site: + edgeHostToken: MjBhNTQxYzZjZTViNGFhM2RlYTU3ZXXXXXXXXXX + disableAutoRegister: true +``` + + +
+ +Select the registration method that best fits your organizational needs and review the steps to get started. + +- [Auto Registration](#auto-registration) + +- [Manual Registration](#manual-registration) + +- [QR Code Registration](#qr-code-registration) + + + +### Auto Registration + + + +You can automate the registration process by using registration tokens. + +If you selected a default project for the registration token, that is the project the Edge host will be registered under. You can override the default project by providing a project name in the user data. + + +
+ +```yaml +stylus: + site: + paletteEndpoint: api.spectrocloud.com + edgeHostToken: yourEdgeRegistrationTokenHere +``` + +#### Prerequisites + +- Tenant admin access. + + +- A tenant registration token is required. Refer to the [Create a Registration Token](create-registration-token.md) guide for more information. + +#### Create Registration Token + +To create a registration token, use the following steps. + +
+
+1. Log into [Palette](https://console.spectrocloud.com) as a tenant admin. 
+
+
+2. Switch to the tenant scope. 
+
+
+3. Navigate to the left **Main Menu** and select **Settings**. 
+
+
+4. Select **Registration Tokens** in the **Tenant Settings Menu**. 
+
+
+
+5. Click **Add New Registration Token**. 
+
+
+
+6. Fill out the input fields and **Confirm** your changes. 
+
+
+7. Save the **Token** value. 
+
+
+Your next step is to decide how you want to provide the registration token. You can include the registration token in the user data added to the device before shipping. Or you can create a user data ISO and have the registration token in the secondary user data. Check out the [Apply Site User Data](site-user-data.md) resource to learn more about creating site-specific user data. 
+
+#### Validate
+
+1. Log in to [Palette](https://console.spectrocloud.com). 
+
+
+2. Navigate to the left **Main Menu** and select **Clusters**. 
+
+
+3. Select the **Edge Hosts** tab. 
+
+
+Your Edge host is displayed and marked as **Registered** in the Edge hosts list. 
+
+
+
+### Manual Registration
+
+In this mode, you must manually register the Edge host in Palette by providing the Edge host's unique identifier. Optionally, you can specify a project name to associate the Edge host with a particular project. 
+
+
+Use the following steps to manually register an Edge host in Palette. 
+
+#### Prerequisites
+
+- Tenant admin access
+
+
+- A tenant registration token is required. Refer to the [Create a Registration Token](create-registration-token.md) guide for more information. 
+
+
+- Access to the Edge host's unique identifier. You can get the unique identifier or machine ID from the console output as the Edge host powers on. The Edge host unique identifier has the default prefix `edge-`. 
+
+  Example Output:
+  ```shell
+  time="2022-11-03T11:30:10Z" level=info Msg="starting stylus reset plugin"
+  time="2022-11-03T11:30:10Z" level=info Msg="reset cached site name from uuid, cached: edge-03163342f7f0e6fe20de095ed8548c93"
+  time="2022-11-03T11:30:10Z" level=info Msg="reset cached site name from uuid, new: edge-9e8e3342bafa9eb6d45f81c1f6714ea2" MachineID: edge-9e8e3342bafa9eb6d45f81c1f6714ea2
+  time="2022-11-03T11:30:19Z" level=info Msg="MachineIP: 10.239.10.145"
+  ```
+
+:::info
+
+ You can also specify an Edge host's unique identifier in the user data by using the `stylus.site.Name` parameter. Refer to the [Installer Configuration](../../edge-configuration/installer-reference.md) resource to learn more about available configuration parameters. 
+
+:::
+
+#### Register the Edge Host in Palette
+
+1. Log in to [Palette](https://console.spectrocloud.com). 
+
+
+2. Navigate to the left **Main Menu** and select **Clusters**. 
+
+
+3. Select the **Edge Hosts** tab. 
+
+
+4. Click on **Add Edge Hosts**. 
+
+
+5. Paste the Edge host's unique identifier in the **Edge Host IDs** input box. 
+
+
+6. Specify any tags or pairing keys if you desire. 
+
+
+7. Confirm your changes to register the Edge host. 
+
+#### Validate
+
+1. Log in to [Palette](https://console.spectrocloud.com). 
+
+
+2. Navigate to the left **Main Menu** and select **Clusters**. 
+
+
+3. Select the **Edge Hosts** tab. 
+
+
+Your Edge host is displayed and marked as **Registered** in the Edge hosts list. 
+
+
+### QR Code Registration
+
+You can provide a QR code-based automated registration to simplify the registration process. Upon boot up, a QR code is displayed on the Edge host's console if enabled during the installation phase. 
+ +Site operators scan the QR code to visit the registration page. This web page pre-populates the Edge host's unique ID in the web app and provides a list of edge sites they can associate with this edge host. + +Site operators can select a site and submit a registration request. The web application automatically creates the Edge host entry in Palette and defines a cluster with that Edge host. This workflow also supports adding Edge hosts to an existing host cluster. + +#### Prerequisites + +- A tenant registration token is required. Refer to the [Create a Registration Token](create-registration-token.md) guide for more information. + + +- Access to the Spectro Cloud GitHub repository that hosts the Palette Edge Registration App. Contact our sales team at [sales@spectrocloud.com](mailto:sales@spectrocloud.com) to gain access. + + +- Sufficient permissions to enable third-party integrations with a GitHub repository. + + +- A [Vercel](https://vercel.com/) account or a similar serverless website hosting service. + + +- Experience deploying and maintaining web applications to serverless website hosting services. + + +- git v2.39.0 or greater. + +#### Enable Palette Edge Registration App + + +We provide you with a sample serverless application, the Palette Edge Registration App. The Palette Edge Registration App is built on Next.js and deployed using the Vercel platform. + +Use the following steps to enable this workflow. + +
+
+
+1. Clone the repository. 
+
+
+2. Configure Vercel or your hosting provider to [automatically deploy](https://vercel.com/docs/concepts/deployments/git) pull requests against the main branch. 
+
+
+3. Update the sample site provided with your site names and locations. Make the required changes in the **pages/index.js** file. The **readme** file provides additional details about the files to be changed and instructions on how to build and test the application locally. 
+
+
+4. Map the infrastructure and add-on cluster profiles to be used with each site. Refer to the [Model Edge Native Cluster Profile](../model-profile.md) guide to learn more about Edge Native cluster profiles. 
+
+
+5. Specify site-specific Virtual IP (VIP) addresses or DNS values for each site. 
+
+
+6. Compile and test the code locally. 
+
+
+7. Create a GitHub pull request against your main branch to automatically trigger the build process and deploy the app. 
+
+
+8. Provide the URL of the deployed app to the Edge Installer user data. Use the `stylus.site.registrationURL` parameter. 
+
+
+   ```yaml
+   stylus:
+     site:
+       paletteEndpoint: api.spectrocloud.com
+       registrationURL: https://edge-registration-url.vercel.app
+   ```
+
+9. Your next step is to decide how you want to provide the registration URL value. You can include the registration URL in the user data added to the device before shipping. Or you can create a user data ISO and have the registration URL in the secondary user data. Check out the [Apply Site User Data](site-user-data.md) guide to learn more about creating site-specific user data. 
+
+
+10. Power on the Edge host device and scan the QR code. 
+
+
+11. Fill out the required information in the web application and submit the registration request. 
+
+#### Validate
+
+1. Log in to [Palette](https://console.spectrocloud.com). 
+
+
+2. Navigate to the left **Main Menu** and select **Clusters**. 
+
+
+3. Select the **Edge Hosts** tab. 
+
+
+Your Edge host is displayed and marked as **Registered** in the Edge hosts list. 
+
+
+## Next Steps
+
+The next step in the installation process is to add the Edge host to a cluster or to create an Edge Native host cluster. Check out the [Create Cluster Definition](cluster-deployment.md) guide to complete the last step of the installation process. 
diff --git a/docs/docs-content/clusters/edge/site-deployment/site-installation/site-installation.md b/docs/docs-content/clusters/edge/site-deployment/site-installation/site-installation.md
new file mode 100644
index 0000000000..9a53740b41
--- /dev/null
+++ b/docs/docs-content/clusters/edge/site-deployment/site-installation/site-installation.md
@@ -0,0 +1,161 @@
+---
+sidebar_label: "Perform Site Install"
+title: "Perform Site Install"
+description: "Learn how to deploy the Palette Edge installer on your edge hosts "
+hide_table_of_contents: false
+tags: ["edge"]
+---
+
+You perform a site installation by powering on the Edge host. The Edge Installer will start and begin the installation process, which may vary depending on your environment and Edge host type. 
+
+
+
+The Edge host site installation has three stages, as described in the table. 
+
+| Phase| Description| Required |
+| ---| ---| --- |
+| Apply Site User Data | As described in the [Multiple User Data Use Case](../../edgeforge-workflow/prepare-user-data.md#multiple-user-data-use-case), you can apply a secondary Edge Installer configuration user data to apply additional settings or override global values. This is optional but may be required for certain use cases. Refer to the [Apply Site User Data](site-user-data.md) guide to learn more. | No |
+| Registration | The Edge host is registered with Palette. The Edge host will remain in this phase until the registration process is complete. The *Registration* phase has a unique set of instructions. Refer to [Register Edge Host](edge-host-registration.md) for guidance.| Yes|
+|Cluster Provisioning | The Edge host boots into the specified provider Operating System and proceeds with the cluster deployment. You can find the instructions in the [Create Cluster Definition](cluster-deployment.md) resource. | Yes |
+
+
+## Installation
+
+Use the following steps to complete the Edge host installation. 
+
+
+:::info
+
+The community resource [Painting with Palette](https://www.paintingwithpalette.com/tutorials/) has a great Edge Native tutorial. 
+
+:::
+
+
+Select the target environment for your Edge host. 
+
+- [Bare Metal](#bare-metal)
+
+- [VMware](#vmware)
+
+
+
+### Bare Metal
+
+#### Prerequisites
+
+- Access to Palette and the ability to register an Edge host. 
+ +- Access to network information about the physical site, specifically the network Virtual IP Address (VIP). + +- Physical access to the Edge host. + +#### Site Install + +1. If you have a site-specific user data ISO, then insert the USB stick into the Edge host. + + +2. Power on the Edge host. The Edge host will boot into registration mode where it will connect with the Palette endpoint that was specified in the user data. + + +3. The Edge host will remain in a wait mode until you register the device in Palette. Review the [Register Edge Host](edge-host-registration.md) documentation to learn more about each registration method. + +
+ + :::info + + Once the Edge host is registered, Palette will wait for you to create a host cluster and assign the Edge host to the cluster. + + ::: + +4. The last step is to create a cluster definition if you don't have a host cluster that the Edge host can join. Follow the steps in the [Create Cluster Definition](cluster-deployment.md) to complete the site installation. + +When the cluster is created, the installation process will continue. The Palette Edge Host agent will download all required artifacts and reboot the Edge host. + +When the Edge host finishes rebooting, the *Cluster Provisioning* phase begins. In this phase, the system boots into the OS defined in the cluster profile, and cluster configuration begins. Kubernetes components are initialized and configured based on the specifications in the cluster profile. + +Any content bundles you provided are extracted and loaded into the container runtime process. Refer to the [EdgeForge Workflow](../../edgeforge-workflow/edgeforge-workflow.md) to learn more about content bundles. Any [cloud-init](../../edge-configuration/cloud-init.md) stages defined in the OS pack will also be invoked as the OS initializes. + +#### Validate + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select the host cluster you created to view its details page. + + +4. Review the **Cluster Status**. Ensure the **Cluster Status** field displays **Running**. + +You can also use `kubectl` to issue commands against the cluster. Check out the [Access Cluster with CLI](../../../cluster-management/palette-webctl.md#access-cluster-with-cli) to learn how to use `kubectl` with a host cluster. + +### VMware + +Use the following steps to complete the Edge host installation in a VMware environment. + +#### Prerequisites + +- Access to Palette and the ability to register an Edge host. + +- Access to network information about the physical site, specifically the network Virtual IP (VIP) address . + +- Physical access to the Edge host. + +- An Edge Installer OVF template. Check out the [Prepare Edge Hosts for Installation](../stage.md) for guidance on how to create an Edge Installer OVF template. + +#### Site Install + +Perform the following steps to proceed with the installation at the site in your VMware environment. + +
+ +1. Log in to vCenter Server using the vSphere Client. + + +2. Navigate to **VMs and Templates** and right-click on the desired folder, then select the option **Deploy VM(s) from this OVF template**. + + +3. Specify the location of the OVF template and start the deployment. + + +4. Proceed through the installation steps and deploy the virtual machine. + + +5. The VM will start up in the registration phase and wait for you to register the Edge host with Palette. If you provided the Edge Installer user data with an `EdgeHostToken` then the Edge host will automatically register with Palette. Otherwise, the Edge host will wait until you manually register the device in Palette. Go ahead and register the Edge host with Palette. Review the [Register Edge Host](edge-host-registration.md) for additional guidance. + +
+ + :::info + + Once the Edge host is registered, Palette will wait for you to create a host cluster and assign the Edge host to the cluster. + + ::: + +6. The last step is to create a cluster if you don't have a host cluster that the Edge host can join. Follow the steps in the [Create Cluster Definition](cluster-deployment.md) to complete the site installation. + +When the cluster is created, the installation process continues. The Palette Edge Host agent will download all required artifacts and reboot the Edge host. + +After the reboot, the *Cluster Provisioning* phase begins. In this phase, the system boots into the OS defined in the cluster profile, and cluster configuration begins. Kubernetes components are initialized and configured based on the specifications in the cluster profile. + +Any content bundles you provided are extracted and loaded into the container runtime process. Refer to the [EdgeForge Workflow](../../edgeforge-workflow/edgeforge-workflow.md) to learn more about content bundles. Any [cloud-init](../../edge-configuration/cloud-init.md) stages defined in the OS pack will also be invoked as the OS initializes. + +#### Validate + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select the host cluster you created to view its details page. + + +4. Review the **Cluster Status**. Ensure the **Cluster Status** field displays **Running**. + +You can also use `kubectl` to issue commands against the cluster. Check out the [Access Cluster with CLI](../../../cluster-management/palette-webctl.md) to learn how to use `kubectl` with a host cluster. + + +## Next Steps + +Your Edge host is now registered with Palette and is part of a host cluster. You can repeat the steps from [Prepare Edge Host for Installation](../stage.md) and [Perform Site Install](./site-installation.md) for any additional Edge host you want to add to the host cluster. The next step is for you to become more familiar with Day-2 responsibilities. Check out the [Manage Clusters](../../../cluster-management/cluster-management.md) guide to learn more about Day-2 responsibilities. diff --git a/docs/docs-content/clusters/edge/site-deployment/site-installation/site-user-data.md b/docs/docs-content/clusters/edge/site-deployment/site-installation/site-user-data.md new file mode 100644 index 0000000000..fdcbfb5a88 --- /dev/null +++ b/docs/docs-content/clusters/edge/site-deployment/site-installation/site-user-data.md @@ -0,0 +1,80 @@ +--- +sidebar_label: "Apply Site User Data" +title: "Apply Site User Data" +description: "Learn how to create a secondary Edge Installer configuration user data." +hide_table_of_contents: false +sidebar_position: 0 +tags: ["edge"] +--- + +You can provide site-specific Edge Installer configuration user data if you need to apply new values or override default values from the Edge Installer user data you created in the [Prepare Edge Hosts for Installation](../stage.md) step or, as often referenced, the *Installer Handoff* phase. + +Use the following steps to create an ISO file containing the additional user data. You will load the newly created ISO to a bootable device, such as a USB stick. + +## Prerequisites + +- A bootable device such as a USB drive, or a Preboot Execution Environment (PXE) server. + +- `mkisofs`, or `genisoimage`, or similar ISO management software. + +- `cdrtools` or `wodim` for Windows. + +## Create ISO + +1. 
Create a file called **user-data** that contains the additional configurations you want to override or inject. + +
+ + ```shell + touch user-data + ``` + +2. Create an empty **meta-data** file: + +
+ + ```shell + touch meta-data + ``` + +3. Create an ISO using the following command. + + MacOS/Linux: + + ```shell + mkisofs -output site-user-data.iso -volid cidata -joliet -rock user-data meta-data + ``` + + Windows: + + ```shell + genisoimage -output site-user-data.iso -volid cidata -joliet -rock user-data meta-data + ``` + + This generates an ISO file called site-user-data.iso in the current directory. +
+ +4. Copy the ISO to a bootable device such as a USB drive. + +
+ + :::info + + You can use several software tools to create a bootable USB drive, such as [balenaEtcher](https://www.balena.io/etcher). For a PXE server, there are open-source projects such as [Fog](https://fogproject.org/download) or [Windows Deployment Services](https://learn.microsoft.com/en-us/windows/deployment/wds-boot-support) for Windows. + + ::: + + +5. Once the Edge host arrives at the physical site, load the USB drive into the Edge host before powering it on. The Edge Installer will apply the new user data during the installation process. +
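+ +If you prefer the command line over a GUI tool, you can also write the ISO to a USB drive directly. This is a sketch only; the device path is a placeholder, and writing to the wrong device will destroy its data. + + ```shell + # Identify the USB device first, for example with lsblk, then write the ISO. + # /dev/sdX is a placeholder for your USB device. + sudo dd if=site-user-data.iso of=/dev/sdX bs=4M status=progress && sync + ```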
+ + + +## Validate + +You can validate that the ISO image is not corrupted by attempting to flash a bootable device. Most software that creates a bootable device will validate the ISO image before the flash process. + +## Next Steps + +Before you register your Edge host with Palette you must have a tenant registration token. Review the [Create Registration Token](create-registration-token.md) guide for steps on how to create a tenant registration token. diff --git a/docs/docs-content/clusters/edge/site-deployment/stage.md b/docs/docs-content/clusters/edge/site-deployment/stage.md new file mode 100644 index 0000000000..d62644d9f9 --- /dev/null +++ b/docs/docs-content/clusters/edge/site-deployment/stage.md @@ -0,0 +1,252 @@ +--- +sidebar_label: "Prepare Edge Hosts for Installation" +title: "Prepare Edge Hosts for Installation" +description: "Learn how to prepare edge hosts for installation before shipping them out to site for site installation" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["edge"] +--- + + +In this step, you will prepare the Edge host for installation. You will copy the following items to the storage device of the Edge host. + +- The Edge Installer image. + + +- The Edge Installer user data. + + +- Content bundles. + +:::info + +If you need to create any of the items mentioned above, review the [EdgeForge Workflow](../edgeforge-workflow/edgeforge-workflow.md) to learn how to create your own Edge artifacts. + +::: + +You can ship your Edge hosts after you complete this step. Use the following steps to prepare your Edge host for installation. + + +## Prepare Edge Host + +Pick the target environment for your Edge host. + +- [Bare Metal](#bare-metal) + +- [VMware](#vmware) + + + +## Bare Metal + +### Prerequisites + +- Edge Installer ISO file. Check out the [EdgeForge Workflow](/clusters/edge/edgeforge-workflow/palette-canvos/) to learn how to create an Edge Installer image or use the default Edge Installer image. + +- A Bare Metal appliance with USB drives. + + +- The ability to modify the boot order settings to boot from a USB drive. + + +- A USB disk containing the installer ISO + + +The following items are optional and not required but may apply to your use case: + +
+ +- USB disk that contains a user data ISO. This is applicable in [multiple user data](../edgeforge-workflow/prepare-user-data.md) scenarios where you want to override or provide additional configurations after the Edge host is powered on at the physical site. + + +- USB disk containing the content bundle ISO. You can avoid this by creating a custom installer. Refer to the [Build Edge Artifacts](../edgeforge-workflow/palette-canvos.md) guide. + + + +### Installer Handoff + +1. Insert the USB drive containing the Edge Installer ISO and potentially your user data. + + +2. If you created a content bundle and loaded it to a USB disk, then insert the content bundle USB drive. + + +3. Power on the Edge host. + + +4. Wait for the Edge Installer to complete copying content to the hard drive. The Edge host will reboot by default upon completion unless you specify a different option in the Edge Installer configuration user data. + +5. Repeat steps one through four for all Edge hosts. + + +6. Remove the USB disks and ship your Edge host devices to the site for installation. + + +### Validate + +You can validate that the Edge host is ready for the site installation by simulating a site deployment on one of the Edge hosts. The simulation process will require you to complete the installation process and reset the device after the validation. + + +## VMware + +You will create a Virtual Machine Disk (VMDK) from the Edge Installer ISO and upload it to a vCenter environment. In the vCenter environment, you will convert the VMDK to a VM template, and export it out as an OVF template. + +
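+ +If you prefer to script the final template conversion and OVF export instead of using the vSphere Client, the `govc` CLI that you install on the build server later in this guide can perform both actions. The sketch below is only an outline; the VM name and output folder are placeholders, and `govc` must be configured with your vCenter credentials as shown in the Installer Handoff steps. + + ```shell + # Sketch only: assumes GOVC_URL, GOVC_USERNAME, and GOVC_PASSWORD are already exported. + # Mark the finished VM as a template, then export it as an OVF package. + govc vm.markastemplate [your-vm-name] + govc export.ovf -vm [your-vm-name] ./ovf-output + ```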
+ + +### Prerequisites + +- Edge Installer ISO file. Check out the [build images](../edgeforge-workflow/palette-canvos.md) guide to learn how to create an Edge Installer image or use the default Edge Installer image. + + +- vCenter environment with sufficient resources and access privileges to complete the following actions: + - Upload files to a datastore. + - Ability to create VMs. + + + +### Installer Handoff + +1. Log in to vCenter Server using the vSphere Client. + + +2. Prepare a build server by launching a VM with Ubuntu version 20.04 or greater in your VMware environment. + + +3. Issue the following commands to prepare your server for VMDK creation. +
+ + ```shell + apt update + apt install qemu qemu-kvm \ + libvirt-clients libvirt-daemon-system bridge-utils virt-manager + systemctl enable --now libvirtd + systemctl enable --now virtlogd + mkdir -p /etc/apt/keyrings + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg + echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + apt update + apt install docker-ce docker-ce-cli containerd.io \ + docker-compose-plugin + ``` + + If you need a graphical user interface (GUI), add `x11-apps` to the `apt install` command. +
+ + ```shell + apt install x11-apps + ``` + +4. You can add additional packages for content creation, compression, and preparing your workspace. + +
+ + ```shell + curl -L -o - "https://github.com/vmware/govmomi/releases/latest/download/govc_$(uname -s)_$(uname -m).tar.gz" | tar -C /usr/local/bin -xvzf - govc + mkdir -p ~/workspace/ + cd ~/workspace/ + git clone https://github.com/spectrocloud/stylus-image-builder.git + ``` + + If you need ZSTD for compression, use the following command. The `govc` tool for interacting with vCenter was already installed by the `curl` command above. + + ```shell + apt install zstd + ``` + +5. Build the VMDK from the Edge Installer ISO to serve as a template for deploying Edge hosts to virtual machines. Issue the following commands on your build server. + + ```shell + cd ~/workspace/stylus-image-builder/ + chmod +x entrypoint.sh + export ISO_URL=[your-installer-name].iso + export PALETTE_ENDPOINT=[your tenant].spectrocloud.com + export REGISTRATION_URL=[QR Code registration app link] + export EDGE_HOST_TOKEN=[token generated on Palette Portal] + export DISK_SIZE=100000M + EMBED=false make docker-build + nohup make vmdk & + ``` + + :::info + + If you are using a *Tenant Registration Token* for auto-registration, you can omit the environment variable `REGISTRATION_URL`. + + ::: + + A VMDK file is generated in the **stylus-image-builder/images** folder. Rename this VMDK to a preferred installer name. Ensure the VMDK file retains the `.vmdk` extension. + + +6. Transfer the VMDK to a datastore in your VMware environment. Review the commands below and ensure you replace the placeholders with the respective values from your environment. + + ```shell + export GOVC_URL=https://[IP address OR the DNS of vCenter] + export GOVC_USERNAME=[vcenter username] + export GOVC_PASSWORD=[vcenter password] + govc datastore.upload -ds=[datastore name] images/[your-installer-name].vmdk [folder in datastore]/[your-installer-name].vmdk + govc datastore.cp -ds=[datastore name] [folder in datastore]/[your-installer-name].vmdk [folder in datastore]/[your-installer-name]-uncompressed.vmdk + ``` + + If you are using test or development environments, you may need to enable the following option. This environment variable is not recommended for production environments. +
+ + ```shell + export GOVC_INSECURE=1 + ``` + +7. Create a VM from the VMDK by logging into your vCenter console in the UI. + + +8. Navigate to the **Datacenter/Folder**, under the **VMs and Templates** section. + + +9. Start the **New Virtual Machine** deployment wizard. + + +10. Choose a cluster that has access to the datastore used for storing the VMDK. Choose the datastore where the VMDK is stored. + + + +11. Select **Ubuntu Linux (64)** as your guest OS version. This is required even though you will be launching RHEL-based clusters. + + + +12. Select the Hardware settings. + + + +13. Delete the hard drive displayed by default. Add a new device of the type **Existing Hard Disk**. For this device, select the option **Datastore ISO file**. + + + +14. Navigate to the datastore folder with the uncompressed VMDK and select the VMDK. + + + +15. Finish the creation wizard and save your virtual machine. + + +16. Navigate to **VMs and Templates** and right-click on the newly created VM. Select **Template** and **Convert to Template**. + + + +17. Navigate to **VMs and Templates** and right-click on the newly created VM Template. Select **Export to OVF Template**. + + + +You can ship this OVF template along with the Edge host to the physical site. Use the OVF template for the site installation. + +### Validate + +You can validate that the Edge host is ready for the site installation by simulating a site deployment on one of the Edge hosts. The simulation process will require you to complete the installation process and reset the device after the validation. + + + + +## Next Steps + +Now that you have completed the staging process, you can ship the Edge hosts to the destination site. Proceed to the [Perform Site Install](site-installation/site-installation.md) step. \ No newline at end of file diff --git a/docs/docs-content/clusters/imported-clusters/_category_.json b/docs/docs-content/clusters/imported-clusters/_category_.json new file mode 100644 index 0000000000..e7e7c54966 --- /dev/null +++ b/docs/docs-content/clusters/imported-clusters/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 40 +} diff --git a/docs/docs-content/clusters/imported-clusters/attach-add-on-profile.md b/docs/docs-content/clusters/imported-clusters/attach-add-on-profile.md new file mode 100644 index 0000000000..890212445d --- /dev/null +++ b/docs/docs-content/clusters/imported-clusters/attach-add-on-profile.md @@ -0,0 +1,80 @@ +--- +sidebar_label: "Attach an Add-on Profile" +title: "Attach an Add-on Profile" +description: "Learn how to attach an add-on profile to an imported cluster in Palette." +hide_table_of_contents: false +sidebar_position: 10 +tags: ["clusters", "imported clusters"] +--- + + +With imported clusters, Palette cannot manage the core layers found in a cluster profile, such as the Operating System, the Kubernetes distribution and version, the container network interface, and the container storage interface. +You can, however, use add-on cluster profiles to deploy additional software dependencies into your cluster and have Palette manage these dependencies through the normal cluster profile lifecycle. + + +In this how-to, you learn how to add an add-on cluster profile to an imported cluster. + + +## Prerequisites + +* An imported cluster with full permissions. Refer to the [Migrate to Full Permissions](migrate-full-permissions.md) guide to learn how to migrate an imported cluster from read-only mode to full-permissions mode. + + +* An add-on cluster profile. 
Refer to the [Create an Add-on Profile](../../cluster-profiles/create-add-on-profile.md) to learn how to create an add-on cluster profile. + + +## Attach an Add-on Profile + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + + +3. Select your imported cluster to access its details page. + + +4. From the cluster details page, select the **Profile** tab and click on **Attach Profile**. + +
+ + ![The cluster details view when the profile tab is selected](/clusters_imported-clusters_attach-add-on-profile_cluster-details-profile-tab.png) + +
+ +5. Select an add-on profile and click on **Confirm**. + + +6. In the following screen, you can update the add-on profile if desired. Click on **Save** to deploy the add-on cluster profile. + + +7. Navigate to the **Overview** tab to monitor the deployment. When the add-on cluster profile is deployed, the **Cluster Profile** status displays as a green circle next to the layer. +
+ + ![A cluster profile with an add-on profile deployed successfully](/clusters_imported-clusters_attach-add-on-profile_cluster-details-app-deployed.png) + + +
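+ +If you also want to confirm the deployment from the command line, you can check that the add-on profile's workloads are running on the imported cluster. This is a generic sketch; the namespace depends on the packs in your add-on profile, and kubectl must already be configured for the imported cluster. + + ```shell + # List workloads across all namespaces and confirm the add-on's pods are Running. + kubectl get pods --all-namespaces + + # Optionally narrow the output to a namespace created by your add-on profile. + # The namespace name below is a placeholder. + kubectl get pods --namespace <add-on-namespace> + ```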
+ + + +You now have an add-on cluster profile deployed onto your imported cluster. Use the steps above to add your custom add-on cluster profile to an imported cluster. + + +## Validate + +1. Log in to [Palette](https://console.spectrocloud.com). + + + +2. Navigate to left **Main Menu** and select **Clusters**. + + + +3. Select your imported cluster to access its details page. + + + +4. Verify the **Cluster Profile** section of the page has a green circle next to each layer. If your application exposes a service URL, use the URL to visit the application and verify it's operational. + diff --git a/docs/docs-content/clusters/imported-clusters/cluster-import.md b/docs/docs-content/clusters/imported-clusters/cluster-import.md new file mode 100644 index 0000000000..3130794a2e --- /dev/null +++ b/docs/docs-content/clusters/imported-clusters/cluster-import.md @@ -0,0 +1,236 @@ +--- +sidebar_label: "Import a Cluster" +title: "Import a Cluster" +description: "Learn how to import clusters and which Palette operations you can use to manage them." +hide_table_of_contents: false +sidebar_position: 0 +tags: ["clusters", "imported clusters"] +--- + +When importing a cluster into Palette, you can select the mode you want Palette to use when managing the cluster. You can choose between read-only mode or full permission. Refer to the [Imported Clusters](imported-clusters.md#import-modes) reference page to learn more about each mode. + + +Select the mode you want to use when importing a cluster into Palette. + +- [Full Permissions](#full-permissions) + +- [Read-Only Mode](#read-only-mode) + + +## Full Permissions + +### Prerequisites + +- Kubernetes version >= 1.19.X + + +- Ensure your environment has network access to Palette SaaS or your self-hosted Palette instance. + + +- Ensure [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed and available in your local workstation. + + +- Access to your cluster environment through kubectl. + + +### Import a Cluster + +1. Log in to [Palette](https://spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Click on **Add New Cluster** and select **Import Cluster** in the pop-up box. + + +4. Fill out the required information and make your selections: + * Cluster Name - The name of the cluster you want to import. + * Cloud Type - Select the infrastructure environment your cluster resides in. Select **Generic** if the environment list doesn't contain your specific environment, but be aware of the limitations with generic clusters. + * Proxy - Optional and only available for generic clusters. Specify a network proxy address or DNS value. + * No Proxy - Optional and only available for generic clusters. Specify a no proxy address or DNS value. + +5. Select **Full-permission mode** and click on **Create & Open Cluster Instance** to start the import. + + + + +6. You will be redirected to the cluster details page. A set of instructions with commands is displayed on the right side of the screen. You will need to issue the following commands to complete the import process. + +
+ + + ![A view of the cluster details page with the sidebar instructions box](/clusters_imported-clusters_full-permissions-instructions.png) + +
+ +7. We recommend you install the metrics server so that Palette can expose and provide you with information about the cluster. Installing the metrics server is not required but is needed for Palette to expose cluster metrics. To enable the metrics server, open a terminal session and issue the commands below against the Kubernetes cluster you want to import. + +
+ + ```shell + helm repo add bitnami https://charts.bitnami.com/bitnami && \ + helm install my-release bitnami/metrics-server + ``` + +8. To install the Palette agent, issue the command displayed in the cluster details page **Install the agent** section against the Kubernetes cluster you want to import. The command is customized for your cluster as it contains the assigned cluster ID. Below is an example output of the install command. + +
+ + ```hideClipboard shell + kubectl apply --filename https://api.spectrocloud.com/v1/spectroclusters/6491d4a94c39ad82d3cc30ae/import/manifest + ``` + + Output + ```hideClipboard shell + namespace/cluster-6491d4a94c39ad82d3cc30ae created + serviceaccount/cluster-management-agent created + clusterrole.rbac.authorization.k8s.io/read-only-mode created + clusterrolebinding.rbac.authorization.k8s.io/read-only-mode created + configmap/log-parser-config created + configmap/upgrade-info-8kfc2m8mt8 created + configmap/version-info-kbk5hk992f created + secret/spectro-image-pull-secret created + priorityclass.scheduling.k8s.io/spectro-cluster-critical created + deployment.apps/cluster-management-agent-lite created + configmap/cluster-info created + configmap/hubble-info created + secret/hubble-secrets created + ``` + +9. When the Palette agent completes initializing, the cluster import procedures at right will disappear, and your cluster will transition to **Running** status within a few minutes. + +
+ + ![A view of an imported cluster's details page](/clusters_imported-clusters_full-permissions.png) + +
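+ +As an optional command-line check, you can verify that the agent deployment is healthy and that the metrics server responds. The namespace below comes from the example output above and will differ for your cluster. + + ```shell + # The agent runs in a namespace named after the assigned cluster ID. + kubectl get deployments --namespace cluster-6491d4a94c39ad82d3cc30ae + + # If you installed the metrics server, node metrics become available after a minute or two. + kubectl top nodes + ```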
+ +You now have imported a cluster into Palette with full permissions. + + + +### Validate + +1. Log in to [Palette](https://spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select your imported cluster from the cluster list. + + +4. Review the **Cluster Status** row from the cluster details view. A successful cluster import will have the cluster status **Running**. + + +## Read-only Mode + +### Prerequisites + +- Kubernetes version >= 1.19.X + + +- Ensure your environment has network access to Palette SaaS or your self-hosted Palette instance. + + +- Ensure [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed and available in your local workstation. + + +- Access to your cluster environment through kubectl. + +### Import a Cluster + +1. Log in to [Palette](https://spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Click on **Add New Cluster** and select **Import Cluster** in the pop-up box. + + +4. Fill out the required information and make the proper selections: + * Cluster Name - The name of the cluster you want to import. + * Cloud Type - Select the infrastructure environment your cluster resides in. Select **Generic** if the environment list doesn't contain your specific environment but be aware of the limitations with generic clusters. + * Proxy - Optional and only available for generic clusters. Specify a network proxy address or DNS value. + * No Proxy - Optional and only available for generic clusters. Specify a no proxy address or DNS value. + +5. Select **Read-only mode** and click on **Create & Open Cluster Instance** to start the import action. + + +6. You will be redirected to the cluster details page. A set of instructions with commands is displayed on the right-hand side of the screen. You will need to issue the following commands to complete the import process. + +
+ + + ![A view of the cluster details page with the sidebar instructions box](/clusters_imported-clusters_read-only-instructions.png) + +
+ +7. We recommend you install the metrics server so that Palette can expose and provide you with information about the cluster. Installing the metrics server is not required but is needed for Palette to expose cluster metrics. To enable the metrics server, open a terminal session and issue the commands below against the Kubernetes cluster you want to import. +
+ + ```shell + helm repo add bitnami https://charts.bitnami.com/bitnami && \ + helm install my-release bitnami/metrics-server + ``` + + +8. To install the Palette agent, issue the command displayed in the cluster details page **Install the read-only agent** section against the Kubernetes cluster you want to import. The command is customized for your cluster as it contains the assigned cluster ID. Below is an example output of the install command. +
+ + ```hideClipboard shell + kubectl apply --filename https://api.spectrocloud.com/v1/spectroclusters/6491d4a94c39ad82d3cc30ae/import/manifest + ``` + + Output + ```hideClipboard shell + namespace/cluster-6491d4a94c39ad82d3cc30ae created + serviceaccount/cluster-management-agent created + clusterrole.rbac.authorization.k8s.io/read-only-mode created + clusterrolebinding.rbac.authorization.k8s.io/read-only-mode created + configmap/log-parser-config created + configmap/upgrade-info-8kfc2m8mt8 created + configmap/version-info-kbk5hk992f created + secret/spectro-image-pull-secret created + priorityclass.scheduling.k8s.io/spectro-cluster-critical created + deployment.apps/cluster-management-agent-lite created + configmap/cluster-info created + configmap/hubble-info created + secret/hubble-secrets created + ``` + +9. Once the Palette agent completes the initialization, the side view drawer on the right will disappear, and your cluster will transition to a status of **Running** after a few moments. + +
+ + ![A view of an imported cluster's details page](/clusters_imported-clusters_read-only.png) + +
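+ +You can optionally confirm from the command line that the read-only RBAC objects and the agent from the example output above were created. Replace the namespace with the cluster ID shown in your own install command. + + ```shell + # The read-only mode ClusterRole and binding are created by the install manifest. + kubectl get clusterrole read-only-mode + kubectl get clusterrolebinding read-only-mode + + # The agent deployment runs in the cluster-<id> namespace from your install command. + kubectl get pods --namespace cluster-6491d4a94c39ad82d3cc30ae + ```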
+ + +You now have imported a cluster into Palette in read-only mode. Keep in mind that a cluster imported in read-only mode has limited capabilities. You can migrate to full permissions anytime by clicking **Migrate To Full Permissions**. + + + +### Validate + +1. Log in to [Palette](https://spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select your imported cluster from the cluster list. + + +4. Review the **Cluster Status** row from the cluster details view. A successful cluster import displays cluster status as **Running**. + + + +## Next Steps + +Depending on what mode you selected for the migration, your next step is to either [Attach an Add-on Profile](attach-add-on-profile.md) or you can [Migrate to Full Permissions](migrate-full-permissions.md). diff --git a/docs/docs-content/clusters/imported-clusters/imported-clusters.md b/docs/docs-content/clusters/imported-clusters/imported-clusters.md new file mode 100644 index 0000000000..004fd2160c --- /dev/null +++ b/docs/docs-content/clusters/imported-clusters/imported-clusters.md @@ -0,0 +1,101 @@ +--- +sidebar_label: "Imported Clusters" +title: "Imported Clusters" +description: "Learn how to manage imported clusters and what operations are supported with Palette." +hide_table_of_contents: false +sidebar_custom_props: + icon: "cloud-arrow-down" +tags: ["clusters", "imported clusters"] +--- + +Existing Kubernetes clusters not deployed through Palette can be imported into Palette for visibility, limited Day -2 management, and additional capabilities such as application lifecycle management. You can import Kubernetes clusters from various infrastructure providers, such as public and private clouds and bare-metal environments. + +Palette supports importing _generic_ or _cloud-specific_ clusters. Cloud-specific clusters enable more functionality because Palette understands how to interact with the infrastructure provider's API. Cloud-specific clusters provide the same experience as Palette deployed clusters. + +The generic type is for a cluster that is deployed in an environment where Palette lacks integration with the underlying infrastructure provider's API. Palette can support basic operations for generic clusters, such as reporting metrics, conducting scans, scheduling backups, and applying and managing add-on profiles. However, Day-2 activities are not supported in generic clusters. + + + Refer to the [Supported Infrastructure Providers](imported-clusters.md#supported-infrastructure-providers) section to learn more about supported infrastructure environments. + + +To get started with a cluster import, refer to the [Import a Cluster](cluster-import.md) guide to learn more. + +## Import Modes + +To determine Palette's control over the imported cluster, you can choose the management mode you prefer. Refer to the table below for more information on each mode. + + +| Mode | Description | +|---|---| +| Read-only| This mode allows you to access information about the cluster, such as event logs, cost, and health checks. The read-only mode does not support Day-2 activities. | +| Full Permission| This mode provides full cluster management, depending on the cluster, generic, or cloud-specific. This mode also supports the ability to deploy add-on cluster profiles. | + + + +## Supported Infrastructure Providers + + +The following infrastructure providers are supported for cluster imports. If an environment is not listed below, select the **Generic** type when importing a cluster. + + +
+ +| Infrastructure Provider | Type | +|---|---| +| AWS | Cloud Specific | +| Azure | Cloud Specific | +| Google Cloud Platform | Cloud Specific | +| VMware | Cloud Specific | +|OpenShift |Cloud Specific | +| AWS EKS-Anywhere | Cloud Specific | +| Generic| Generic| + + +
+ +### Self-Hosted Support + +Self-hosted Palette also supports importing clusters. You must ensure network connectivity is available between the target import cluster and the Palette instance. + +
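+ +One way to confirm connectivity before you start the import is to reach your Palette instance from inside the target cluster. The sketch below assumes kubectl access to that cluster; the hostname is a placeholder for your self-hosted Palette endpoint. + + ```shell + # Launch a temporary pod and check that the Palette endpoint is reachable over HTTPS. + # palette.example.com is a placeholder for your self-hosted Palette instance. + kubectl run connectivity-check --rm -it --restart=Never --image=curlimages/curl \ + --command -- curl -skI https://palette.example.com + ```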
+ +## Limitations + +A few restrictions apply to all cluster imports that you need to be aware of before importing a cluster. + +
+ +| Limitation | Description| +|---|---| +| Full Cluster Profile usage| You cannot use a full cluster profile. You are limited to using add-on profiles when deploying cluster profiles to imported clusters.| +| Kubeconfig file access| You cannot download the cluster's kubeconfig file from Palette. You must use the underlying infrastructure provider to access the kubeconfig file.| + + +
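+ +For example, if the imported cluster is an Amazon EKS cluster, you would pull the kubeconfig directly from AWS rather than from Palette. The command below is a sketch that assumes the AWS CLI is configured; other providers offer equivalent tooling. + + ```shell + # Write a kubeconfig entry for the imported cluster using the hosting provider's CLI. + # The cluster name and region are placeholders. + aws eks update-kubeconfig --name my-imported-cluster --region us-east-1 + ```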
+ + +:::caution + +Imported generic clusters lack many Day-2 management operations such as scaling nodes, adding worker pools, or any operations that require Palette to have knowledge of the underlying infrastructure. + +::: + +
+ +## Delete Imported Cluster + +You can remove a cluster by following the standard cluster removal steps. Refer to the [Delete a Cluster](../cluster-management/remove-clusters.md) for instructions. Be aware that Palette will not delete the actual cluster. Palette will remove the link to the imported cluster and instruct the Palette agent to remove itself from the cluster and all of the agent's dependencies that were installed during the import process. To delete the cluster, you must manually perform the delete action in the hosting infrastructure provider. + + +## Resources + +- [Import a Cluster](cluster-import.md) + + +- [Attach an Add-on Profile](attach-add-on-profile.md) + + +- [Migrate to Full Permissions](migrate-full-permissions.md) + + + diff --git a/docs/docs-content/clusters/imported-clusters/migrate-full-permissions.md b/docs/docs-content/clusters/imported-clusters/migrate-full-permissions.md new file mode 100644 index 0000000000..dbd8110143 --- /dev/null +++ b/docs/docs-content/clusters/imported-clusters/migrate-full-permissions.md @@ -0,0 +1,114 @@ +--- +sidebar_label: "Migrate to Full Permissions" +title: "Migrate to Full Permissions" +description: "Learn how to migrate an imported cluster from read-only mode to full-permissions mode." +hide_table_of_contents: false +sidebar_position: 20 +tags: ["clusters", "imported clusters"] +--- + +## Prerequisites + +* An imported cluster in read-only mode. Refer to the [Import a Cluster](cluster-import.md) guide to learn how to import a cluster into Palette. + + +* Ensure [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed and available in your local workstation. + + +- Access to your cluster environment through kubectl. + + +## Migrate to Full Permissions + +1. Log in to [Palette](https://spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + + +3. Select your imported cluster from the cluster list. + + +4. From the cluster details page, click on **Migrate To Full Permissions** to start the migration process. A prompt will ask you to confirm your decision. Select **OK**. + +
+ + ![The cluster details view with an arrow pointing to the migrate button](/clusters_imported-clusters_migrate-full-permissions_cluster-details-page.png) + +
+ +5. A side view drawer will slide out from the right side of the cluster details page. Copy the displayed command to your clipboard. + + + +6. Open a terminal on your local workstation and validate you are in the correct Kubernetes context. You can use the following command to verify the Kubernetes context. If you find yourself in an incorrect Kubernetes context, switch to the proper context so you are interacting with the imported cluster when using kubectl. +
+ + ```shell + kubectl config current-context + ``` + +7. Issue the command you copied in your terminal to start the migration. Your terminal output will look similar to the example output below. + +
+ + ```hideClipboard shell + namespace/cluster-6495ea8d4c39b720c58a5f5f configured + serviceaccount/cluster-management-agent unchanged + clusterrolebinding.rbac.authorization.k8s.io/cma-lite-cluster-admin-binding created + configmap/log-parser-config unchanged + configmap/upgrade-info-8kfc2m8mt8 unchanged + configmap/version-info-kbk5hk992f unchanged + secret/spectro-image-pull-secret unchanged + priorityclass.scheduling.k8s.io/spectro-cluster-critical configured + deployment.apps/cluster-management-agent-lite configured + configmap/cluster-info unchanged + configmap/hubble-info unchanged + secret/hubble-secrets configured + customresourcedefinition.apiextensions.k8s.io/awscloudconfigs.cluster.spectrocloud.com created + customresourcedefinition.apiextensions.k8s.io/azurecloudconfigs.cluster.spectrocloud.com created + customresourcedefinition.apiextensions.k8s.io/clusterprofiles.cluster.spectrocloud.com created + customresourcedefinition.apiextensions.k8s.io/coxedgecloudconfigs.cluster.spectrocloud.com created + customresourcedefinition.apiextensions.k8s.io/edgecloudconfigs.cluster.spectrocloud.com created + customresourcedefinition.apiextensions.k8s.io/edgenativecloudconfigs.cluster.spectrocloud.com created + customresourcedefinition.apiextensions.k8s.io/gcpcloudconfigs.cluster.spectrocloud.com created + customresourcedefinition.apiextensions.k8s.io/libvirtcloudconfigs.cluster.spectrocloud.com created + customresourcedefinition.apiextensions.k8s.io/maascloudconfigs.cluster.spectrocloud.com created + customresourcedefinition.apiextensions.k8s.io/nestedcloudconfigs.cluster.spectrocloud.com created + customresourcedefinition.apiextensions.k8s.io/openstackcloudconfigs.cluster.spectrocloud.com created + customresourcedefinition.apiextensions.k8s.io/packs.cluster.spectrocloud.com created + customresourcedefinition.apiextensions.k8s.io/spectroclusters.cluster.spectrocloud.com created + customresourcedefinition.apiextensions.k8s.io/tencentcloudconfigs.cluster.spectrocloud.com created + customresourcedefinition.apiextensions.k8s.io/vspherecloudconfigs.cluster.spectrocloud.com created + serviceaccount/palette-manager created + clusterrolebinding.rbac.authorization.k8s.io/palette-lite-cluster-admin-binding created + configmap/palette-version-info-dd8mkdffbt created + priorityclass.scheduling.k8s.io/palette-spectro-cluster-critical created + deployment.apps/palette-lite-controller-manager created + job.batch/palette-import-presetup-job created + ``` + + +8. In a few minutes, the side drawer will disappear, and the **Profile**, **Workloads**, **Scan**, and **Backups** tabs will become unlocked and available for interaction. + +
+ + ![A cluster details page with an imported cluster after a completed migration](/clusters_imported-clusters_migrate-full-permissions_cluster-details-page-import-complete.png) + + +You now have successfully migrated a read-only mode cluster to full-permissions mode. Imported clusters in full-permissions mode allow Palette to manage more Day-2 activities. You can also now deploy add-on cluster profiles to the cluster. Refer to the [Attach an Add-on Profile](attach-add-on-profile.md) guide to learn more. + +## Validate + +1. Log in to [Palette](https://spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select your imported cluster from the cluster list. + + +4. Review the **Cluster Status** row from the cluster details view. A successful cluster import displays cluster status as **Running**. \ No newline at end of file diff --git a/docs/docs-content/clusters/palette-virtual-clusters/_category_.json b/docs/docs-content/clusters/palette-virtual-clusters/_category_.json new file mode 100644 index 0000000000..c3460c6dbd --- /dev/null +++ b/docs/docs-content/clusters/palette-virtual-clusters/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 30 +} diff --git a/docs/docs-content/clusters/palette-virtual-clusters/configure-oidc-virtual-cluster.md b/docs/docs-content/clusters/palette-virtual-clusters/configure-oidc-virtual-cluster.md new file mode 100644 index 0000000000..3fd36c99ab --- /dev/null +++ b/docs/docs-content/clusters/palette-virtual-clusters/configure-oidc-virtual-cluster.md @@ -0,0 +1,116 @@ +--- +sidebar_label: "Configure OIDC for a Virtual Cluster" +title: "Configure OIDC for a Virtual Cluster" +description: "Learn how to configure OIDC for Palette Virtual Clusters." +icon: "" +hide_table_of_contents: false +sidebar_position: 5 +tags: ["clusters", "cluster groups", "virtual clusters"] +--- + + +Enabling OpenID Connect (OIDC) Identity Provider (IDP) for Palette Virtual Clusters offers several benefits. First, it enables single sign-on, allowing you to log in once and access multiple virtual clusters without the need for separate authentication. Second, it facilitates centralized user management, making it easier to manage user accounts, permissions, and access control in a single location. Finally, OIDC integration allows for seamless integration with third-party identity providers, ensuring consistent authentication and authorization across the infrastructure. Overall, enabling OIDC enhances security, simplifies user management, and provides a seamless authentication experience for users accessing virtual clusters. + + +## Prerequisites + +- A healthy host cluster that you will use to create a cluster group. + +- A cluster group. Review [Create and Manage Cluster Groups](../../clusters/cluster-groups/create-cluster-group.md) for guidance. + +:::caution + +We recommend configuring a cluster group with OIDC *before* you create virtual clusters. This will ensure that OIDC information from the cluster group configuration is properly inserted in the kubeconfig file that is generated for the virtual cluster. + +::: + +- The OIDC issuer URL, OIDC client ID, and OIDC secret. You can obtain this information from your identity provider. + +- At a minimum, the `cluster.delete` permission to access the generated kubeconfig. For more information, check out [Kubeconfig files](../cluster-management/kubeconfig.md#kubeconfig-files). If you are deploying virtual clusters, you need the `clusterGroup.update` permission. 
- [kubelogin](https://github.com/int128/kubelogin) installed. This is a kubectl plugin for Kubernetes OIDC authentication, also known as `kubectl oidc-login`. + + +## Configure OIDC + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + +2. Navigate to the left **Main Menu** and click on **Cluster Groups**. + + If a cluster group does not exist, you will need to create a host cluster and then create the cluster group. Refer to [Create and Manage Cluster Groups](../../clusters/cluster-groups/create-cluster-group.md) for guidance. + +3. Select the cluster group to which you will deploy a virtual cluster. + +4. In the **Host Clusters** tab that displays on the next page, click on **Settings**. + +5. In the slide panel that opens, select the **Settings** option. The cluster group YAML file displays. + +6. Locate the `vcluster.extraArgs` parameter section of the cluster group configuration file and uncomment the lines shown in the example. + +![Screenshot of the cluster group YAML showing oidc-related parameters to uncomment and update.](/clusters_palette-virtual-clusters_configure-vcluster-oidc.png) + +7. Update the `vcluster.extraArgs` section with your identity provider information. + + ```yaml + vcluster: + extraArgs: + - --kube-apiserver-arg="oidc-issuer-url=" + - --kube-apiserver-arg="oidc-client-id=" + - --kube-apiserver-arg="oidc-username-claim=email" + - --kube-apiserver-arg="oidc-groups-claim=groups" + ``` + +8. If your identity provider requires a client secret, uncomment the `oidc-client-secret` parameter in the `clientConfig` section of the cluster group YAML file, and add the client secret. + + ```yaml + clientConfig: + oidc-client-secret: secret-value + ``` + +9. From the **User Menu**, switch to *App Mode*. From the left **Main Menu**, click on **Virtual Clusters**. + +10. Deploy a virtual cluster to the cluster group that you configured with OIDC. For steps, review the [Deploy a Virtual Cluster](../palette-virtual-clusters/deploy-virtual-cluster.md#deploy-a-virtual-cluster) guide. + + :::info + + If the cluster group is part of a project, you can deploy a virtual cluster in *Cluster Mode*. From the **Main Menu**, click on **Cluster Groups** and select the **Virtual Clusters** tab. + + ::: + + When the virtual cluster is finished deploying and in **Running** state, a kubeconfig file is generated that contains OIDC information inserted into it from the cluster group configuration. + +11. Use the **Kubeconfig** link that displays on the virtual clusters overview page to download the kubeconfig file. This will give you access to the Kubernetes cluster. + + From the **User Menu**, switch to *App Mode*, click on **Virtual Clusters** and select the virtual cluster. In *Cluster Mode*, navigate to the **Main Menu**, click on **Cluster Groups**, select the **Virtual Clusters** tab, and select the virtual cluster. + +12. Create the proper Kubernetes *roles* and *roleBindings* required to map OIDC users to a Kubernetes role. Refer to the [Create Role Bindings](../cluster-management/cluster-rbac.md#create-role-bindings) guide for additional guidance. + +:::caution + +Configuring OIDC requires you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](../cluster-management/cluster-rbac.md#create-role-bindings). Refer to [Use RBAC with OIDC](../../integrations/kubernetes.md#use-rbac-with-oidc) for an example. + +::: + + +## Validate + +1. 
Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + +2. Navigate to your virtual clusters. In *Cluster Mode*, if the cluster group is part of a project, navigate to the left **Main Menu**, click on **Cluster Groups** and select the **Virtual Clusters** tab. Alternatively, you can switch to *App Mode* from the **User Menu** and select **Virtual Clusters**. + +3. Select the virtual cluster you deployed to the cluster group that has OIDC configured. + +4. Use the **Kubeconfig** link to download the kubeconfig file, and ensure you can connect to the cluster. Refer to the [Kubectl](../cluster-management/palette-webctl.md) guide for detailed steps. + + +## Resources + +- [Create and Manage Cluster Groups](../../clusters/cluster-groups/create-cluster-group.md) + +- [Deploy a Virtual Cluster](../palette-virtual-clusters/deploy-virtual-cluster.md#deploy-a-virtual-cluster) + +- [Create Role Bindings](../cluster-management/cluster-rbac.md#create-role-bindings) + +- [Use RBAC with OIDC](../../integrations/kubernetes.md#use-rbac-with-oidc) + +- [Kubectl](../cluster-management/palette-webctl.md) \ No newline at end of file diff --git a/docs/docs-content/clusters/palette-virtual-clusters/deploy-virtual-cluster.md b/docs/docs-content/clusters/palette-virtual-clusters/deploy-virtual-cluster.md new file mode 100644 index 0000000000..0acc58c700 --- /dev/null +++ b/docs/docs-content/clusters/palette-virtual-clusters/deploy-virtual-cluster.md @@ -0,0 +1,95 @@ +--- +sidebar_label: "Deploy a Virtual Cluster to a Cluster Group" +title: "Deploy a Virtual Cluster to a Cluster Group" +description: "Learn how to add Palette Virtual Clusters to a Cluster Group" +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["clusters", "cluster groups", "virtual clusters"] +--- + + +You can deploy Palette Virtual Clusters to a [cluster group](../cluster-groups/cluster-groups.md). The advantages of a virtual cluster environment are: +- You can operate with admin-level privileges while ensuring strong isolation. +- Virtual clusters reduce operational overhead and improve resource utilization. + +Use the following steps to deploy a virtual cluster. + +## Prerequisites + +- A Spectro Cloud account. + +- A cluster group. Refer to the [Create and Manage Cluster Groups](/clusters/cluster-groups/create-cluster-group) guide to learn how to create a cluster group. + +- Attach any required policies in your cloud account that must be added to your virtual cluster deployment. + - For AWS, refer to the [Required IAM Policies](../public-cloud/aws/required-iam-policies.md#global-role-additional-policies) documentation. + - For Azure, no additional policies are required. + +
+ + :::info + + Palette does not support _Usage_ and _Cost_ metrics for Virtual Clusters running on Google Kubernetes Engine (GKE). + + ::: + +## Add Node-Level Policies in your Cloud Account + +In some situations additional node-level policies must be added to your deployment. + +To add node-level policies: + +1. In **Cluster Mode**, switch to the **Tenant Admin** project. + + +2. Select **Tenant Settings** in the **Main Menu**. + + +3. Click **Cloud Accounts** and ensure **Add IAM policies** is enabled for your cloud account. If an account does not already exist, you must add one. + + +4. You can specify any additional policies to include in virtual clusters deployed with this cloud account. + + - For AWS, add the **AmazonEBSCSIDriver** policy so that the virtual clusters can access the underlying host cluster's storage. Check out the [Palette required IAM policies](../public-cloud/aws/required-iam-policies.md) documentation to learn more about additional IAM policies. + + +5. Confirm your changes. + +## Deploy a Virtual Cluster + +Follow these steps to deploy a virtual cluster to a cluster group: + +1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. + + +2. Select **Virtual Clusters** from the left **Main Menu**. + + +3. Click the **+New Virtual Cluster**. + + +4. Select your cluster group from the **Select cluster group drop-down Menu**, and type a name for the virtual cluster. + + +5. Assign the CPU, Memory, and Storage size for the cluster. + + +6. Deploy the cluster. + + +## Validate + +To validate that your virtual cluster is available and ready for use, log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. Select **Virtual Clusters** from the left **Main Menu**. Your cluster is ready for use if the status is **Running**. + + +## Resources + +- [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) + +- [CPU resource units](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu) + +- [Memory resource units](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory) + +- [Amazon EBS CSI driver - Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html) + +- [Creating the Amazon EBS CSI driver IAM role for service accounts - Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/csi-iam-role.html) diff --git a/docs/docs-content/clusters/palette-virtual-clusters/palette-virtual-clusters.md b/docs/docs-content/clusters/palette-virtual-clusters/palette-virtual-clusters.md new file mode 100644 index 0000000000..4e546250e9 --- /dev/null +++ b/docs/docs-content/clusters/palette-virtual-clusters/palette-virtual-clusters.md @@ -0,0 +1,35 @@ +--- +sidebar_label: "Palette Virtual Clusters" +title: "Create Palette Virtual Clusters" +description: "Create virtual clusters in Palette" +hide_table_of_contents: false +sidebar_custom_props: + icon: "nodes" +tags: ["clusters", "virtual clusters"] +--- + + +Palette Virtual Clusters are nested Kubernetes clusters within a Host Cluster. Virtual clusters share the host cluster resources, such as CPU, memory, storage, container network interface (CNI), and container storage interface (CSI). By default, virtual clusters use [k3s](https://github.com/k3s-io/k3s), a highly available, certified Kubernetes distribution designed for production workloads. 
+ +Palette provisions and orchestrates virtual clusters to make the lightweight Kubernetes technology stack and tools ecosystem available to you. Deploy virtual clusters on both new and imported Host Clusters and attach application profiles. + +Palette also supports Day-2 operations such as upgrades, backup, and restore to keep virtual clusters secure, compliant, and up to date. Additionally, Palette provides visibility into the workloads deployed inside your virtual clusters and the associated costs. + +## Get Started + + +To get started, refer to [Add Virtual Clusters to a Cluster Group](deploy-virtual-cluster.md). + + +## Network Connectivity + +Virtual clusters support two network endpoint types: Load Balancer and Ingress. The network endpoint type determines how virtual clusters are exposed to external traffic. You specify the network endpoint type in Cluster Group Settings. + +- **Load Balancer**: The Host Cluster must support dynamic provisioning of load balancers, either via a Cloud Controller Manager in the public cloud or a bare metal load balancer provider such as MetalLB. + +- **Ingress**: The Nginx Ingress Controller must be deployed on the Host Cluster with SSL passthrough enabled. This allows TLS termination to occur at the virtual cluster's Kubernetes API server. + + A wildcard DNS record must be configured that maps to the load balancer associated with the NGINX Ingress Controller. For example: + + `*.myapp.mydomain.com` + diff --git a/docs/docs-content/clusters/public-cloud/_category_.json b/docs/docs-content/clusters/public-cloud/_category_.json new file mode 100644 index 0000000000..3fca6fb9f9 --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 0 +} diff --git a/docs/docs-content/clusters/public-cloud/aws/_category_.json b/docs/docs-content/clusters/public-cloud/aws/_category_.json new file mode 100644 index 0000000000..3fca6fb9f9 --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/aws/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 0 +} diff --git a/docs/docs-content/clusters/public-cloud/aws/add-aws-accounts.md b/docs/docs-content/clusters/public-cloud/aws/add-aws-accounts.md new file mode 100644 index 0000000000..5a18387b51 --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/aws/add-aws-accounts.md @@ -0,0 +1,249 @@ +--- +sidebar_label: "Register and Manage AWS Accounts" +title: "Add an AWS Account to Palette" +description: "Learn how to add an AWS account to Palette." +hide_table_of_contents: false +tags: ["public cloud", "aws", "iam"] +sidebar_position: 10 +--- + + +Palette supports integration with AWS Cloud Accounts. This also includes support for [AWS GovCloud (US)](https://aws.amazon.com/govcloud-us/?whats-new-ess.sort-by=item.additionalFields.postDateTime&whats-new-ess.sort-order=desc) accounts. This section explains how to create an AWS cloud account in Palette. You can use any of the following authentication methods to register your cloud account. + +- AWS + - [Static Access Credentials](#static-access-credentials) + - [Dynamic Access Credentials](#dynamic-access-credentials) +- AWS GovCloud + - [Static Access Credentials](#static-access-credentials-1) + - [Dynamic Access Credentials](#dynamic-access-credentials-1) + + + +## AWS Account + +
+ +### Static Access Credentials + +To add an AWS cloud account using static access credentials follow these steps: + +#### Prerequisites + +- An AWS account +- Sufficient access to create an IAM role or IAM user. +- Palette IAM policies. Please review the [Required IAM Policies](required-iam-policies.md) section for guidance. + + +#### Add AWS Account to Palette +1. Create an IAM Role or IAM User for Palette. Use the following resources if you need additional help. + - [IAM Role creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html). + - [IAM User creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). + + +2. In the AWS console, assign the Palette required IAM policies to the role or the IAM user that Palette will use. + + +3. Log in to [Palette](https://console.spectrocloud.com) as Tenant admin. + + +4. Go to **Tenant Settings** > **Cloud Accounts** and click **+Add AWS Account**. + + +5. In the cloud account creation wizard provide the following information: + * **Account Name:** Custom name for the cloud account. + + * **Description:** Optional description for the cloud account. + * **Partition:** Choose **AWS** from the drop-down menu. + + * **Credentials:** + * AWS Access key + * AWS Secret access key + + +6. Click the **Validate** button to validate the credentials. + +7. Once the credentials are validated, the **Add IAM Policies** toggle displays. Toggle **Add IAM Policies** on. + +8. A drop-down menu displays a lists of available AWS IAM policies in your AWS account. Select any desired IAM policies you want to assign to Palette IAM role or IAM user. + + +#### Validate + +You can validate the account is available in Palette by reviewing the list of cloud accounts. To review the list of cloud accounts navigate to the left **Main Menu**. Click on **Tenant Settings**. Next, click on **Cloud Accounts**. Your newly added AWS cloud account is listed under the AWS sections. + + + +### Dynamic Access Credentials + +To add an AWS cloud account using STS credentials follow the steps below: + +#### Prerequisites + +- An AWS account +- Sufficient access to create an IAM role or IAM user. +- Palette IAM policies. Please review the [Required IAM Policies](required-iam-policies.md) section for guidance. + + +#### Add AWS Account to Palette + +1. Log in to [Palette](https://console.spectrocloud.com) as Tenant admin. + + +2. Go to **Tenant Settings** > **Cloud Accounts** and click **+Add AWS Account**. + + +3. In the cloud account creation wizard give the following information: + * **Account Name** + * **Description** + * Select **STS** authentication for validation: + + +4. You will be provided with information on the right hand-side of the wizard. You will need this information to create an IAM Role for Palette. The following table lists out the information provided by the wizard after your selects **STS**. + + |**Parameter**|**Description**| + |---------|---------------| + |**Trusted Entity Type**| Another AWS account| + |**Account ID**|Copy the Account ID displayed on the UI| + |**Require External ID**| Enable| + |**External ID**|Copy the External ID displayed on the UI| + |**Permissions Policy**|Search and select the 4 policies added in step #2| + |**Role Name**|SpectroCloudRole| + +5. In the AWS console, create a new IAM role for Palette. Use the following resources if you need additional help. + - [IAM Role creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html). 
+ - [IAM User creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). + + +6. In the AWS console, assign the [Palette required IAM policies](required-iam-policies.md) to the role that Palette will use. + + +7. In the AWS console, browse to the **Role Details** page and copy the Amazon Resource Name (ARN) for the role. + + +8. In Palette, paste the role ARN into the **ARN** input box. + + +9. Click the **Validate** button to validate the credentials. + + +#### Validate + +You can validate the account is available in Palette by reviewing the list of cloud accounts. To review the list of cloud accounts navigate to the left **Main Menu**. Click on **Tenant Settings**. Next, click on **Cloud Accounts**. Your newly added AWS cloud account is listed under the AWS sections. + + + + + + +## AWS GovCloud Account + +Palette supports integration with [AWS GovCloud (US)](https://aws.amazon.com/govcloud-us/?whats-new-ess.sort-by=item.additionalFields.postDateTime&whats-new-ess.sort-order=desc). Using Palette you can deploy Kubernetes clusters to your AWS GovCloud account. To get started with AWS GovCloud and Palette, use the following steps. +
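+ +Before you begin, you can optionally confirm that the credentials you plan to register belong to the GovCloud partition. The check below assumes the AWS CLI is configured with those credentials. + + ```shell + # The returned ARN should contain the aws-us-gov partition for GovCloud accounts. + aws sts get-caller-identity --query Arn --output text + ```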
+ +### Static Access Credentials + +#### Prerequisites + +- An AWS account +- Sufficient access to create an IAM role or IAM user. +- Palette IAM policies. Please review the [Required IAM Policies](required-iam-policies.md) section for guidance. + +#### Add AWS GovCloud Account to Palette + +1. Create an IAM Role or IAM User for Palette. Use the following resources if you need additional help. + - [IAM Role creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html). + - [IAM User creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). + + +2. In the AWS console, assign the Palette required IAM policies to the role or the IAM user that Palette will use. + + +3. Log in to [Palette](https://console.spectrocloud.com) as Tenant admin. + + +4. Go to **Tenant Settings** > **Cloud Accounts** and click **+Add AWS Account**. + + +5. In the cloud account creation wizard provide the following information: + * **Account Name:** Custom name for the cloud account. + + * **Description:** Optional description for the cloud account. + * **Partition:** Choose **AWS GovCloud US** from the drop-down menu. + + * **Credentials:** + * AWS Access key + * AWS Secret access key + + +6. Click the **Validate** button to validate the credentials. + +7. Once the credentials are validated, the **Add IAM Policies** toggle displays. Toggle **Add IAM Policies** on. + +8. A drop-down menu displays a lists of available AWS IAM policies in your AWS account. Select any desired IAM policies you want to assign to Palette IAM role or IAM user. + + +#### Validate + +You can validate the account is available in Palette by reviewing the list of cloud accounts. To review the list of cloud accounts navigate to the left **Main Menu**. Click on **Tenant Settings**. Next, click **Cloud Accounts**. Your newly added AWS cloud account is listed under the AWS sections. + +### Dynamic Access Credentials + +To add an AWS GovCloud cloud account using STS credentials follow the steps below: + +#### Prerequisites + +- An AWS account +- Sufficient access to create an IAM role or IAM user. +- Palette IAM policies. Please review the [Required IAM Policies](required-iam-policies.md) section for guidance. + + +#### Add AWS GovCloud Account to Palette + +1. Log in to [Palette](https://console.spectrocloud.com) as Tenant admin. + + +2. Go to **Tenant Settings** > **Cloud Accounts** and click **+Add AWS Account**. + + +3. In the cloud account creation wizard give the following information: + * **Account Name** + * **Description** + * Select **STS** authentication for validation: + + +4. You will be provided with information on the right hand-side of the wizard. You will need this information to create an IAM Role for Palette. The following table lists out the information provided by the wizard after you selects **STS**. + + |**Parameter**|**Description**| + |---------|---------------| + |**Trusted Entity Type**| Another AWS account| + |**Account ID**|Copy the Account ID displayed on the UI| + |**Require External ID**| Enable| + |**External ID**|Copy the External ID displayed on the UI| + |**Permissions Policy**|Search and select the 4 policies added in step #2| + |**Role Name**|SpectroCloudRole| + +5. In the AWS console, create a new IAM role for Palette. Use the following resources if you need additional help. + - [IAM Role creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html). 
+ - [IAM User creation guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html).
+
+
+6. In the AWS console, assign the [Palette required IAM policies](required-iam-policies.md) to the role that Palette will use.
+
+
+7. In the AWS console, browse to the **Role Details** page and copy the Amazon Resource Name (ARN) for the role.
+
+
+8. In Palette, paste the role ARN into the **ARN** input box.
+
+
+9. Click the **Validate** button to validate the credentials.
+
+
+#### Validate
+
+You can validate that the account is available in Palette by reviewing the list of cloud accounts. To review the list of cloud accounts, navigate to the left **Main Menu** and click on **Tenant Settings**. Next, click on **Cloud Accounts**. Your newly added AWS cloud account is listed under the AWS section.
+
+
+## Next Steps
+
+Now that you have added an AWS account to Palette, you can start deploying Kubernetes clusters to your AWS account. To learn how to get started with deploying Kubernetes clusters to AWS, check out the [Create and Manage AWS IaaS Cluster](create-cluster.md) guide or the [Create and Manage AWS EKS Cluster](eks.md) guide.
\ No newline at end of file
diff --git a/docs/docs-content/clusters/public-cloud/aws/architecture.md b/docs/docs-content/clusters/public-cloud/aws/architecture.md
new file mode 100644
index 0000000000..027aa8b170
--- /dev/null
+++ b/docs/docs-content/clusters/public-cloud/aws/architecture.md
@@ -0,0 +1,105 @@
+---
+sidebar_label: "Architecture"
+title: "AWS Architecture"
+description: "Learn about Palette and the architecture used to support Palette"
+hide_table_of_contents: false
+tags: ["public cloud", "aws", "architecture"]
+sidebar_position: 0
+---
+
+The following are some architectural highlights of the Amazon Web Services (AWS) clusters that Palette provisions:
+
+
+- Kubernetes nodes can be distributed across multiple availability zones (AZs) to achieve high availability (HA). For each of the AZs that you select, a public subnet and a private subnet are created.
+
+
+- All the control plane nodes and worker nodes are created within the private subnets, so there is no direct public access available.
+
+
+- A Network Address Translation (NAT) Gateway is created in the public subnet of each AZ to allow nodes in the private subnet to reach the internet and call other AWS services.
+
+
+- An Internet Gateway (IG) is created for each Virtual Private Cloud (VPC) to allow Secure Shell Protocol (SSH) access to the bastion node for debugging purposes. SSH access to Kubernetes nodes is only available through the bastion node. A bastion node helps to provide access to the Amazon Elastic Compute Cloud (EC2) instances. This is because the EC2 instances are created in a private subnet and the bastion node operates as a secure, single point of entry into the infrastructure. The bastion node can be accessed via SSH or Remote Desktop (RDP).
+
+
+- The Kubernetes API Server endpoint is accessible through an Elastic Load Balancer (ELB), which load balances across all the control plane nodes.
+
+ ![A diagram of AWS architecture](/clusters_aws_architecture_aws_cluster_architecture.png)
+
+
+## AWS EKS Architecture
+
+Palette also supports deploying and managing AWS Elastic Kubernetes Service (EKS) clusters. Review the following architectural highlights for EKS clusters managed by Palette.
+
+- Cluster resources such as Virtual Machines (VMs) can be provisioned into an existing infrastructure (gateways, VPCs, subnets etc.)
as part of static provisioning as well as new dedicated infrastructure as part of dynamic provisioning.
+
+
+- Palette supports the usage of [EKS Fargate Profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate-profile.html).
+
+
+- Spot instance support
+
+ ![eks_cluster_architecture.png](/clusters_aws_create-and-manage-aws-eks-cluster_architecture.png)
+
+
+### Disable OIDC Associate Provider
+You can disable the OIDC associate provider if your service provider restricts cluster deployments that have the OIDC associate provider enabled. Customize the EKS Kubernetes pack YAML values with the following option:
+
+```yaml
+ disableAssociateOIDCProvider: true
+```
+
+## AWS Instance Type and Pod Capacity
+Choose the instance type and the number of instances to be launched by calculating the number of expected pods. You should also calculate the number of pods scheduled per node for an instance type. Improperly sized nodes can cause cluster creation to fail due to resource unavailability.
+
+The following section describes how to calculate the pod capacity for AWS instance types. This calculation will help you select the proper instance type and the number of desired workers in a worker pool. For most workloads, we recommend choosing an instance type that can support at least 30 pods.
+
+### Formula for Pod Calculation
+Number of pods = N * (M-1) + 2
+
+Where:
+* **N** is the number of Elastic Network Interfaces (ENI) of the instance type (Maximum network interfaces).
+* **M** is the number of IP addresses of a single ENI (Private IPv4 addresses per interface/IPv6 addresses per interface).
+* Values for **N** and **M** for each instance type can be found in [this document](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI).
+
+### Example Calculation
+* For instance type = t3.medium
+* For values of N = 3, and M = 6 (values derived from the AWS [document](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI))
+* N * (M-1) + 2 = 3 * (6-1) + 2 = 17 pods per instance
+* In this example, at least two (2) t3.medium instances are needed to reach the minimum threshold of 30 pods.
+
+When setting the desired size of the worker pool, base the choice on your pod requirements. In the example provided, two instances of t3.medium are needed to satisfy the resource requirement of an EKS cluster.
+
+
+## Spot Instances
+
+By default, worker pools are configured to use on-demand instances. However, to take advantage of discounted spot instance pricing, you can specify spot instances when creating a cluster. The **On-Spot** option can be selected in the node configuration page during cluster creation. This option allows you to specify a maximum bid price for the nodes as a percentage of the on-demand price. Palette tracks the current price for spot instances and launches nodes when the spot price falls within the specified range.
+
+## Tags
+
+You can assign tags to clusters deployed to AWS. Tags can help you with user access control management and more granularly restrict access to various Palette resources, including clusters. Check out the [Resource Filters](../../cluster-management/cluster-tag-filter/create-add-filter.md) documentation page to learn more about using tags to restrict resource access.
+
+The custom tags you create are assigned to the clusters during the creation process. Tags follow the key-value-pair format: `department: finance`.
+In addition to the custom tags you provide, Palette-provisioned AWS resources receive the following default tags.
+
+| Key | Value | Description |
+|------------------------------------------------------------|---------------|--------------------------------------------------------------------------------|
+| `Name` | [clusterName-resource] | The name of the AWS resource. Uses the format [cluster name]-[resource type name]. Example: `mycluster-vpc` |
+| `kubernetes.io/cluster/[clusterName]` | owned | This tag only applies to cluster nodes. Used for Palette internal purposes to help manage the lifecycle of the cluster. |
+| `sigs.k8s.io/cluster-api-provider-aws/cluster/[clusterName]` | owned | Used for Palette internal purposes to help manage the lifecycle of the cluster. |
+| `sigs.k8s.io/cluster-api-provider-aws/role` | common | Used for Palette internal purposes to help manage the lifecycle of the cluster. |
+| `spectro__ownerUid` | [uniqueId] | The Palette tenant's ID. Example: `1356fc37ab1aac03a5d66b4c`. |
+
+
+
+## Automatic Network Discovery
+
+
+You must add a set of specific tags to enable automatic subnet discovery by Palette for integration with the AWS load balancer service. Add the following tags to the Virtual Private Cloud (VPC) public subnets. Replace the value `yourClusterName` with your cluster's name.
+
+ +- `kubernetes.io/role/elb = 1` +- `sigs.k8s.io/cluster-api-provider-aws/role = public` +- `kubernetes.io/cluster/[yourClusterName] = shared` +- `sigs.k8s.io/cluster-api-provider-aws/cluster/[yourClusterName] = owned` \ No newline at end of file diff --git a/docs/docs-content/clusters/public-cloud/aws/aws.md b/docs/docs-content/clusters/public-cloud/aws/aws.md new file mode 100644 index 0000000000..eb5d08a757 --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/aws/aws.md @@ -0,0 +1,41 @@ +--- +sidebar_label: "AWS" +title: "Amazon Web Services (AWS)" +description: "The methods of creating clusters for a speedy deployment on any CSP" +tags: ["public cloud", "aws"] +hide_table_of_contents: false +--- + + +Palette supports integration with [Amazon Web Services](https://aws.amazon.com). You can deploy and manage [Host Clusters](../../../glossary-all.md#hostcluster) in AWS. To get started check out the [Register and Manage AWS Accounts](add-aws-accounts.md). + + + +## Get Started + +Learn how to deploy a cluster to AWS by using Palette. Check out the [Deploy a Cluster with Palette](../deploy-k8s-cluster.md) tutorial to get started. + + +## Resources + +To learn more about Palette and AWS clusters, check out the following resources: + +- [Register and Manage AWS Accounts](add-aws-accounts.md) + + +- [Create and Manage AWS IaaS Cluster](create-cluster.md) + + +- [Create and Manage AWS EKS Cluster](eks.md) + + +- [Cluster Management Day Two Operations](../../cluster-management/cluster-management.md) + + +- [AWS Architecture](architecture.md) + + +- [Required IAM Policies](required-iam-policies.md) + + +- [Cluster Removal](../../cluster-management/remove-clusters.md) diff --git a/docs/docs-content/clusters/public-cloud/aws/create-cluster.md b/docs/docs-content/clusters/public-cloud/aws/create-cluster.md new file mode 100644 index 0000000000..950c051e8a --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/aws/create-cluster.md @@ -0,0 +1,171 @@ +--- +sidebar_label: "Create and Manage AWS IaaS Cluster" +title: "Create and Manage AWS Cluster" +description: "Learn how to add and manage a cluster deployed to AWS." +hide_table_of_contents: false +tags: ["public cloud", "aws"] +sidebar_position: 20 +--- + + +Palette supports creating and managing Kubernetes clusters deployed to an AWS account. This section guides you on how to create a Kubernetes cluster in AWS that is managed by Palette. + +## Prerequisites + +The following prerequisites must be met before deploying a cluster to AWS: + +- Access to an AWS cloud account + + +- You have added an AWS account in Palette. Review the [Add AWS Account](add-aws-accounts.md) for guidance. + + +- An infrastructure cluster profile. Review the [Create Cluster Profiles](../../../cluster-profiles/task-define-profile.md) for guidance. + + +- An [EC2 Key Pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the target region. + + +- Palette creates compute, network, and storage resources in AWS during the provisioning of Kubernetes clusters. Ensure there is sufficient capacity in the preferred AWS region for the creation of the following resources: + - vCPU + - VPC + - Elastic IP + - Internet Gateway + - Elastic Load Balancers + - NAT Gateway + +
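+
+If you want to double-check the capacity-related prerequisites above before you start, the following is a minimal sketch that uses boto3 to confirm the EC2 key pair exists and to count a few of the resources that consume regional quotas. The region and key pair name are placeholder assumptions; replace them with your own values, and check the AWS Service Quotas console for your actual limits.
+
+```python
+import boto3
+
+REGION = "us-east-1"            # assumption: your target AWS region
+KEY_PAIR_NAME = "my-key-pair"   # assumption: your EC2 key pair name
+
+ec2 = boto3.client("ec2", region_name=REGION)
+
+# Confirm the EC2 key pair exists in the target region.
+key_names = [kp["KeyName"] for kp in ec2.describe_key_pairs()["KeyPairs"]]
+print(f"Key pair present: {KEY_PAIR_NAME in key_names}")
+
+# Count resources that commonly hit regional limits during cluster creation.
+print(f"VPCs in use: {len(ec2.describe_vpcs()['Vpcs'])}")
+print(f"Elastic IPs in use: {len(ec2.describe_addresses()['Addresses'])}")
+print(f"NAT gateways in use: {len(ec2.describe_nat_gateways()['NatGateways'])}")
+```
+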
+
+ :::info
+
+ The following tags should be added to the Virtual Private Cloud (VPC) public subnets to enable automatic subnet discovery for integration with the AWS load balancer service. Replace the value `yourClusterName` with your cluster's name.
+
+ - `kubernetes.io/role/elb = 1`
+ - `sigs.k8s.io/cluster-api-provider-aws/role = public`
+ - `kubernetes.io/cluster/[yourClusterName] = shared`
+ - `sigs.k8s.io/cluster-api-provider-aws/cluster/[yourClusterName] = owned`
+
+ :::
+
+
+## Deploy an AWS Cluster
+
+Use the following steps to provision a new AWS cluster:
+
+1. Log in to [Palette](https://console.spectrocloud.com) and ensure you are in the correct project scope.
+
+
+2. Navigate to the left **Main Menu** and click on **Clusters**.
+
+
+3. Click on **Add New Cluster**.
+
+
+4. You will receive a prompt asking you if you want to deploy a new cluster or import an existing cluster. Click on **Deploy New Cluster**.
+
+
+5. Select **AWS** and click on **Start AWS Configuration**.
+
+
+6. Populate the wizard page with the following information: name, description, tags, and the AWS account to use. Tags on a cluster are propagated to the VMs deployed to the computing environments. Click on **Next** after you have filled out all the required information.
+
+
+7. Select a cluster profile. Click on **Next**.
+
+
+8. Review and customize pack parameters as desired. By default, parameters for all packs are set with the values defined in the cluster profile.
+
+
+9. Provide the AWS cloud account and placement information.
+
+ + |**Parameter**| **Description**| + |-------------|---------------| + |**Cloud Account** | Select the desired cloud account. AWS cloud accounts with AWS credentials need to be pre-configured in project settings.| + |**Region** | Choose the preferred AWS region where you would like to provision clusters.| + |**SSH Key Pair Name** | Choose the desired SSH Key pair. SSH key pairs need to be pre-configured on AWS for the desired regions. The selected key is inserted into the provisioned VMs.| + |**Static Placement** | Check the **Static Placement** box if you want to deploy resources into pre-existing VPCs and subnets. Review the [Static Placement](#static-placement) table below to learn more about the required input fields.| + | **Private API Server LB**| Enable to deploy the cluster load balancer in a private subnet. This feature requires Palette to have direct network connectivity with the private subnet or a [Private Cluster Gateway](../../data-center/maas/install-manage-maas-pcg.md) deployed in the environment.| + +
+
+ #### Static Placement
+
+ |Parameter|Description|
+ |---|---|
+ |**VPCID**| Select the Virtual Private Cloud (VPC) network from the **drop-down Menu**.|
+ |**Control plane subnet**| Select the control plane network from the **drop-down Menu**.|
+ |**Worker Network**| Select the worker network from the **drop-down Menu**.|
+
+
+
+10. Configure the master and worker node pools. A master and a worker node pool are configured by default. This is the section where you can specify the availability zones (AZ), instance types, [instance cost type](architecture.md#spot-instances), disk size, and the number of nodes. Click on **Next** after you have completed configuring the node pool.
+
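+
+ When choosing instance types for a node pool, you can estimate how many pods an instance supports with the formula described in the [AWS Instance Type and Pod Capacity](architecture.md#aws-instance-type-and-pod-capacity) section. The following is a small illustrative sketch; the ENI and IP-per-ENI values shown are the t3.medium figures from that example and should be looked up for your own instance type.
+
+ ```python
+ def max_pods(eni_count: int, ips_per_eni: int) -> int:
+     """Estimate pod capacity: N * (M - 1) + 2."""
+     return eni_count * (ips_per_eni - 1) + 2
+
+ # t3.medium: 3 ENIs, 6 IPv4 addresses per ENI (per the AWS ENI documentation).
+ print(max_pods(3, 6))  # 17 pods per instance
+ ```
+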
+
+ :::info
+
+ You can add new worker pools if you need to customize certain worker nodes to run specialized workloads. As an example, the default worker pool may be configured with the m3.large instance types for general-purpose workloads, and another worker pool with instance type g2.2xlarge can be configured to run GPU workloads.
+
+ :::
+
+
+
+
+
+11. An optional taint label can be applied to a node pool during cluster creation. For an existing cluster, the taint label can be edited; review the [Node Pool](../../cluster-management/node-pool.md) management page to learn more. Toggle the **Taint** button to create a label.
+
+
+12. Enable or disable node pool taints. If tainting is enabled, then you need to provide values for the following parameters:
+
+ |**Parameter**| **Description**|
+ |-------------|---------------|
+ |**Key** |Custom key for the taint.|
+ |**Value** | Custom value for the taint key.|
+ | **Effect** | Make the choice of effect from the drop-down menu. Review the effect table below for more details. |
+
+ #### Effect Table
+
+ |**Parameter**| **Description**|
+ |-------------|---------------|
+ | **NoSchedule**| A pod that cannot tolerate the node taint will not be scheduled to the node. |
+ | **PreferNoSchedule**| The system will avoid placing a non-tolerant pod on the tainted node, but this is not guaranteed. |
+ | **NoExecute**| New pods will not be scheduled on the node, and existing pods on the node, if any, will be evicted if they do not tolerate the taint. |
+
+
+13. If you checked the **Static Placement** box in the **Cluster config** page, you can specify additional AWS [security groups](https://docs.aws.amazon.com/vpc/latest/userguide/security-groups.html) to apply to the worker group nodes. Use the **Additional Security Groups (Optional) drop-down Menu** to select additional security groups.
+
+
+14. Click on **Next**.
+
+
+15. The settings page is where you can configure the patching schedule, security scans, backup settings, and set up Role Based Access Control (RBAC). Review the cluster settings and make changes if needed. Click on **Validate**.
+
+16. Review the settings summary and click on **Finish Configuration** to deploy the cluster. Provisioning IaaS clusters can take 15 to 30 minutes depending on the cluster profile and the node pool configuration.
+
+The cluster details page of the cluster contains the status and details of the deployment. Use this page to track the deployment progress.
+
+
+## Validate
+
+You can validate that your cluster is up and available by reviewing the cluster details page.
+
+1. Log in to [Palette](https://console.spectrocloud.com).
+
+
+
+2. Navigate to the left **Main Menu** and click on **Clusters**.
+
+
+3. The **Clusters** page contains a list of the available clusters Palette manages. Click on the row of the cluster you wish to review to access its details page.
+
+
+
+4. From the cluster details page, verify the **Cluster Status** field displays **Running**.
+
+
+## Next Steps
+
+Now that you have a Kubernetes cluster deployed, you can start developing and deploying applications to your clusters. We recommend you review the day two responsibilities and become familiar with the cluster management tasks. Check out the [Manage Clusters](../../cluster-management/cluster-management.md) documentation to learn more about day two responsibilities.
\ No newline at end of file
diff --git a/docs/docs-content/clusters/public-cloud/aws/eks.md b/docs/docs-content/clusters/public-cloud/aws/eks.md
new file mode 100644
index 0000000000..29c9a56249
--- /dev/null
+++ b/docs/docs-content/clusters/public-cloud/aws/eks.md
@@ -0,0 +1,172 @@
+---
+sidebar_label: "Create and Manage AWS EKS Cluster"
+title: "Create and Manage AWS EKS Cluster"
+description: "Learn how to deploy and manage AWS EKS clusters with Palette"
+hide_table_of_contents: false
+tags: ["public cloud", "aws"]
+sidebar_position: 30
+---
+
+
+Palette supports creating and managing AWS Elastic Kubernetes Service (EKS) clusters deployed to an AWS account. This section guides you on how to create an AWS EKS cluster in AWS that is managed by Palette.
+
+## Prerequisites
+
+The following prerequisites must be met before deploying a cluster to AWS:
+
+- Access to an AWS cloud account.
+- Palette integration with your AWS account. Review the [Add AWS Account](add-aws-accounts.md) guide for guidance.
+- An infrastructure cluster profile for AWS EKS. Review the [Create Cluster Profiles](../../../cluster-profiles/task-define-profile.md) guide for guidance.
+- An [EC2 Key Pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the target region.
+- Palette creates compute, network, and storage resources in AWS during the provisioning of Kubernetes clusters. Ensure there is sufficient capacity in the preferred AWS region for the creation of the following resources:
+ - vCPU
+ - VPC
+ - Elastic IP
+ - Internet Gateway
+ - Elastic Load Balancers
+ - NAT Gateway
+
+
+:::info
+
+The following tags should be added to the Virtual Private Cloud (VPC) public subnets to enable automatic subnet discovery for integration with the AWS load balancer service. Replace the value `yourClusterName` with your cluster's name.
+- `kubernetes.io/role/elb = 1`
+- `sigs.k8s.io/cluster-api-provider-aws/role = public`
+- `kubernetes.io/cluster/[yourClusterName] = shared`
+- `sigs.k8s.io/cluster-api-provider-aws/cluster/[yourClusterName] = owned`
+
+:::
+
+## Deploy an AWS Cluster
+
+Use the following steps to provision a new AWS EKS cluster:
+
+1. Ensure you are in the correct project scope.
+
+
+2. Navigate to the left **Main Menu** and click on **Clusters**.
+
+
+3. Click on **Add New Cluster**.
+
+
+4. You will receive a prompt asking you if you want to deploy a new cluster or import an existing cluster. Click on **Deploy New Cluster**.
+
+
+5. Select **AWS** and click on **Start AWS Configuration**.
+
+
+6. Populate the wizard page with the following information: name, description, tags, and the AWS account to use. Tags on a cluster are propagated to the VMs deployed to the target environments. Click on **Next** after you have filled out all the required information.
+
+7. Select **Managed Kubernetes** and click on your cluster profile that supports AWS EKS. Click on **Next**.
+
+
+8. Review and customize pack parameters as desired. By default, parameters for all packs are set with the values defined in the cluster profile. Click on **Next**.
+
+
+9. Provide the AWS cloud account and placement information.
+
+ |**Parameter**| **Description**|
+ |-------------|---------------|
+ |**Cloud Account** | Select the desired cloud account. AWS cloud accounts with AWS credentials need to be pre-configured in project settings.|
+ |**Static Placement** | By default, Palette uses dynamic placement, wherein a new VPC with a public and private subnet is created to place cluster resources for every cluster.
These resources are fully managed by Palette and deleted when the corresponding cluster is deleted. Turn on the **Static Placement** option if you want to place resources into preexisting VPCs and subnets.|
+ |**Region** | Choose the preferred AWS region where you would like the clusters to be provisioned.|
+ |**SSH Key Pair Name** | Choose the desired SSH Key pair. SSH key pairs need to be pre-configured on AWS for the desired regions. The selected key is inserted into the provisioned VMs.|
+ |**Cluster Endpoint Access**| Select Private, Public, or Private & Public to control communication with the Kubernetes API endpoint. For more information, refer to the [Amazon EKS cluster endpoint access control](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) reference guide. :::caution If you set the cluster endpoint to Public, specify `0.0.0.0/0` in the Public Access CIDR field to open it to all possible IP addresses. Otherwise, Palette will not open it up entirely. :::|
+ |**Public Access CIDR** |This setting controls which IP address CIDR range can access the cluster. To fully allow unrestricted network access, enter `0.0.0.0/0` in the field. For more information, refer to the [Amazon EKS cluster endpoint access control](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) reference guide.|
+ |**Enable Encryption**|Enable secret encryption by toggling **Enable Encryption**. Provide the KMS key ARN to complete the wizard. Review [EKS Cluster Encryption](#eks-cluster-secrets-encryption) for more details.|
+ |**Worker Pool Update**|Optionally enable updating the worker pools in parallel.|
+
+
+10. Choose whether to update the worker pools in parallel, if required. Click on **Next**.
+
+
+11. Configure the master and worker node pools. A single master and a worker node pool are configured by default. This is the section where you can specify the availability zones (AZ), instance types, [instance cost type](architecture.md#spot-instances), disk size, and the number of nodes. Use the following tables to better understand the available input options.
+
+ |**Parameter**| **Description**|
+ |-------------|----------------|
+ |**Name** | A descriptive name for the node pool.|
+ |**Size** | Set the minimum, maximum, and desired sizes for the worker pool. The size of the worker pool will scale between the minimum and maximum size under varying workload conditions. Review the [AWS Instance Type and Pod Capacity](architecture.md#formula-for-pod-calculation) documentation for help in determining the proper instance type and size. |
+ |[Taints](../../cluster-management/taints.md#taints) |Optionally apply taints to control which pods can be scheduled on the nodes in the pool.|
+ |[Labels](../../cluster-management/taints.md#labels) |Optionally apply labels to constrain a pod to only run on a particular set of nodes.|
+ |**Instance Type** | Select the AWS instance type to be used for all nodes in the node pool.|
+
+ * Cloud Configuration settings:
+
+ |**Parameter**| **Description**|
+ |-------------|----------------|
+ |**Instance Option**| Choose between on-demand and spot instances.|
+ |**Instance Type**| Choose an instance type.|
+ |**Availability Zones**|Select at least one availability zone within the VPC.|
+ |**Disk Size**|Select the disk size per your requirements.|
+
+ * You can create one or more Fargate profiles for the EKS cluster to use.
+
+ |**Parameter**| **Description**|
+ |-------------|---------------|
+ |**Name** |Provide a name for the Fargate profile.|
+ |**Subnets** |Pods running on Fargate profiles are not assigned public IP addresses, so only private subnets (with no direct route to an Internet Gateway) are accepted for this parameter. For dynamic provisioning, this input is not required and subnets are automatically selected.|
+ |**Selectors** |Define a pod selector by providing a target namespace and, optionally, labels. Pods with matching namespace and app labels are scheduled to run on dynamically provisioned compute nodes.
You can have up to five selectors in a Fargate profile, and a pod only needs to match one selector to run using the Fargate profile.|
+
+:::info
+
+You can add new worker pools if you need to customize certain worker nodes to run specialized workloads. As an example, the default worker pool may be configured with the m3.large instance types for general-purpose workloads, and another worker pool with instance type g2.2xlarge can be configured to run GPU workloads.
+
+:::
+
+12. An optional taint label can be applied to a node pool during cluster creation. For an existing cluster, the taint label can be edited; review the [Node Pool](../../cluster-management/node-pool.md) management page to learn more. Toggle the **Taint** button to create a label.
+
+
+13. Enable or disable node pool taints. If tainting is enabled, then you need to provide values for the following parameters:
+
+ |**Parameter**| **Description**|
+ |-------------|---------------|
+ |**Key** |Custom key for the taint.|
+ |**Value** | Custom value for the taint key.|
+ | **Effect** | Make the choice of effect from the drop-down menu. Review the effect table below for more details. |
+
+ #### Effect Table
+
+ |**Parameter**| **Description**|
+ |-------------|---------------|
+ | **NoSchedule**| A pod that cannot tolerate the node taint will not be scheduled to the node. |
+ | **PreferNoSchedule**| The system will avoid placing a non-tolerant pod on the tainted node, but this is not guaranteed. |
+ | **NoExecute**| New pods will not be scheduled on the node, and existing pods on the node, if any, will be evicted if they do not tolerate the taint. |
+
+14. Click on **Next**.
+
+15. The settings page is where you can configure the patching schedule, security scans, backup settings, set up role-based access control (RBAC), and enable [Palette Virtual Clusters](../../../devx/palette-virtual-clusters/palette-virtual-clusters.md). Review the settings and make changes if needed. Click on **Validate**.
+
+16. Review the settings summary and click on **Finish Configuration** to deploy the cluster. Be aware that provisioning an AWS EKS cluster can take several minutes.
+
+The cluster details page of the cluster contains the status and details of the deployment. Use this page to track the deployment progress.
+
+
+## Validate
+
+You can validate that your cluster is up and running by reviewing the cluster details page. Navigate to the left **Main Menu** and click on **Clusters**. The **Clusters** page contains a list of all available clusters managed by Palette. Click on the row of the cluster you wish to review to access its details page. Ensure the **Cluster Status** field contains the value **Running**.
+
+
+## EKS Cluster Secrets Encryption
+
+Palette encourages using AWS Key Management Service (KMS) to provide envelope encryption of Kubernetes secrets stored in Amazon Elastic Kubernetes Service (EKS) clusters. This encryption is
+a defense-in-depth security strategy to protect sensitive data such as passwords, Docker registry credentials, and TLS keys stored as [Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/).
+
+### Prerequisites
+
+* A KMS key created in the AWS account.
+* The KMS key is of the symmetric type.
+* The KMS key policy permits the following actions: encrypt and decrypt.
+
+### Configure KMS
+
+The IAM User or IAM role that Palette is using must have the following IAM permissions.
+
+```json hideClipboard
+kms:CreateGrant,
+kms:ListAliases,
+kms:ListKeys,
+kms:DescribeKey
+```
+Ensure the IAM role or IAM user has the required IAM permissions on the KMS key that will be used for EKS.
+You can enable secret encryption during the EKS cluster creation process by toggling the encryption button and providing the Amazon Resource Name (ARN) of the encryption key. The encryption option is available on the cluster creation wizard's **Cluster Config** page.
diff --git a/docs/docs-content/clusters/public-cloud/aws/required-iam-policies.md b/docs/docs-content/clusters/public-cloud/aws/required-iam-policies.md
new file mode 100644
index 0000000000..bf627ba61b
--- /dev/null
+++ b/docs/docs-content/clusters/public-cloud/aws/required-iam-policies.md
@@ -0,0 +1,862 @@
+---
+sidebar_label: "Required IAM Policies"
+title: "Required IAM Policies"
+description: "A list of the IAM policies that Palette requires."
+hide_table_of_contents: false
+tags: ["public cloud", "aws", "iam"]
+sidebar_position: 40
+---
+
+Palette requires proper Amazon Web Services (AWS) permissions to operate and perform actions on your behalf.
+The following policies include all the permissions needed for cluster provisioning with Palette.
+
+ +* **PaletteControllersPolicy** + + +* **PaletteControlPlanePolicy** + + +* **PaletteNodesPolicy** + + +* **PaletteDeploymentPolicy** + +Additional IAM policies may be required depending on the use case. For example, AWS Elastic Kubernetes Service (EKS) requires the **PaletteControllersEKSPolicy**. Check out the [Controllers EKS Policy](#controllers-eks-policy) section to review the IAM policy. + + +:::caution + +You can attach a maximum of ten managed policies to an IAM User or role. Exceeding this limit will result in cluster deployment failures. If you find yourself in a scenario where you are exceeding the limit, consider combining policies into a custom-managed policy. +You can learn more about AWS IAM limits in the [IAM Quotas](https://docs.aws.amazon.com/us_en/IAM/latest/UserGuide/reference_iam-quotas.html) reference guide. + +::: + + + + + + +**Last Update**: April 20, 2023 + +``` json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "iam:DeleteOpenIDConnectProvider", + "iam:GetOpenIDConnectProvider", + "iam:ListOpenIDConnectProviders", + "iam:TagOpenIDConnectProvider", + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeInstanceRefreshes", + "ec2:AllocateAddress", + "ec2:AssociateRouteTable", + "ec2:AttachInternetGateway", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateInternetGateway", + "ec2:CreateLaunchTemplate", + "ec2:CreateLaunchTemplateVersion", + "ec2:CreateNatGateway", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateTags", + "ec2:CreateVpc", + "ec2:DeleteInternetGateway", + "ec2:DeleteLaunchTemplate", + "ec2:DeleteLaunchTemplateVersions", + "ec2:DeleteNatGateway", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSubnet", + "ec2:DeleteTags", + "ec2:DeleteVpc", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeInternetGateways", + "ec2:DescribeKeyPairs", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkInterfaceAttribute", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "ec2:DetachInternetGateway", + "ec2:DisassociateAddress", + "ec2:DisassociateRouteTable", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:ModifySubnetAttribute", + "ec2:ModifyVpcAttribute", + "ec2:ReleaseAddress", + "ec2:ReplaceRoute", + "ec2:RevokeSecurityGroupIngress", + "ec2:RunInstances", + "ec2:TerminateInstances", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RemoveTags", + "iam:CreateOpenIDConnectProvider", + "tag:GetResources" + ], + "Resource": [ + "*" + ], + "Effect": "Allow" + }, + { + "Action": [ + "autoscaling:CreateAutoScalingGroup", + 
"autoscaling:UpdateAutoScalingGroup", + "autoscaling:CreateOrUpdateTags", + "autoscaling:StartInstanceRefresh", + "autoscaling:DeleteAutoScalingGroup", + "autoscaling:DeleteTags" + ], + "Resource": [ + "arn:*:autoscaling:*:*:autoScalingGroup:*:autoScalingGroupName/*" + ], + "Effect": "Allow" + }, + { + "Condition": { + "StringLike": { + "iam:AWSServiceName": "autoscaling.amazonaws.com" + } + }, + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Resource": [ + "arn:*:iam::*:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling" + ], + "Effect": "Allow" + }, + { + "Condition": { + "StringLike": { + "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com" + } + }, + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Resource": [ + "arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing" + ], + "Effect": "Allow" + }, + { + "Condition": { + "StringLike": { + "iam:AWSServiceName": "spot.amazonaws.com" + } + }, + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Resource": [ + "arn:*:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot" + ], + "Effect": "Allow" + }, + { + "Action": [ + "iam:PassRole" + ], + "Resource": [ + "arn:*:iam::*:role/*.cluster-api-provider-aws.sigs.k8s.io" + ], + "Effect": "Allow" + }, + { + "Action": [ + "secretsmanager:CreateSecret", + "secretsmanager:DeleteSecret", + "secretsmanager:TagResource" + ], + "Resource": [ + "arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/*" + ], + "Effect": "Allow" + }, + { + "Action": [ + "s3:DeleteObject", + "s3:PutBucketOwnershipControls", + "s3:PutBucketPolicy", + "s3:PutBucketPublicAccessBlock", + "s3:PutObjectAcl", + "s3:PutObject" + ], + "Resource": [ + "arn:*:s3:::*" + ], + "Effect": "Allow" + } + ] +} +``` + + + + + +**Last Update**: April 20, 2023 + +``` json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeImages", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + 
"elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:CreateServiceLinkedRole", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ], + "Effect": "Allow" + } + ] +} +``` + + + + + +**Last Update**: May 2, 2021 + +``` json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage" + ], + "Resource": [ + "*" + ], + "Effect": "Allow" + }, + { + "Action": [ + "secretsmanager:DeleteSecret", + "secretsmanager:GetSecretValue" + ], + "Resource": [ + "arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/*" + ], + "Effect": "Allow" + }, + { + "Action": [ + "ssm:UpdateInstanceInformation", + "ssmmessages:CreateControlChannel", + "ssmmessages:CreateDataChannel", + "ssmmessages:OpenControlChannel", + "ssmmessages:OpenDataChannel", + "s3:GetEncryptionConfiguration" + ], + "Resource": [ + "*" + ], + "Effect": "Allow" + } + ] +} +``` + + + + +**Last Update**: April 20, 2023 + +``` json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "cloudformation:CreateStack", + "cloudformation:DescribeStacks", + "cloudformation:UpdateStack", + "ec2:CreateSnapshot", + "ec2:DeleteSnapshot", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumesModifications", + "ec2:DescribeKeyPairs", + "iam:AttachGroupPolicy", + "iam:CreatePolicy", + "iam:CreatePolicyVersion", + "iam:DeletePolicy", + "iam:DeletePolicyVersion", + "iam:DetachGroupPolicy", + "iam:GetGroup", + "iam:GetInstanceProfile", + "iam:GetPolicy", + "iam:GetUser", + "iam:ListPolicies", + "iam:ListPolicyVersions", + "pricing:GetProducts", + "sts:AssumeRole", + "sts:GetServiceBearerToken", + "iam:AddRoleToInstanceProfile", + "iam:AddUserToGroup", + "iam:CreateGroup", + "iam:CreateInstanceProfile", + "iam:CreateUser", + "iam:DeleteGroup", + "iam:DeleteInstanceProfile", + "iam:RemoveRoleFromInstanceProfile", + "iam:RemoveUserFromGroup" + ], + "Resource": "*" + } + ] +} +``` + + + + + + +## Controllers EKS Policy + +If you plan to deploy host clusters to AWS EKS, make sure to attach the **PaletteControllersEKSPolicy**. 
+ +**Last Update**: April 20, 2023 + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "ssm:GetParameter" + ], + "Resource": [ + "arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/*" + ], + "Effect": "Allow" + }, + { + "Condition": { + "StringLike": { + "iam:AWSServiceName": "eks.amazonaws.com" + } + }, + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Resource": [ + "arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS" + ], + "Effect": "Allow" + }, + { + "Condition": { + "StringLike": { + "iam:AWSServiceName": "eks-nodegroup.amazonaws.com" + } + }, + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Resource": [ + "arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup" + ], + "Effect": "Allow" + }, + { + "Condition": { + "StringLike": { + "iam:AWSServiceName": "eks-fargate.amazonaws.com" + } + }, + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Resource": [ + "arn:*:iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate" + ], + "Effect": "Allow" + }, + { + "Action": [ + "iam:AddClientIDToOpenIDConnectProvider", + "iam:CreateOpenIDConnectProvider", + "iam:DeleteOpenIDConnectProvider", + "iam:ListOpenIDConnectProviders", + "iam:UpdateOpenIDConnectProviderThumbprint" + ], + "Resource": [ + "*" + ], + "Effect": "Allow" + }, + { + "Action": [ + "iam:GetRole", + "iam:ListAttachedRolePolicies", + "iam:DetachRolePolicy", + "iam:DeleteRole", + "iam:CreateRole", + "iam:TagRole", + "iam:AttachRolePolicy" + ], + "Resource": [ + "arn:*:iam::*:role/*" + ], + "Effect": "Allow" + }, + { + "Action": [ + "iam:GetPolicy" + ], + "Resource": [ + "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" + ], + "Effect": "Allow" + }, + { + "Action": [ + "eks:DescribeCluster", + "eks:ListClusters", + "eks:CreateCluster", + "eks:TagResource", + "eks:UpdateClusterVersion", + "eks:DeleteCluster", + "eks:UpdateClusterConfig", + "eks:UntagResource", + "eks:UpdateNodegroupVersion", + "eks:DescribeNodegroup", + "eks:DeleteNodegroup", + "eks:UpdateNodegroupConfig", + "eks:CreateNodegroup", + "eks:AssociateEncryptionConfig", + "eks:ListIdentityProviderConfigs", + "eks:AssociateIdentityProviderConfig", + "eks:DescribeIdentityProviderConfig", + "eks:DisassociateIdentityProviderConfig" + ], + "Resource": [ + "arn:*:eks:*:*:cluster/*", + "arn:*:eks:*:*:nodegroup/*/*/*" + ], + "Effect": "Allow" + }, + { + "Action": [ + "ec2:AssociateVpcCidrBlock", + "ec2:DisassociateVpcCidrBlock", + "eks:ListAddons", + "eks:CreateAddon", + "eks:DescribeAddonVersions", + "eks:DescribeAddon", + "eks:DeleteAddon", + "eks:UpdateAddon", + "eks:TagResource", + "eks:DescribeFargateProfile", + "eks:CreateFargateProfile", + "eks:DeleteFargateProfile" + ], + "Resource": [ + "*" + ], + "Effect": "Allow" + }, + { + "Condition": { + "StringEquals": { + "iam:PassedToService": "eks.amazonaws.com" + } + }, + "Action": [ + "iam:PassRole" + ], + "Resource": [ + "*" + ], + "Effect": "Allow" + }, + { + "Condition": { + "ForAnyValue:StringLike": { + "kms:ResourceAliases": "alias/cluster-api-provider-aws-*" + } + }, + "Action": [ + "kms:CreateGrant", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ], + "Effect": "Allow" + } + ] +} +``` + +## Restricting Palette VPC Permissions + +You can choose to have Palette operate in a static or dynamic environment. You can configure Palette to perform an AWS cluster creation into an existing VPC. 
The following policies allow Palette to operate while restricting its access in line with the [Principle of Least Privilege](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege).
+
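+
+Before you adopt one of the restricted policies below, you can sanity-check that it still allows the API calls Palette needs by running it through the IAM policy simulator. The following is a minimal sketch using boto3; the policy file name and the sample actions are placeholder assumptions.
+
+```python
+import boto3
+
+iam = boto3.client("iam")
+
+# Load the restricted policy you plan to use (for example, one of the JSON
+# documents below saved to a local file).
+with open("restricted-palette-policy.json") as f:
+    policy_document = f.read()
+
+# Simulate a few representative actions Palette performs during provisioning.
+results = iam.simulate_custom_policy(
+    PolicyInputList=[policy_document],
+    ActionNames=[
+        "ec2:RunInstances",
+        "ec2:DescribeSubnets",
+        "elasticloadbalancing:CreateLoadBalancer",
+    ],
+)
+
+for result in results["EvaluationResults"]:
+    print(result["EvalActionName"], result["EvalDecision"])
+```
+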
+
+
+
+
+
+This is a policy for those who want to grant Palette only the minimum permissions it needs for dynamic placement, where Palette creates and deletes the VPC and associated networking resources for each cluster.
+
+ + +### Minimum Dynamic Permissions + + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DescribeInstances", + "iam:RemoveRoleFromInstanceProfile", + "ec2:AttachInternetGateway", + "iam:AddRoleToInstanceProfile", + "ec2:DeleteRouteTable", + "ec2:AssociateRouteTable", + "ec2:DescribeInternetGateways", + "ec2:CreateRoute", + "ec2:CreateInternetGateway", + "ec2:DescribeVolumes", + "ec2:DescribeKeyPairs", + "ec2:DescribeNetworkAcls", + "ec2:DescribeRouteTables", + "ec2:CreateTags", + "ec2:CreateRouteTable", + "ec2:RunInstances", + "ec2:ModifyInstanceAttribute", + "ec2:TerminateInstances", + "ec2:DetachInternetGateway", + "ec2:DisassociateRouteTable", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeIpv6Pools", + "ec2:DeleteVpc", + "ec2:CreateSubnet", + "ec2:DescribeSubnets", + "iam:CreateInstanceProfile", + "ec2:DisassociateAddress", + "ec2:DescribeAddresses", + "ec2:CreateNatGateway", + "ec2:DescribeRegions", + "ec2:CreateVpc", + "ec2:DescribeDhcpOptions", + "ec2:DescribeVpcAttribute", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeNetworkInterfaceAttribute", + "ec2:CreateSecurityGroup", + "ec2:ModifyVpcAttribute", + "iam:DeleteInstanceProfile", + "ec2:ReleaseAddress", + "iam:GetInstanceProfile", + "ec2:DescribeTags", + "ec2:DeleteRoute", + "ec2:DescribeNatGateways", + "ec2:DescribeIpamPools", + "ec2:AllocateAddress", + "ec2:DescribeSecurityGroups", + "ec2:DescribeImages", + "ec2:DescribeVpcs", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:DescribeTags", + "secretsmanager:CreateSecret", + "secretsmanager:DeleteSecret", + "secretsmanager:TagResource", + "secretsmanager:GetSecretValue", + "autoscaling:StartInstanceRefresh", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "eks:DescribeCluster", + "eks:ListClusters", + "cloudformation:CreateStack", + "cloudformation:DescribeStacks", + "cloudformation:UpdateStack", + "ecr:GetAuthorizationToken", + "iam:PassRole", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DescribeTargetHealth", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage", + "ec2:DeleteInternetGateway", + "ec2:DeleteNatGateway", + "ec2:DeleteNetworkInterface", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSubnet", + "ec2:DeleteTags", + "ssm:UpdateInstanceInformation", + "ssmmessages:CreateControlChannel", + "ssmmessages:CreateDataChannel", + "ssmmessages:OpenControlChannel", + "ssmmessages:OpenDataChannel", + "pricing:GetProducts", + "sts:AssumeRole", + "ec2:ReplaceRoute", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:AssociateAddress", + "tag:GetResources", + "ec2:ModifySubnetAttribute" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "iam:PassRole" + ], + "Resource": [ + "arn:*:iam::*:role/*.cluster-api-provider-aws.sigs.k8s.io" + ] + } + ] +} +``` + +
+ + + + +This is a policy for those who want to restrict Palette to a single VPC and not give Palette access to create or delete VPCs. + +
+ +### Minimum Static Permissions + + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DescribeInstances", + "iam:RemoveRoleFromInstanceProfile", + "pricing:GetProducts", + "sts:AssumeRole", + "ec2:DescribeRegions", + "ec2:DescribeKeyPairs", + "ec2:DescribeVpcs", + "ec2:DescribeVpcAttribute", + "ec2:DescribeSubnets", + "cloudformation:DescribeStacks", + "cloudformation:CreateStack", + "cloudformation:UpdateStack", + "ec2:DescribeRouteTables", + "ec2:DescribeNatGateways", + "ec2:DescribeSecurityGroups", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeTags", + "secretsmanager:CreateSecret", + "secretsmanager:TagResource", + "secretsmanager:GetSecretValue", + "secretsmanager:DeleteSecret", + "iam:GetInstanceProfile", + "iam:AddRoleToInstanceProfile", + "iam:CreateInstanceProfile", + "iam:DeleteInstanceProfile", + "ec2:RunInstances", + "ec2:ModifyInstanceAttribute", + "ec2:TerminateInstances", + "autoscaling:StartInstanceRefresh", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "ssm:UpdateInstanceInformation", + "ec2:DescribeAvailabilityZones", + "eks:DescribeCluster", + "eks:ListClusters", + "ec2:CreateSecurityGroup", + "ec2:DeleteSecurityGroup", + "ec2:RevokeSecurityGroupIngress", + "ssmmessages:CreateControlChannel", + "ssmmessages:CreateDataChannel", + "ssmmessages:OpenControlChannel", + "ssmmessages:OpenDataChannel", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DescribeTargetHealth", + "ec2:CreateTags", + "ec2:DescribeNetworkInterfaces", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "ec2:DisassociateAddress", + "ec2:DescribeAddresses", + "ec2:DescribeVolumes", + "ec2:DescribeImages", + "ec2:ModifyVpcAttribute", + "s3:GetEncryptionConfiguration", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:DescribeVolumesModifications", + "ec2:DetachVolume", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "ec2:DetachInternetGateway", + "ec2:DeleteNetworkInterface", + "tag:GetResources", + "ec2:ReleaseAddress", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:DescribeNetworkInterfaceAttribute", + "ec2:AllocateAddress", + "ec2:AssociateAddress" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "iam:PassRole" + ], + "Resource": [ + "arn:*:iam::*:role/*.cluster-api-provider-aws.sigs.k8s.io" + ] + } + ] +} +``` + +
+ +
+
+:::info
+
+
+The following are important points to be aware of.
+
+- Ensure that the role you created contains all the policies defined above.
+
+- These IAM policies cannot be used as inline policies, as they exceed the 2048 non-whitespace character limit set by AWS.
+
+- The following warning is expected and can be ignored: These policies define some actions, resources, or conditions that do not provide permissions. To grant access, policies must have an action that has an applicable resource or condition.
+
+:::
+
+## Global Role Additional Policies
+
+There may be situations where additional node-level policies must be added to your deployment. For instance, when you create a host cluster with the **AWS EBS CSI** storage layer, ensure **AmazonEBSCSIDriverPolicy** is included. To add additional node-level policies, switch to the **Tenant Admin** project and click on **Tenant Settings** in the **Main Menu**. Click on **Cloud Accounts**. Add an account if one does not exist. After validation of the AWS credentials, ensure `Add IAM policies` is enabled. You can specify additional Amazon Resource Names (ARNs) to be attached. The attached policies will be included in all clusters launched with this specific AWS cloud account.
+
+
+**AmazonEBSCSIDriverPolicy:**
+
+```yml
+roleName: "custom-ng-role"
+roleAdditionalPolicies:
+  - "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
+```
diff --git a/docs/docs-content/clusters/public-cloud/azure/_category_.json b/docs/docs-content/clusters/public-cloud/azure/_category_.json
new file mode 100644
index 0000000000..094470741d
--- /dev/null
+++ b/docs/docs-content/clusters/public-cloud/azure/_category_.json
@@ -0,0 +1,3 @@
+{
+  "position": 10
+}
diff --git a/docs/docs-content/clusters/public-cloud/azure/aks.md b/docs/docs-content/clusters/public-cloud/azure/aks.md
new file mode 100644
index 0000000000..6d48d6fbd6
--- /dev/null
+++ b/docs/docs-content/clusters/public-cloud/azure/aks.md
@@ -0,0 +1,304 @@
+---
+sidebar_label: "Create and Manage Azure AKS Cluster"
+title: "Create and Manage Azure AKS Cluster"
+description: "The methods of creating clusters for a speedy deployment on any CSP"
+hide_table_of_contents: false
+tags: ["public cloud", "azure"]
+sidebar_position: 30
+---
+
+
+Palette supports creating and managing Kubernetes clusters deployed to an Azure subscription. This section guides you on how to create an Azure AKS cluster that is managed by Palette.
+
+Azure clusters can be created under the following scopes:
+
+* Tenant admin
+
+* Project Scope - This is the recommended scope.
+
+Be aware that clusters that are created under the **Tenant Admin** scope are not visible under the Project scope.
+
+
+## Prerequisites
+
+These prerequisites must be met before deploying an AKS workload cluster:
+
+1. You need an active Azure cloud account with sufficient resource limits and permissions to provision compute, network, and security resources in the desired regions.
+
+
+2. You need permissions to deploy clusters using the AKS service on Azure.
+
+
+3. Register your Azure cloud account in Palette as described in the [Creating an Azure Cloud Account](./azure-cloud.md) section below.
+
+
+4. You should have a cluster profile created in Palette for AKS.
+
+
+5. Associate an SSH key pair with the cluster worker nodes.
+
+
+
+## Additional Prerequisites
+
+There are additional prerequisites if you want to set up Azure Active Directory integration for the AKS cluster:
+
+
+ 1. A Tenant Name must be provided as part of the Azure cloud account creation in Palette.
+
+
+ 2. For the Azure client used in the Azure cloud account, these API permissions have to be provided:
+
+ | **API** | **Permission** |
+ | --------------- | ------------------------------------- |
+ | Microsoft Graph | Group.Read.All (Application Type) |
+ | Microsoft Graph | Directory.Read.All (Application Type) |
+
+ 3. You can configure these permissions from the Azure cloud console under **App registrations** > **API permissions** for the specified application.
+
+ :::info
+
+ Palette **also** enables the provisioning of private AKS clusters via a private cloud gateway (Self-Hosted PCG). The Self-Hosted PCG is an AKS cluster that needs to be launched manually and linked to an Azure cloud account in the Palette Management Console. Refer to the [Self-Hosted PCG](gateways.md) documentation for more information.
+
+ :::
+
+
+
+
+To create an Azure cloud account, you need the following Azure account information:
+* Client ID
+* Tenant ID
+* Client Secret
+* Tenant Name (optional)
+* Toggle the `Connect Private Cloud Gateway` option and select the previously created [Self-Hosted PCG](gateways.md) from the drop-down menu to link it to the cloud account.
+
+**Note:**
+
+For an existing cloud account, go to `Edit` and toggle the `Connect Private Cloud Gateway` option to select the created gateway from the drop-down menu.
+
+For Azure cloud account creation, we first need to create an Azure Active Directory (AAD) application that can be used with role-based access control. Follow the steps below to create a new AAD application, assign roles, and create the client secret:
+
+ +1. Follow the steps described [here](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#create-an-azure-active-directory-application) to create a new Azure Active Directory application. Note down your ClientID and TenantID. + + +2. On creating the application, assign a minimum required [ContributorRole](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#contributor). To assign any type of role, the user must have a minimum role of [UserAccessAdministrator](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#user-access-administrator). Follow the [Assign Role To Application](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#assign-a-role-to-the-application) link learn more about roles. + + +3. Follow the steps described in the [Create an Application Secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#create-a-new-application-secret) section to create the client application secret. Store the Client Secret safely as it will not be available as plain text later. + +## Deploy an AKS Cluster + +
+ + + + + +The following steps need to be performed to provision a new cluster: +
+ + +1. If you already have a profile to use, go to **Cluster** > **Add a New Cluster** > **Deploy New Cluster** and select an Azure cloud. If you do not have a profile to use, review the [Creating a Cluster Profile](../../../cluster-profiles/task-define-profile.md) page for steps to create one. + + + +2. Fill the basic cluster profile information such as **Name**, **Description**, **Tags** and **Cloud Account**. + + +3. In the **Cloud Account** dropdown list, select the Azure Cloud account or create a new one. See the [Creating an Azure Cloud Account](azure-cloud.md) section above. + + +4. Next, in the **Cluster profile** tab from the **Managed Kubernetes** list, pick **AKS**, and select the AKS cluster profile definition. + + +5. Review the **Parameters** for the selected cluster profile definitions. By default, parameters for all packs are set with values defined in the cluster profile. + + +6. Complete the **Cluster config** section with the information for each parameter listed below. + + | **Parameter** | **Description** | + | ------------------ | -------------------------------------------------------------------------------------------- | + | **Subscription** | Select the subscription which is to be used to access Azure Services. | + | **Region** | Select a region in Azure in where the cluster should be deployed. | + | **Resource Group** | Select the resource group in which the cluster should be deployed. | + | **SSH Key** | The public SSH key for connecting to the nodes. Review Microsoft's [supported SSH](https://learn.microsoft.com/en-us/azure/virtual-machines/linux/mac-create-ssh-keys#supported-ssh-key-formats) formats. | + | **Static Placement** | By default, Palette uses dynamic placement, wherein a new VPC with a public and private subnet is created to place cluster resources for every cluster. These resources are fully managed by Palette and deleted when the corresponding cluster is deleted.
+  ||**Virtual Resource Group**: The logical container for grouping related Azure resources. |
+  || **Virtual Network**: Select the virtual network from the drop-down menu. |
+  || **Control Plane Subnet**: Select the control plane network from the drop-down menu. |
+  || **Worker Network**: Select the worker network from the drop-down menu. |
+  |**Update worker pools in parallel**| Check the box to concurrently update the worker pools.|
+
+:::caution
+
+If the Palette [cloud account](azure-cloud.md) is created with **Disable Properties** enabled and the cluster option
+**Static Placement** is enabled, the network information from your Azure account will not be imported to Palette. You can manually input the information for the **Control Plane Subnet** and the **Worker Network**.
+
+:::
+
+7. Click **Next** to configure the node pools.
+
+

+ +The [maximum number](https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni#maximum-pods-per-node) of pods per node in an AKS cluster is 250. If you don't specify maxPods when creating new node pools, then the default value of 30 is applied. You can edit this value from the Kubernetes configuration file at any time by editing the `maxPodPerNode` value. Refer to the snippet below: + +
+
+```yaml
+managedMachinePool:
+  maxPodPerNode: 30
+```
+
+
+
+## Node Pools
+
+This section guides you through configuring node pools. As you set up the cluster, the **Nodes config** section will allow you to customize node pools. AKS clusters are composed of System and User node pools, and all pool types can be configured to use the Autoscaler, which scales out pools horizontally based on per-node workload counts.
+
+A complete AKS cluster contains the following:
+

+
+1. A mandatory primary **System Node Pool**. This pool runs the pods necessary to operate a Kubernetes cluster, such as the control plane and etcd. Every system pool must have at least one (1) node; one node is enough for a development cluster, while three (3) or more are recommended for high availability production clusters.
+
+
+2. One or more **Worker Node** pools, depending on your workload requirements. Worker node pools can be sized down to zero (0) nodes when not in use.
+

+
+## Create and Remove Node Pools
+
+During cluster creation, a single node pool is created by default.
+

+
+1. To add additional pools, click **Add Node Pool**.
+
+
+2. Provide any additional Kubernetes labels to assign to each node in the pool. This section is optional. Labels use a `key:value` structure; press the space bar to add additional labels and click the **X** to remove unwanted labels.
+
+
+3. To remove a pool, click **Remove** next to the pool's title.
+

+ +## Create a System Node Pool + +1. Each cluster requires at least one (1) system node pool. To define a pool as a system pool, check the box labeled **System Node Pool**. +
+
+:::info
+Identifying a node pool as a System Pool deactivates the taints and the operating system options within the Cloud Configuration section, because system pools cannot be tainted and their OS cannot be changed from Linux. See the AKS documentation for more details on pool limitations.
+:::
+

+
+2. Provide a name in the **Node pool name** text box. When creating a node, it is good practice to include an identifying name that matches the node in Azure.
+
+
+3. Add the **Desired size**. You can start with three (3) nodes for a multi-node pool.
+
+
+4. Include **Additional Labels**. This is optional.
+
+
+5. In the **Azure Cloud Configuration** section, add the **Instance type**. The cost details are presented for review.
+
+
+6. Enter the **Managed Disk** information and its size.
+
+
+7. If you need additional node pools, click the **Add Worker Pool** button to create the next pool.
+
+
+## Configure Node Pools
+
+In all node pool types, configure the following.
+

+
+1. Provide a name in the **Node pool name** text box. When creating a node, it is good practice to include an identifying name.
+
+  **Note:** Windows clusters have a name limitation of six (6) characters.
+
+
+2. Specify how many nodes the pool will contain by adding the count to the box labeled **Number of nodes in the pool**. Alternatively, you can configure the pool to use the autoscaler controller, as described in the next step.
+
+
+3. As an alternative to a static node pool count, you can enable the autoscaler controller. Click **Enable Autoscaler** to switch to the **Minimum size** and **Maximum size** fields, which allow AKS to increase or decrease the size of the node pool based on workloads. The smallest size of a dynamic pool is zero (0), and the maximum is one thousand (1000). Setting both to the same value is identical to using a static pool size.
+
+
+4. Provide any additional Kubernetes labels to assign to each node in the pool. This section is optional; you can use a `key:value` structure. Press the space bar to add additional labels and click the **X** to remove unwanted labels.
+
+
+5. In the **Azure Cloud Configuration** section:
+
+  - Provide instance details for all nodes in the pool with the **Instance type** drop-down. The cost details are presented for review.
+

+ +:::info +New worker pools may be added if you want to customize specific worker nodes to run specialized workloads. As an example, the default worker pool may be configured with the Standard_D2_v2 instance types for general-purpose workloads, and another worker pool with the instance type Standard_NC12s_v3 can be configured to run GPU workloads. +::: + +
+ + - Provide the disk type via the **Managed Disk** dropdown and the size in Gigabytes (GB) in the **Disk size** field. + +:::info +A minimum allocation of two (2) CPU cores is required across all worker nodes. + +A minimum allocation of 4Gi of memory is required across all worker nodes. +::: + +
+
+  - When you are done setting up all node pools, click **Next** to go to the **Settings** page, where you can **Validate** and finish the cluster deployment wizard.
+
+  **Note**: Keep an eye on the **Cluster Status** once you click **Finish Configuration**, as it will start as *Provisioning*. Deploying an AKS cluster takes a considerable amount of time to complete, and the **Cluster Status** in Palette will display *Ready* when the cluster is complete and ready to use.
+

+
+## Configure an Azure Active Directory
+
+
+Azure Active Directory (AAD) can be enabled with a check box while creating and linking the Azure cloud account in Palette. Once the cloud account is created, you can create the Azure AKS cluster. The AAD-enabled AKS cluster will have its Admin *kubeconfig* file created, which you can download from the Palette UI as the 'Kubernetes config file'. To enable AAD completely, you need to manually create the user's *kubeconfig* file. The following steps describe how to create the custom user *kubeconfig* file:
+

+
+1. In the Azure console, create the groups in Azure AD that will be used to control access to cluster resources through Kubernetes RBAC and Azure AD.
+
+
+2. After you create the groups, create users in the Azure AD.
+
+
+3. Create custom Kubernetes roles and role bindings for the created users, and apply the roles and role bindings using the Admin *kubeconfig* file. A sketch of this step is shown after these steps.
+

+
+:::info
+The above step can also be completed using the Spectro RBAC pack, available under the Authentication section of Add-on Packs.
+:::
+

+
+4. Once the roles and role bindings are created, link these roles to the groups created in Azure AD.
+
+
+5. The users can now access the Azure clusters with the complete benefits of AAD. To get the user-specific *kubeconfig* file, run the following command:
+
+
+  `az aks get-credentials --resource-group <resource-group-name> --name <cluster-name>`
+

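+
+The following is a minimal sketch of step 3 using `kubectl` with the Admin *kubeconfig* file. The namespace `dev`, the role and binding names, the admin kubeconfig filename, and the Azure AD group object ID are illustrative placeholders, not values defined by Palette; substitute your own.
+
+```shell
+# Create a namespaced role that grants read-only access to common resources.
+# The namespace, names, and kubeconfig path below are placeholders.
+kubectl create role dev-read-only \
+  --verb=get,list,watch \
+  --resource=pods,deployments,services \
+  --namespace dev \
+  --kubeconfig admin-kubeconfig.yaml
+
+# Bind the role to an Azure AD group by its object ID so that group members
+# receive the role's permissions after signing in with Azure AD.
+kubectl create rolebinding dev-read-only-binding \
+  --role=dev-read-only \
+  --group=<azure-ad-group-object-id> \
+  --namespace dev \
+  --kubeconfig admin-kubeconfig.yaml
+```
+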
+
+## Resources
+
+- [Use Kubernetes RBAC with Azure AD integration](https://learn.microsoft.com/en-us/azure/aks/azure-ad-rbac?tabs=portal)
+
+- [Azure Kubernetes Service (AKS)](https://docs.microsoft.com/en-us/azure/aks/)
+
diff --git a/docs/docs-content/clusters/public-cloud/azure/architecture.md b/docs/docs-content/clusters/public-cloud/azure/architecture.md
new file mode 100644
index 0000000000..5c3eb12bd3
--- /dev/null
+++ b/docs/docs-content/clusters/public-cloud/azure/architecture.md
@@ -0,0 +1,94 @@
+---
+sidebar_label: "Architecture"
+title: "Azure Architecture"
+description: "Learn about how Palette integrates with Azure and the architecture that powers the integration"
+hide_table_of_contents: false
+tags: ["public cloud", "azure", "architecture"]
+sidebar_position: 0
+---
+
+Palette supports the deployment of host clusters in both Azure and Azure Kubernetes Service (AKS). This page covers the architecture of the integration between Palette and Azure.
+
+## IaaS Architecture
+
+The following are some architectural highlights of Azure clusters deployed by Palette:
+
+- Azure cluster resources are placed within an existing Resource Group.
+
+
+- Nodes are provisioned within a Virtual Network that is auto-created or preexisting, with one subnet for control plane nodes and one for worker nodes. These two subnets are secured with separate Network Security Groups. Both subnets can span across multiple availability zones (AZs).
+
+
+- Worker nodes are distributed across multiple AZs.
+
+
+- Neither the control plane nodes nor the worker nodes have public IPs attached. The Kubernetes API Server endpoint is accessed through a public load balancer.
+
+
+![An Azure IaaS architecture diagram](/clusters_azure_architecture_iaas-overview.png)
+
+
+
+## AKS Architecture
+
+The integration between Palette and Azure AKS unlocks the following capabilities.
+
+- The Palette platform enables effortless deployment and management of containerized applications with fully managed AKS.
+
+
+- Palette provides you with a serverless Kubernetes experience, an integrated continuous integration and continuous delivery (CI/CD) experience, and enterprise-grade security and governance.
+
+
+- Palette helps you unite development and operations on a single platform. This unification helps you achieve faster builds, delivery, and scaling of applications with confidence.
+
+
+- The infrastructure has event-driven autoscaling and triggers that enable elastic provisioning for self-managed infrastructure.
+
+
+- Leverage extensive authentication and authorization capabilities by using Azure Active Directory, and enforce dynamic rules across multiple clusters with Azure Policy.
+
+
+![An Azure AKS architecture diagram](/clusters_azure_architecture_aks-diagram.png)
+
+
+## Azure Storage
+
+During an Azure cluster deployment, Palette creates an [Azure storage account](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-overview) and storage container. Palette copies the base virtual hard disk (VHD) image to the Palette default storage container in the default Palette storage account. The storage account Palette creates has unrestricted access and an auto-generated name. You can attach a custom storage account or storage containers to the Azure cluster.
+
+You must create custom storage accounts or containers before starting the Azure cluster creation process. All custom storage accounts and containers will be listed in the **Cluster config** page during the cluster creation process. 
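+
+If you prefer the Azure CLI, the following is a minimal sketch of creating a custom storage account and container ahead of cluster creation. The resource group, account, and container names, as well as the location and SKU, are placeholder values for illustration only; adjust them to your environment and security requirements.
+
+```shell
+# Create a custom storage account in an existing resource group.
+# All names, the location, and the SKU below are placeholders.
+az storage account create \
+  --name mypalettestorage \
+  --resource-group my-resource-group \
+  --location eastus \
+  --sku Standard_LRS
+
+# Create a storage container inside the new account.
+az storage container create \
+  --name my-palette-container \
+  --account-name mypalettestorage
+```
+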
If you need help creating a custom storage account or container, check out the Azure [Create a Storage Account](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-create?tabs=azure-portal) guide or the Azure [Manage Containers](https://learn.microsoft.com/en-us/azure/storage/blobs/blob-containers-portal) guide.
+
+
+The following section covers a few scenarios where you may need to customize Azure storage in an Azure cluster.
+
+## Custom Name
+
+If you need a custom name for the storage resources, you must create the storage resource and attach it to the cluster. Palette, by default, creates a storage account and container with an auto-generated name. Specify a custom storage account or container with the custom name during the cluster creation process. You can attach a custom storage account, a custom container, or both if needed.
+
+
+## Restrict User Access
+
+If you need to restrict user access to the storage resource, apply custom policies, or limit network access, attach a custom storage account or container that contains the desired security customization to the Azure cluster.
+
+## Network Access
+
+Clusters that use a Palette self-hosted [Private Cloud Gateway](gateways.md) (PCG) should use a custom storage account and container that are restricted to the VNet in which the PCG and cluster are located. Ensure you disable public access and use private access for the Azure storage account.
+
+
+## Tags
+
+You can assign tags to clusters deployed to Azure. Tags can help you with user access control management and more granularly restrict access to various Palette resources, including clusters. Check out the [Resource Filters](../../cluster-management/cluster-tag-filter/create-add-filter.md) documentation page to learn more about using tags to restrict resource access.
+
+The custom tags you create are assigned to the clusters during the creation process. Tags follow the key-value pair format: `department:finance`.
+
+
+### Reserved Tags
+
+The following tags are reserved for internal purposes and are not available for use. Palette will return an error if you use any of the following tags.
+
+- `azure`
+
+
+- `microsoft`
+
+
+- `windows`
diff --git a/docs/docs-content/clusters/public-cloud/azure/azure-cloud.md b/docs/docs-content/clusters/public-cloud/azure/azure-cloud.md
new file mode 100644
index 0000000000..30c50132cb
--- /dev/null
+++ b/docs/docs-content/clusters/public-cloud/azure/azure-cloud.md
@@ -0,0 +1,141 @@
+---
+sidebar_label: "Register and Manage Azure Cloud Account"
+title: "Register and Manage Azure Cloud Account"
+description: "This guide will help you register and manage an Azure cloud account in Palette"
+hide_table_of_contents: false
+tags: ["public cloud", "azure"]
+sidebar_position: 10
+---
+
+Palette supports deploying and managing Kubernetes clusters in an Azure account. This section guides you on how to register and manage an Azure cloud account in Palette.
+
+## Prerequisites
+
+* A [Palette Account](https://console.spectrocloud.com/)
+
+* An active [Azure cloud account](https://portal.azure.com/) with sufficient resource limits and permissions to provision compute, network, and security resources in the desired regions.
+
+* An [Azure App](https://learn.microsoft.com/en-us/azure/app-service/overview) with valid credentials.
+
+## Enable Azure Cloud Account Registration to Palette
+
+To register an Azure cloud account in the Palette console:
+
+1. Log in to [Palette](https://console.spectrocloud.com).
+
+
+2. Navigate to the **Project Overview** drop-down and switch to the **Tenant Admin** scope.
+
+
+3. Select **Tenant Settings** from the left **Main Menu**.
+
+
+4. From the Tenant Settings, go to **Cloud Accounts** and click on **+ Add Azure Account**.
+
+
+5. The Azure cloud account wizard requires the following information:
+
+| **Basic Information** |Description|
+|-------------------------|-----------|
+|Account Name| A custom account name|
+|Client ID| The unique client ID from the Azure console|
+|Tenant ID| The unique tenant ID from the Azure console|
+|[Client Secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#create-an-azure-active-directory-application)| The Azure secret for authentication|
+|Tenant Name| An optional tenant name|
+|[Disable Properties](/clusters/public-cloud/azure/azure-cloud#disableproperties)| Disables the import of Azure networking details.|
+|Toggle **Connect Private Cloud Gateway**| An option to select an already created [Self-Hosted PCG](gateways.md) from the drop-down menu to link it to the cloud account. |
+
+:::info
+
+ For existing cloud accounts, go to **Edit** and toggle the **Connect Private Cloud Gateway** option to select the created gateway from the drop-down menu.
+:::
+
+
+6. Click on the **Confirm** button to complete the wizard.
+
+
+### Disable Properties
+
+When the above information is provided to the cloud account creation wizard, Azure networking details are imported into the Palette console. To disable these network calls from the Palette console to the Azure account, click **Disable Properties**.
+
+You also need an Azure Active Directory (AAD) application that can be used with role-based access control. Follow the steps below to create a new AAD application, assign roles, and create the client secret. A CLI sketch of these steps follows the list.
+
+
+1. Follow the steps described [here](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#create-an-azure-active-directory-application) to create a new Azure Active Directory application. Note down your Client ID and Tenant ID.
+
+
+2. After creating the application, assign it at least the [Contributor](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#contributor) role. To assign any kind of role, the user must have a minimum role of [User Access Administrator](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#user-access-administrator). The role can be assigned by following the [Assign Role To Application](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#assign-a-role-to-the-application) guide.
+
+
+3. Follow the steps described in the [Create an Application Secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#create-a-new-application-secret) section to create the client application secret. Store the Client Secret safely, as it will not be available as plain text later.
+

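+
+As a sketch of the CLI alternative mentioned above, the command below creates the AAD application (service principal) and assigns the Contributor role in one step. The display name and subscription ID are placeholders; the output includes the `appId` (Client ID), `tenant` (Tenant ID), and `password` (Client Secret) values that the wizard asks for.
+
+```shell
+# Create a service principal and assign the Contributor role scoped to a subscription.
+# The display name and subscription ID are placeholders.
+az ad sp create-for-rbac \
+  --name palette-cloud-account \
+  --role Contributor \
+  --scopes /subscriptions/<subscription-id>
+```
+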
+ +## Validate + +To validate the Azure Cloud account creation in Palette console: + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the **Project Overview** drop-down and switch to the **Tenant Admin**. + + +3. Select **Tenant Settings** from the left **Main Menu**. + + +4. From the Tenant Settings go to **Cloud Accounts** + + +5. Below the label **Azure**, the available Azure cloud accounts are listed. + +
+
+## Manage Azure Accounts
+
+After an Azure cloud account has been registered with Palette, you can change the integration settings or remove the Azure account with the **Edit** and **Delete** capabilities, respectively.
+
+### Edit an Azure Account
+
+To edit the Azure cloud account created in the Palette console:
+
+1. Log in to [Palette](https://console.spectrocloud.com).
+
+
+2. Navigate to the **Project Overview** drop-down and switch to the **Tenant Admin**.
+
+
+3. Select **Tenant Settings** from the left **Main Menu**.
+
+
+4. From the Tenant Settings, go to **Cloud Accounts**.
+
+
+5. Next to the name of the cloud account you want to edit, click the **three-dots Menu** and select **Edit**.
+
+
+6. Make the required changes and click on the **Confirm** button to complete the wizard.
+

+
+### Remove an Azure Account
+
+Use the following steps to delete an Azure cloud account from Palette.
+
+1. Log in to [Palette](https://console.spectrocloud.com).
+
+
+2. Navigate to the **Project Overview** drop-down and switch to the **Tenant Admin**.
+
+
+3. Select **Tenant Settings** from the left **Main Menu**.
+
+
+4. From the Tenant Settings, go to **Cloud Accounts**.
+
+
+5. Next to the name of the cloud account you want to remove, click the **three-dots Menu** and select **Delete**.
+
+
diff --git a/docs/docs-content/clusters/public-cloud/azure/azure.md b/docs/docs-content/clusters/public-cloud/azure/azure.md
new file mode 100644
index 0000000000..b52233539f
--- /dev/null
+++ b/docs/docs-content/clusters/public-cloud/azure/azure.md
@@ -0,0 +1,35 @@
+---
+sidebar_label: "Azure"
+title: "Azure"
+description: "The methods of creating clusters for a speedy deployment on any CSP"
+hide_table_of_contents: false
+tags:
+- "public cloud"
+- azure
+---
+
+Palette supports integration with [Microsoft Azure](https://azure.microsoft.com/en-us). You can deploy and manage [Host Clusters](../../../glossary-all.md#hostcluster) in Azure. To get started, check out the [Register and Manage Azure Cloud Account](azure-cloud.md#manage-azure-accounts) guide.
+

+
+## Resources
+
+To learn more about Palette and Azure cluster creation and its capabilities, check out the following resources:
+
+- [Register and Manage Azure Cloud Account](azure-cloud.md)
+
+
+- [Create and Manage Azure Cluster](create-azure-cluster.md#deploy-an-azure-cluster-with-palette)
+
+
+- [Deleting an Azure Cluster](../../cluster-management/remove-clusters.md)
+
+
+- [Cluster Management Day Two Operations](../../cluster-management/cluster-management.md)
+
+
+- [Azure Architecture](architecture.md)
+
+
+- [Cluster Removal](../../cluster-management/remove-clusters.md)
+
diff --git a/docs/docs-content/clusters/public-cloud/azure/create-azure-cluster.md b/docs/docs-content/clusters/public-cloud/azure/create-azure-cluster.md
new file mode 100644
index 0000000000..3e3ec8c700
--- /dev/null
+++ b/docs/docs-content/clusters/public-cloud/azure/create-azure-cluster.md
@@ -0,0 +1,196 @@
+---
+sidebar_label: "Create and Manage Azure IaaS Cluster"
+title: "Create and Manage Azure IaaS Cluster"
+description: "The methods of creating an Azure cluster in Palette"
+hide_table_of_contents: false
+tags: ["public cloud", "azure"]
+sidebar_position: 20
+---
+
+
+You can deploy Azure clusters in the Palette platform. This section highlights the prerequisites and deployment steps of Palette Azure clusters.
+
+Azure clusters can be created under the following scopes:
+
+* Tenant Admin
+
+* Project Scope - This is the recommended scope.
+
+Be aware that clusters created under the **Tenant Admin** scope are not visible under the Project scope.
+
+## Prerequisites
+
+The following prerequisites must be met before deploying a workload cluster in Azure:
+
+1. You must have an active Azure cloud account with sufficient resource limits and permissions to provision compute, network, and security resources in the desired regions.
+
+
+2. Register your Azure cloud account in Palette as described in the [Creating an Azure Cloud account](azure-cloud.md#enable-azure-cloud-account-registration-to-palette) section.
+
+
+3. A [cluster profile created](../../../cluster-profiles/task-define-profile.md) for Azure cloud.
+
+
+
+
+## Deploy an Azure Cluster with Palette
+
+Use the following steps to provision a new Azure cluster:
+
+1. Log in to [Palette](https://console.spectrocloud.com).
+
+
+2. Click on **Clusters** from the left **Main Menu**.
+
+
+3. In the cluster page, click the **+ Add New Cluster** button and select **Create New Cluster**.
+
+
+4. Select **Azure** as the cloud type and click on **Start Azure Configuration** to input cluster information.
+
+
+5. Provide the basic cluster information such as **Name**, **Description** (optional), and **Tags** (optional) and select the [**Azure Cloud Account**](azure-cloud.md#enable-azure-cloud-account-registration-to-palette) from the drop-down menu. Azure cloud accounts with credentials must be pre-configured in project settings. Click on the **Next** button.
+
+
+6. Select the **Cluster Profile** created for the Azure environment. The profile definition will be used as the cluster construction template. Click on **Next**.
+
+
+7. Review and override pack parameters as desired. By default, parameters for all packs are set with values defined in the Cluster Profile. Click on **Next**.
+
+
+8. Provide the Azure Cloud account placement information for cluster configuration. If you have custom storage accounts or storage containers available, they will be eligible for attachment. 
To learn more about attaching custom storage to a cluster, check out the [Azure storage](architecture#azure-storage) page. + + +:::caution + +If the Azure account is [registered](azure-cloud.md#enable-azure-cloud-account-registration-to-palette) with the option **Disable Properties** enabled and the cluster configuration option **Static Placement** is enabled, then the network information from your Azure account will not be imported by Palette. You can manually input the information for the **Control Plane Subnet** and the **Worker Network**, but be aware that drop-down menu selections will be empty. + +::: + +
+
+|**Parameter**| **Description**|
+|-------------|---------------|
+| **Subscription** | From the drop-down menu, select the subscription that will be used to access Azure services.|
+| **Region** | Select a region in Azure in which the cluster should be deployed.|
+| **Resource Group** | Select the Azure resource group in which the cluster should be deployed.|
+| **Storage Account** | Optionally provide the storage account. Review the [Azure Storage section](architecture#azure-storage) for custom storage use cases. |
+| **Storage Container**| Optionally provide the Azure storage container. Review the [Azure Storage section](architecture#azure-storage) for custom storage use cases.|
+| **SSH Key** | The public SSH key for connecting to the nodes. Review Microsoft's [supported SSH](https://learn.microsoft.com/en-us/azure/virtual-machines/linux/mac-create-ssh-keys#supported-ssh-key-formats) formats. |
+| **Static Placement** | By default, Palette uses dynamic placement, in which a new VPC with a public and private subnet is created to place cluster resources for every cluster. These resources are fully managed by Palette and deleted when the corresponding cluster is deleted. If you want to place resources into pre-existing VPCs and subnets, you can enable the **Static Placement** option. Review the [Static Placement](#static-placement-table) table below for available parameters for static placement.|
+|**Update worker pools in parallel**| If you have multiple worker pools, check the box to update all worker pools simultaneously. By default, worker pools are updated sequentially.|
+|**Private API Server LB**|This option applies when the cluster is deployed via the [Azure Private Endpoint](gateways.md). You can enable this option if your API Server must have private access. Review the [Private API Server LB](#private-api-server-lb-table) table below for more details.|
+
+#### Static Placement Table
+
+| **Parameter** | **Description** |
+|------------------------|------------------------------------------------------------|
+| **Network Resource Group** | The logical container for grouping related Azure resources |
+| **Virtual Network** | Select the virtual network from the drop-down menu. |
+| **CIDR Block** | Select the CIDR address from the drop-down menu. |
+| **Control Plane Subnet** | Select the control plane network from the drop-down menu. |
+| **Worker Network** | Select the worker network from the drop-down menu. |
+
+
+
+#### Private API Server LB Table
+
+
+| **Parameter** | **Description**|
+|----------------------|----------------------------------------------------------------------------------------------------------------------------------------|
+| **Private DNS Zone** | Optionally select the DNS Zone from the drop-down menu. If you do not select a DNS Zone, one will be generated and assigned.|
+| **IP Allocation Method** | Allocates an available IP address from the private endpoint VNet. Review the [IP Allocation Method Table](#ip-allocation-method-table) below for more details.|
+
+##### IP Allocation Method Table
+
+| **Parameter** | **Description** |
+|----------------------|----------------------------------------------------------------------------------------------------------------------------------------|
+| **Dynamic** | Uses the Dynamic Host Configuration Protocol (DHCP) to dynamically allocate IP addresses from the available Virtual Network IP CIDR range.|
+| **Static** | You can specify a static IP address from the available Virtual Network IP range.|
+
+When you have provided all the cluster configuration details to the wizard, click on **Next** and proceed to node configuration.
+

+
+9. Configure the master and worker node pools. A master and a worker node pool are configured by default. To learn more about the configuration options, review the [Node Pool](../../cluster-management/node-pool.md) documentation page.
+
+:::info
+
+You can add new worker pools to customize certain worker nodes to run specialized workloads. For example, the default worker pool may be configured with the Standard_D2_v2 instance types for general-purpose workloads and another worker pool with instance type Standard_NC12s_v3 can be configured to run GPU workloads.
+
+:::
+

+
+
+10. The settings page is where you can configure the patching schedule, security scans, backup settings, set up role-based access control (RBAC), and enable [Palette Virtual Clusters](../../../devx/palette-virtual-clusters/palette-virtual-clusters.md). Review the settings and make changes if needed. Click on **Validate**.
+
+
+11. Review the settings summary and click on **Finish Configuration** to deploy the cluster. Be aware that provisioning IaaS clusters can take several minutes.
+
+
+The cluster details page contains the status and details of the deployment. Use this page to track the deployment progress.
+
+## Validate
+
+You can validate that your cluster is up and running by reviewing the cluster details page. Navigate to the left **Main Menu** and click on **Clusters**. The **Clusters** page contains a list of all available clusters managed by Palette. Click on the row of the cluster you wish to review to open its details page. Ensure the **Cluster Status** field contains the value **Running**.
+
+## Deleting an Azure IaaS Cluster
+
+The deletion of an Azure IaaS cluster results in the removal of all instances and associated resources created for the cluster. To perform a cluster deletion, use the following steps.
+
+
+1. Ensure you are in the correct project scope.
+
+
+2. Navigate to the left **Main Menu** and click on **Clusters**.
+
+
+3. Click on the cluster that you want to remove.
+
+
+4. Click on the **Settings** drop-down menu.
+
+
+5. Click on **Delete Cluster**.
+
+
+6. Type in the name of the cluster and click on **OK**.
+
+The cluster status is updated to **Deleting** while cluster resources are being deleted. Once all resources are successfully deleted, the cluster status is updated to **Deleted** and is removed from the list of clusters.
+
+## Force Delete a Cluster
+
+If a cluster is stuck in the **Deletion** state for a minimum of 15 minutes, it becomes eligible for force deletion. You can force delete a cluster from the tenant and project admin scope.
+To force delete a cluster, follow the same steps outlined in [Deleting an Azure IaaS Cluster](#deleting-an-azure-iaas-cluster). However, after 15 minutes, a **Force Delete Cluster** option is available in the **Settings** drop-down menu. The **Settings** drop-down menu will provide you with an estimated time left before the force deletion becomes available.
+

+
+:::caution
+
+
+A force delete can result in Palette-provisioned resources being missed in the removal process. Verify there are no remaining Palette-provisioned resources, such as:
+
+- Virtual Network (VNet)
+- Static Public IPs
+- Virtual Network Interfaces
+- Load Balancers
+- VHD
+- Managed Disks
+- Virtual Network Gateway
+
+Failure to remove provisioned resources can result in unexpected costs.
+
+:::
+
+## Validate
+
+To validate the Azure cluster creation and deletion status:
+
+1. Log in to [Palette](https://console.spectrocloud.com).
+
+
+2. Click on **Clusters** on the left **Main Menu**.
+
+
+3. Click on the check box **Deleted only** to view all the clusters deleted in the last 72 hours.
diff --git a/docs/docs-content/clusters/public-cloud/azure/gateways.md b/docs/docs-content/clusters/public-cloud/azure/gateways.md
new file mode 100644
index 0000000000..0c52da8176
--- /dev/null
+++ b/docs/docs-content/clusters/public-cloud/azure/gateways.md
@@ -0,0 +1,184 @@
+---
+sidebar_label: "Self-Hosted PCG"
+title: "Self-Hosted PCG"
+description: "The methods of creating Self Hosted PCG on Palette for secured cluster deployment"
+icon: ""
+hide_table_of_contents: false
+tags: ["public cloud", "azure"]
+sidebar_position: 40
+---
+
+
+
+Palette enables the provisioning of private AKS clusters within Azure Virtual Networks (VNets) for enhanced security by offloading the orchestration to a Private Cloud Gateway deployed within the same account as the private AKS clusters. This Private Cloud Gateway (Self-Hosted PCG) is an AKS cluster that you launch manually and link to an Azure cloud account in the Palette Management Console. The following sections discuss the prerequisites and detailed steps for deploying a Palette self-hosted PCG for Azure cloud accounts. Once the self-hosted PCG is created and linked with an Azure cloud account in Palette, any Azure clusters provisioned using that cloud account will be orchestrated via the self-hosted PCG, thereby enabling the provisioning of private AKS clusters.
+
+## Prerequisites
+
+* An active [Azure cloud account](https://portal.azure.com/) with sufficient resource limits and permissions to provision compute, network, and security resources in the desired regions.
+
+
+* The [Azure CLI v2.0.0+](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli)
+
+
+
+## Create a Virtual Network in the Azure Console
+
+Log in to the [Azure portal](https://portal.azure.com/) and create a [Virtual Network](https://learn.microsoft.com/en-us/azure/virtual-network/virtual-networks-overview) (VNet). Ensure the VNet contains the following network settings.
+
+1. Three subnets. Each of the subnets should have a minimum of **333 available IPs**. (Note: 333 IPs are required if you want to use Azure Container Networking Interface (CNI) networking and are not necessary if you are using Kubenet networking.)
+
+
+2. You must have the `Microsoft.Authorization/roleAssignments/write` permission, which is required to connect the virtual network to the Azure Kubernetes cluster if the network configuration for the cluster is Azure CNI.
+
+
+3. The VNet needs to be linked with:
+   * Azure Kubernetes Cluster
+   * Azure Bastion Host (Jump Box Virtual Machine) to connect to the Azure target Kubernetes cluster securely.

+
+  :::info
+
+  **Note**: A bastion host is only required if you are accessing a Kubernetes cluster that is located inside a private Azure Virtual Network (VNet) that only exposes private endpoints. If you have direct network access to the VNet, then you do not require a bastion host. Alternatively, you can also deploy a bastion host and remove it later if it is no longer required.
+
+  :::
+
+
+## Create an Azure Kubernetes Target Cluster in the Azure Console
+
+
+1. Log in to the **Azure Portal**, go to **Kubernetes services**, and click **Create a Kubernetes Cluster** to initiate the cluster creation.
+
+
+2. Fill out the essential information in the **Create Cluster** wizard, paying particular attention to the following settings.
+
+
+3. Primary Node Pool:
+   * Node Size: Select a VM instance with a minimum of 16 GiB RAM and four vCPUs.
+   * Node Count: 1 or 3, as per your requirements.
+
+
+4. Select **Manual** as the scale method.
+
+
+5. Networking: The two options available for the network configuration are **Kubenet** and **Azure CNI**.

+
+#### Kubenet:
+
+If you select this option, ensure that the VNet used is the one created in the previous step. This VNet selection is not possible through the Azure user interface and must instead be done [programmatically](https://learn.microsoft.com/en-us/azure/aks/configure-kubenet), as shown below:
+

+
+```shell
+SUBNET_ID=$(az network vnet subnet show --resource-group resource-group-name --vnet-name vnet-name --name subnet-name --query id -o tsv)
+```

+
+```shell
+az aks create \
+  --resource-group resource-group-name \
+  --name cluster-name \
+  --enable-private-cluster \
+  --node-count count-value \
+  --network-plugin kubenet \
+  --vnet-subnet-id $SUBNET_ID \
+  --max-pods 110
+```
+
+#### Azure CNI:
+
+6. Security: For network security, enable **Private cluster**.
+
+In the Azure CNI network configuration, select the static [virtual network created earlier](gateways#create-a-virtual-network-in-the-azure-console) from the drop-down menu.
+

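+
+As a comparable sketch for the Azure CNI path, assuming the same `SUBNET_ID` variable from the earlier snippet, a private AKS cluster can also be created from the CLI. The resource group, cluster name, and node count values are placeholders.
+
+```shell
+# Placeholder values; reuse the SUBNET_ID variable captured earlier.
+az aks create \
+  --resource-group resource-group-name \
+  --name cluster-name \
+  --enable-private-cluster \
+  --node-count count-value \
+  --network-plugin azure \
+  --vnet-subnet-id $SUBNET_ID \
+  --max-pods 110
+```
+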
+
+## Establish external connectivity with the Target Azure Kubernetes Cluster
+
+To establish a connection to the target Azure Kubernetes cluster, first connect to the Bastion Host (Jump Box), and then connect to the target Azure Kubernetes cluster that will be imported into the Palette console as a self-hosted PCG.
+
+### Get Connected to the Bastion Host (Jump Box)
+
+To establish external connectivity for the private Kubernetes cluster, launch an Azure Virtual Machine as a jump box with an SSH key attached.

+
+:::info
+Port Prerequisite:
+Add an inbound network security group rule with destination port 22.
+:::
+
+
+1. Open the client terminal of your choice.
+
+
+
+2. Ensure you have read-only access to the private key. The `chmod` command is only supported in Linux environments (for example, WSL on Windows or Terminal on macOS).
+
+  ```shell
+  chmod 400 path-to-private-key.pem
+  ```
+
+3. Issue the command below to connect to your VM. Replace `path-to-private-key.pem` with the path to your private key and `public-ip-address-of-bastion-jump-box` with the public IP address of your VM.
+
+  ```shell
+  ssh -i path-to-private-key.pem azureuser@public-ip-address-of-bastion-jump-box
+  ```
+
+
+### Get connected to the Target Azure Kubernetes cluster
+
+After connecting to the Bastion host, establish a connection to the target Azure Kubernetes cluster. Refer to the following sample commands:
+
+  ```shell
+  az login
+  ```
+
+
+  ```shell
+  az account set --subscription 8710ff2b-e468-434a-9a84-e522999f6b81
+  ```
+
+
+  ```shell
+  az aks get-credentials --resource-group resource-group-name --name target-cluster-name
+  ```
+
+## Deploy Palette Self Hosted PCG to Palette Console

+ +1. Login to Palette console as Tenant Admin and go to Tenant settings. + + +2. Go to Private Cloud Gateways and select + Add New Private Cloud Gateway. + + +3. From the available options, select the `Self Hosted Gateway.` + +4. In the create gateway wizard, + * Private cloud gateway name: Custom gateway name + * Cloud type: Select the cloud type as Azure for Azure self-hosted PCG. + +5.Install the Palette agent (also check for prerequisites and instructions on Palette UI) + + **Example:** + ```shell + kubectl apply -f endpoint/v1/pcg/12345678901234/services/jet/manifest + ``` + + ```shell + kubectl apply -n cluster-1234abcd -f https://endpoint/v1/pcg/12345678901234/services/ally/manifest + ``` + +6. The self-hosted PCG will be provisioned and will start running in the Palette console. The healthy self-hosted PCG can be managed from the Palette UI page. The healthy self-hosted PCG can be linked to Azure Cloud Account (optionally) to enjoy the enhanced security benefits. We support the [PCG migration](../../../enterprise-version/enterprise-cluster-management.md#palette-pcg-migration) for the public cloud self-hosted PCGs as well. + + :::info + + Palette users can launch Azure clusters without a PCG also. To enjoy the additional benefits of this private cloud Self-hosted PCG, users need to attach it to the Palette Azure cloud account. + + ::: + +## Attach the Self Hosted PCG to the Azure Cloud Account + +The self-hosted PCG can be attached to an existing Azure Palette cloud account or while creating a new Azure Palette cloud account. Refer to the [Azure Cloud Account](azure-cloud.md) creation. diff --git a/docs/docs-content/clusters/public-cloud/cox-edge/_category_.json b/docs/docs-content/clusters/public-cloud/cox-edge/_category_.json new file mode 100644 index 0000000000..c3460c6dbd --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/cox-edge/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 30 +} diff --git a/docs/docs-content/clusters/public-cloud/cox-edge/add-cox-edge-accounts.md b/docs/docs-content/clusters/public-cloud/cox-edge/add-cox-edge-accounts.md new file mode 100644 index 0000000000..b73bc14631 --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/cox-edge/add-cox-edge-accounts.md @@ -0,0 +1,110 @@ +--- +sidebar_label: "Register and Manage Cox Edge Accounts" +title: "Register and Manage Cox Edge Accounts" +description: "Learn how to add and manage a Cox Edge account in Palette." +hide_table_of_contents: false +sidebar_position: 0 +tags: ["public cloud", "cox edge"] +--- + +Palette supports integration with Cox Edge accounts and account environments. This section explains how to create a Cox Edge account in Palette. + +## Add Cox Edge Account + +To add a Cox Edge account to Palette, use the following instructions. + +## Prerequisites + +- A [Spectro Cloud](https://console.spectrocloud.com) account. + +- A [Cox Edge](https://portal.coxedge.com/login) account. + +- Tenant admin access in Palette. + +- Your Cox Edge organization id. Ask your system administrator for this value or use the API endpoint `api/v2/organizations` to retrieve the organization id. + + ```shell + curl --silent "https://portal.coxedge.com/api/v2/organizations" \ + --header "MC-Api-Key: YourAPIkeyHere" | jq '.data[] | {id}' + ``` + + ```shell hideClipboard + { + "id": "268ce256-15ef-465f-bc0f-952fac3e7c1e" + } + ``` + +## Enablement + +You can use the steps below or the interactive guide to help you add a Cox Edge account to Palette. 
Click on the first image link to navigate the destination site with the tutorial at right. + + + +1. Log in to the [Cox Edge](https://portal.coxedge.com/login) portal. + + +2. Navigate to the drop-down **User Menu** and click on **API Credentials**. + + +3. Select **Generate API key**. + + +4. Give the key a name and select **Generate**. + + +5. Copy the API key value to a secure location. You will use this value in the future. + + +6. Copy the API endpoint URL. The API endpoint is located above the table that lists all your API keys. + + +7. Click the **four-tile Home** button at the top and select **Edge Compute**. + + +8. Next, click on the environment drop-down menu and select **Add Environment** to create a compute environment. A compute environment is required when adding a Cox Edge account to Palette. If you already have a compute environment available, skip to step 11. + + +9. Provide a name and description and click **Next**. + + +10. Add members to the compute environment. You can also add members at a later point. Apply the changes. + + +11. Open another browser tab and log in to [Palette](https://console.spectrocloud.com) as a Tenant admin. + + +12. Go to **Tenant Settings** > **Cloud Accounts** and click **+Add Cox Edge Account**. + + +13. Fill out the following input fields. + + - Account Name: Assign a name to the Cox Edge account. + + - API Base URL: Add the API endpoint URL you copied down earlier. You can locate this value in the API Key overview page in the Cox Edge portal. + + - API Key: Provide the API key you generated earlier. + + - Organization Id: Ask your system administrator for this value or use the Cox Edge API to retrieve the organization id. + + - Environment: This optional field allows you to enter name of the environment you wish to target if you have one. + + - Service: Use the value `edge-services`. + +14. Click **Validate** to confirm you have access to the Cox Edge account. + + +15. Select **Confirm** to add the Cox Edge account to Palette. + + +## Validate + +1. Log in to [Palette](https://console.spectrocloud.com) as a Tenant admin. + + +2. Navigate to the left **Main Menu** and select **Tenant Settings** to ensure you are in the **Cloud Accounts** page. + + +3. Your Cox Edge account is now listed with all the other infrastructure provider accounts. + + +4. You can also deploy a cluster to Cox Edge to validate everything is working. Use the [Create and Manage Cox IaaS Cluster](create-cox-cluster.md) guide to create and deploy a cluster to Cox Edge. \ No newline at end of file diff --git a/docs/docs-content/clusters/public-cloud/cox-edge/cox-edge.md b/docs/docs-content/clusters/public-cloud/cox-edge/cox-edge.md new file mode 100644 index 0000000000..aec97c4794 --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/cox-edge/cox-edge.md @@ -0,0 +1,23 @@ +--- +sidebar_label: "Cox Edge" +title: "Cox Edge" +description: "Palette supports deployment of workloads to Cox Edge." +hide_table_of_contents: false +tags: ["public cloud", "cox edge"] +--- + +Palette enables deployment of workloads to Cox Edge, a last-mile edge cloud provider. Using the Cox Edge network allows you to deploy compute resources closer to the location of your application consumers, reducing latency and enhancing the user experience. + + +## Get Started + +To get started with Palette and Cox Edge, check out the [Create and Manage Cox Edge IaaS Cluster](create-cox-cluster.md) guide. 
+ + +## Resources + +- [Register and Manage Cox Edge Accounts](add-cox-edge-accounts.md) + +- [Create and Manage Cox IaaS Cluster](create-cox-cluster.md) + +- [Required Network Rules](network-rules.md) diff --git a/docs/docs-content/clusters/public-cloud/cox-edge/create-cox-cluster.md b/docs/docs-content/clusters/public-cloud/cox-edge/create-cox-cluster.md new file mode 100644 index 0000000000..f4e266ad9f --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/cox-edge/create-cox-cluster.md @@ -0,0 +1,141 @@ +--- +sidebar_label: "Create and Manage Cox IaaS Cluster" +title: "Create and Manage Cox IaaS Cluster" +description: "Learn how to add and manage a cluster deployed to Cox Edge." +hide_table_of_contents: false +sidebar_position: 10 +tags: ["public cloud", "cox edge"] +--- + +Palette supports creating and managing Kubernetes clusters deployed to a Cox Edge account. This section guides you on how to create a Kubernetes cluster in Cox Edge that is managed by Palette. + +## Prerequisites + +- A [Spectro Cloud](https://console.spectrocloud.com) account. + +- A [Cox Edge](https://portal.coxedge.com/login) account. + +- A Cox Edge account registered in Palette. Check out the [Register and Manage Cox Edge Accounts](add-cox-edge-accounts.md) guide to learn how to register a Cox Edge account in Palette. + +- A cluster profile for Cox Edge clusters. If you need guidance creating a cluster profile, check out the [Creating Cluster Profiles](../../../cluster-profiles/task-define-profile.md) guide. + + +## Create a Cluster + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Click **+ Add New Cluster** and select **Deploy New Cluster**. + + +4. Select **Cox Edge** from the list of infrastructure providers. + + +5. Fill out the following input fields and click **Next**. + + - Cluster name: The name of the new cluster. + - Description: A text value that explains the cluster. + - Tags: Assign tags to the cluster. + - Cloud Account: Select your Cox Edge account. + + +6. Select a cluster profile that is compatible with Cox Edge. If you need guidance creating a cluster profile, check out the [Creating Cluster Profiles](../../../cluster-profiles/task-define-profile.md) guide. + + :::caution + + If you want to use the Kubernetes cluster autoscaler feature and you are using [Longhorn](../../../integrations/longhorn.md) for the storage container interface. Set the `charts.longhorn.defaultSettings.kubernetesClusterAutoscalerEnabled` parameter to `true`. + + ```yaml + charts: + longhorn: + defaultSettings: + kubernetesClusterAutoscalerEnabled: true + ``` + ::: + + +7. Review the cluster profile and all of its manifest files. Click **Next** to continue. + + +8. Fill out the following input fields and select **Next**. + - SSH Keys: Select an SSH key pair or create a new key pair. + - Load Balancer PoP: The location where you want to deploy the cluster compute resources. + - Organization: The Cox Edge organization to target for the deployment. + - Environment: The Cox Edge environment to deploy the compute resources. + - Update worker pools in parallel: Enable this checkbox if you wish to update worker pool nodes in parallel. + +9. Configure the master and worker node pools. The following input fields apply to Cox Edge master and worker node pools. For a description of input fields that are common across target platforms refer to the [Node Pools](../../cluster-management/node-pool.md) management page. 
Click **Next** when you are done. + +
+ + #### Master Pool configuration: + + - Cloud Configuration: + + - Deployment Name: The name to assign the Cox Edge deployment. + - PoP: The Cox Edge location to target. + - Instance Type: The compute size. + - Network policies: The network rules to apply to the deployment. Review the list of required network policies in the [Network Rules](network-rules.md) documentation. + +
+ + :::caution + + Use the network rules specified in the [Network Rules](network-rules.md) documentation. If you fail to add the required network rules, Palette will be unable to deploy the cluster to Cox Edge. + + ::: + + #### Worker Pool configuration: + + - Cloud Configuration: + - Deployment Name: The name to assign the Cox Edge deployment. + - PoP: The Cox Edge location to target. + - Instance Type: The compute size. + - Network policies: The network rules to apply to the deployment. Review the list of required network policies in the [Network Rules](network-rules.md) documentation. + + +10. The settings page is where you can configure patching schedule, security scans, backup settings, set up role-based access control (RBAC), and enable [Palette Virtual Clusters](../../../devx/palette-virtual-clusters/palette-virtual-clusters.md). Review the settings and make changes if needed. Click **Validate**. + + +11. Review the settings summary and click **Finish Configuration** to deploy the cluster. Be aware that provisioning IaaS clusters can take several minutes. + +The cluster details page contains the status and details of the deployment. Use this page to track the deployment progress. + + +## Validate + +You can validate your cluster is up and running by reviewing the cluster details page. Navigate to the left **Main Menu** and click **Clusters**. The **Clusters** page contains a list of all available clusters Palette manages. Select the cluster to review its details page. Ensure the **Cluster Status** field contains the value **Running**. + + +## Delete a Cox Edge IaaS Cluster + +When you delete a Cox Edge cluster, all instances and associated resources created for the cluster are removed. To delete a cluster, use the following steps. + + +1. Ensure you are in the correct project scope. + + +2. Navigate to the left **Main Menu** and click **Clusters**. + + +3. Select the cluster you want to delete. + + +4. Click the **Settings** drop-down menu and select **Delete Cluster**. + + +5. Click on **Delete Cluster** + + +6. Type the name of the cluster and click **OK** + +The cluster status is updated to **Deleting** while cluster resources are being deleted. When all resources are successfully deleted, the cluster status is updated to **Deleted** and the cluster is removed from the list. + +## Force Delete a Cluster + +If a cluster is stuck in the **Deletion** state for a minimum of 15 minutes it becomes eligible for force deletion. You can force delete a cluster from the tenant and project admin scope. +To force delete a cluster follow the same steps outlined in [Delete a Cox Edge IaaS Cluster](#delete-a-cox-edge-iaas-cluster). However, after 15 minutes, a **Force Delete Cluster** option is available in the **Settings drop-down Menu**. The **Settings** drop-down menu provides you with an estimated time left before the force deletion becomes available. + diff --git a/docs/docs-content/clusters/public-cloud/cox-edge/network-rules.md b/docs/docs-content/clusters/public-cloud/cox-edge/network-rules.md new file mode 100644 index 0000000000..49e899a0f5 --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/cox-edge/network-rules.md @@ -0,0 +1,23 @@ +--- +sidebar_label: "Required Network Rules" +title: "Required Network Rules" +description: "Cox Edge deployments require the following network rules for a successful Palette deployment." 
+hide_table_of_contents: false +sidebar_position: 20 +tags: ["public cloud", "cox edge"] +--- + + +To successfully deploy a host cluster to Cox Edge with Palette, you must add the following network rules to each deployment. + + +## Inbound + +The following inbound network rules are required for Palette to deploy and manage a Cox Edge cluster. + +| Port | Protocol | Source | Description | +|------|----------|-----------|---------------------------------------------------------------------------| +| 22 | TCP | 0.0.0.0/0 | To support the secure shell (SSH) protocol. | +| 179 | TCP | 0.0.0.0/0 | Required for the Border Gateway Protocol (BGP). | +| 6443 | TCP | 0.0.0.0/0 | Required for Palette to communicate with the cluster's Kubernetes API server. | +| 4789 | UDP | 0.0.0.0/0 | Required for networking with VXLAN. | \ No newline at end of file diff --git a/docs/docs-content/clusters/public-cloud/deploy-k8s-cluster.md b/docs/docs-content/clusters/public-cloud/deploy-k8s-cluster.md new file mode 100644 index 0000000000..bacca62cf6 --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/deploy-k8s-cluster.md @@ -0,0 +1,1263 @@ +--- +sidebar_label: "Deploy a Cluster" +title: "Deploy a Cluster" +description: "Learn how to deploy a Kubernetes cluster to a public cloud provider with Palette. " +icon: "" +category: ["tutorial"] +hide_table_of_contents: false +tags: ["public cloud", "aws", "azure", "gcp", "tutorial"] +toc_min_heading_level: 2 +toc_max_heading_level: 3 +sidebar_position: 50 +--- + +Palette helps you create and manage Kubernetes clusters in various cloud environments with minimal overhead. + +Palette offers profile-based management for Kubernetes, enabling consistency, repeatability, and operational efficiency across multiple clusters. A [cluster profile](../../cluster-profiles/cluster-profiles.md) allows you to customize the cluster infrastructure stack, allowing you to choose the desired Operating System (OS), Kubernetes, Container Network Interfaces (CNI), Container Storage Interfaces (CSI). You can further customize the stack with add-on application layers. + +After defining a cluster profile, you can provide the cloud environment details, the control plane, and worker node configurations to deploy a host cluster. + +This tutorial will teach you how to deploy a host cluster with Palette using Amazon Web Services (AWS), Microsoft Azure, or Google Cloud Platform (GCP) cloud providers. You can deploy a cluster using either Palette or Terraform. You will learn about *Cluster Mode* and *Cluster Profiles* and how these components enable you to deploy customized applications to Kubernetes with minimal effort. + +## Architecture + +As you navigate the tutorial, refer to this diagram to help you understand how Palette uses a cluster profile as a blueprint for the host cluster you deploy. Palette clusters have the same node pools you may be familiar with: control plane nodes, often called *master nodes*, and *worker nodes* where you will deploy applications. The result is a host cluster that Palette manages. + +![A view of Palette managing the Kubernetes lifecycle](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_application.png) + +
+ +### Deploy the Cluster and the Application + +Select the workflow you want to learn more about. + +- [UI Workflow](#ui-workflow) + +- [Terraform Workflow](#terraform-workflow) + +
+ + + +## UI Workflow + + +You can create and manage clusters directly from the Palette dashboard. Use the following steps to learn how to deploy a host cluster to multiple cloud providers. + +### Prerequisites + +To complete this tutorial, you will need the following. + +- A public cloud account from one of these providers: + - [AWS](https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account) + - [Azure](https://learn.microsoft.com/en-us/training/modules/create-an-azure-account) + - [GCP](https://cloud.google.com/docs/get-started) + +
+ +- Register the cloud account in Palette. The following resources provide additional guidance. + - [Register and Manage AWS Accounts](aws/add-aws-accounts.md) + - [Register and Manage Azure Cloud Accounts](azure/azure-cloud.md) + - [Register and Manage GCP Accounts](gcp/add-gcp-accounts.md) + +
+ +- An SSH Key Pair. Use the [Create and Upload an SSH Key](../cluster-management/ssh-keys.md) guide to learn how to create an SSH key and upload it to Palette. + + - AWS users must create an AWS Key pair before starting the tutorial. If you need additional guidance, check out the [Create EC2 SSH Key Pair](https://docs.aws.amazon.com/ground-station/latest/ug/create-ec2-ssh-key-pair.html) tutorial. + +### Deploy the Environment + +The following steps will guide you through deploying the cluster infrastructure. You will start by creating a cluster profile that you apply to the host cluster. + +
+ + + +#### Create Cluster Profile (AWS) + +[Cluster profiles](../../cluster-profiles/cluster-profiles.md) are templates you create with the following core layers and any add-on layers such as security, monitoring, logging, and more. + + - Operating System (OS) + - Kubernetes distribution and version + - Container Network Interface (CNI) + - Container Storage Interface (CSI) + + +You customize profiles by choosing the type of component and version. In this way, profiles offer a reproducible way to create clusters. + +Log in to [Palette](https://console.spectrocloud.com) and navigate to the left **Main Menu**. Select **Profiles** to view the cluster profile page. +You can view the list of available cluster profiles. To create a cluster profile, click the **Add Cluster Profile** button. + +![View of the cluster Profiles page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_profile_list_view.png) + +Follow the wizard to create a new profile. + +In the **Basic Information** section, assign the name **aws-profile**, a brief profile description, select the type as **Full**, and assign the tag **env:aws**. You can leave the version empty if you want to. Just be aware that the version defaults to **1.0.0**. Click on **Next**. + +**Cloud Type** allows you to choose the infrastructure provider with which this cluster profile is associated. Select **AWS** and click on **Next**. + +**Profile Layers** is the main configuration step where you specify the packs that compose the profile. There are four required infrastructure packs and several optional add-on packs you can choose from. +Every pack requires you to select the **Pack Type**, **Registry**, and **Pack Name**. + +For this tutorial, use the following packs: + +| Pack Name | Version | Layer | +|--------------------|-----------|--------------------| +| ubuntu-aws LTS | 20.4.x | Operating System | +| Kubernetes | 1.24.x | Kubernetes | +| cni-calico | 3.24.x | Network | +| csi-aws-ebs | 1.16.x | Storage | + + +As you fill out the information for each layer, click on **Next** to proceed to the next layer. + +Click on **Confirm** after you have completed filling out all the core layers. + +![A view of the cluster profile stack](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_parameters.png) + +The review section gives an overview of the cluster profile configuration you selected. Click on **Finish Configuration** to create the cluster profile. + + +You can modify cluster profiles after you create them by adding, removing, or editing the layers. + 
+ + +#### Create a New Cluster + +Navigate to the left **Main Menu** and select **Cluster**. From the clusters page, click on the **Add New Cluster** button. + +![Palette clusters overview page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png) + +Palette will prompt you to either deploy a new cluster or import an existing one. Click on **Deploy New Cluster** to access the cluster deployment wizard. Select **AWS** and click the **Start AWS Configuration** button. Use the following steps to create a host cluster in AWS. + +
+ + +#### Basic information + +In the **Basic information** section, insert the general information about the cluster, such as the Cluster name, Description, Tags, and Cloud account. Click on **Next**. + +![Palette clusters basic information](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_basic_info.png) + +
+ + +#### Cluster Profile + +A list is displayed of available profiles you can choose to deploy to AWS. Select the cluster profile you created earlier and click on **Next**. + +
+ + +#### Parameters + +The **Parameters** section displays the core and add-on layers in the cluster profile. + +![Palette clusters parameters](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_clusters_creation_parameters.png) + +Each layer has a pack manifest file with the deploy configurations. The pack manifest file is in a YAML format. Each pack contains a set of default values. You can change the manifest values if needed. Click on **Next** to proceed. + +
+ + +#### Cluster Configuration + +The **Cluster config** section allows you to select the **Region** in which to deploy the host cluster and specify other options such as the **SSH Key Pair** to assign to the cluster. All clusters require you to select an SSH key. After you have selected the **Region** and your **SSH Key Pair Name**, click on **Next**. + +#### Nodes Configuration + +The **Nodes config** section allows you to configure the nodes that make up the control plane (master nodes) and data plane (worker nodes) of the host cluster. + + +Before you proceed to the next section, review the following parameters.

+- **Number of nodes in the pool** - This option sets the number of master or worker nodes in the master or worker pool. For this tutorial, set the count to one for the master pool and two for the worker pool. + + +- **Allow worker capability** - This option allows the master node to also accept workloads. This is useful when spot instances are used as worker nodes. You can check this box if you want to. + + +- **Instance Type** - Select the compute type for the node pool. Each instance type displays the amount of CPU, RAM, and hourly cost of the instance. Select `m4.2xlarge`. + + +- **Availability zones** - Used to specify the availability zones in which the node pool can place nodes. Select an availability zone. + + + +- **Disk size** - Set the disk size to **60 GiB**. + +
+ +- **Instance Option** - This option allows you to choose [on-demand instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-on-demand-instances.html) or [spot instance](https://aws.amazon.com/ec2/spot/) for worker nodes. Select **On Demand**. + +
+ +![Palette clusters basic information](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png) + +Select **Next** to proceed with the cluster deployment. + +
+ + +#### Settings + +In the **Settings** section, you can configure advanced options such as when to patch the OS, enable security scans, manage backups, add role-based access control (RBAC) bindings, and more. + +For this tutorial, you can use the default settings. Click on **Validate** to continue. + +
+ + +#### Review + +The **Review** section allows you to review the cluster configuration prior to deploying the cluster. Review all the settings and click on **Finish Configuration** to deploy the cluster. + +![Configuration overview of newly created AWS cluster](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_profile_cluster_profile_review.png) + + +
+ +Navigate to the left **Main Menu** and select **Clusters**. + +![Update the cluster](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_aws_create_cluster.png) + +Click on your cluster to review its details such as deployment status, event logs, cluster profile, monitoring data, and other information about the cluster. + +
+ +![A view of the cluster details page](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_details.png) + + +
+ + +#### Create Cluster Profile (Azure) + +[Cluster profiles](../../cluster-profiles/cluster-profiles.md) are templates you create with the following core layers and any add-on layers such as security, monitoring, logging, and more. + - Operating System (OS) + - Kubernetes distribution and version + - Container Network Interface (CNI) + - Container Storage Interface (CSI) + +You customize profiles by choosing the type of component and version. In this way, profiles offer a reproducible way to create clusters. + +Log in to Palette and navigate to the left **Main Menu**. Select **Profiles** to view the cluster profile page. +You can view the list of available cluster profiles. To create a cluster profile, click the **Add Cluster Profile** button. + +![Cluster profiles page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_profile_list_view.png) + +Follow the wizard to create a new profile. + +In the **Basic Information** section, assign the name **azure-profile**, a brief profile description, select the type as **Full**, and assign the tag **env:azure**. You can leave the version empty if you want to. Just be aware that the version defaults to **1.0.0**. Click on **Next**. + +**Cloud Type** allows you to choose the infrastructure provider with which this cluster profile is associated. Select **Azure** and click on **Next**. + +**Profile Layers** is the main configuration step where you specify the packs that compose the profile. You can choose from four required infrastructure packs and several optional add-on packs. +Every pack requires you to select the **Pack Type**, **Registry**, and **Pack Name**. + +For this tutorial, use the following packs: + +| Pack Name | Version | Layer | +|--------------------|--------------------------------------------------|--------------------| +| ubuntu-azure LTS | 20.4.x | Operating System | +| Kubernetes | 1.24.x | Kubernetes | +| cni-calico-azure | 3.24.x | Network | +| Azure Disk | 1.25.x | Storage | + + +As you fill out the information for each layer, click on **Next** to proceed to the next layer. + +Click on **Confirm** after you have completed filling out all the core layers. + +![Azure cluster profile overview page](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_profile_stack.png) + +The review section gives an overview of the cluster profile configuration you selected. Click on **Finish Configuration** to finish creating the cluster profile. + + +You can modify cluster profiles after you create them by adding, removing, or editing the layers. + 
+ + +### Create a New Cluster + +Navigate to the left **Main Menu** and select **Clusters**. Click the **Add New Cluster** button. + +![Palette clusters overview page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png) + +Click on **Deploy New Cluster** to access the cluster deployment wizard. Select **Azure** and click the **Start Azure Configuration** button. Use the following steps to create a host cluster in Azure. + +
+ + +#### Basic information + +In the **Basic information** section, insert the general information about the cluster, such as the Cluster name, Description, Tags, and Cloud account. Click on **Next**. + +![Palette clusters basic information](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_clusters_basic_info.png) + +
+ + +#### Cluster Profile + +A list is displayed of available profiles you can choose to deploy to Azure. Select the cluster profile you created earlier and click on **Next**. + +#### Parameters + +The **Parameters** section displays all the layers and add-on components in the cluster profile. + +![palette clusters basic information](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_parameters.png) + +Each layer has a pack manifest file with the deploy configurations. The pack manifest file is in a YAML format. Each pack contains a set of default values. You can change the manifest values if needed. Click on **Next** to proceed. + +
+ +#### Cluster Configuration + + +The **Cluster config** section allows you to select the **Subscription**, **Region**, **Resource Group**, **Storage account**, and **SSH Key** to apply to the host cluster. All clusters require you to assign an SSH key. Refer to the [SSH Keys](../cluster-management/ssh-keys.md) guide for information about uploading an SSH key. + + 
+ +When you are done selecting a **Subscription**, **Region**, **Resource Group**, **Storage account** and **SSH Key**, click on **Next**. +
+ +#### Nodes Configuration + +The **Nodes config** section allows you to configure the nodes that compose the control plane (master nodes) and data plane (worker nodes) of the Kubernetes cluster. + +Refer to the [Node Pool](../cluster-management/node-pool.md) guide for a list and description of parameters. + +Before you proceed to the next section, review the following parameters. + 
+ + +- **Number of nodes in the pool** - This option sets the number of master or worker nodes in the master or worker pool. For this tutorial, set the count to one for both the master and worker pools. + +- **Allow worker capability** - This option allows the master node to also accept workloads. This is useful when spot instances are used as worker nodes. You can check this box if you want to. + + +- **Instance Type** - Select the compute type for the node pool. Each instance type displays the amount of CPU, RAM, and hourly cost of the instance. Select **Standard_A8_v2**. + + +- **Managed disk** - Used to select the storage class. Select **Standard LRS** and set the disk size to **60**. + + +- **Availability zones** - Used to specify the availability zones in which the node pool can place nodes. Select an availability zone. + +![Palette clusters nodes configuration](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png) + 
+ + +#### Settings + +In the **Settings** section, you can configure advanced options such as when to patch the OS, enable security scans, manage backups, add Role-Based Access Control (RBAC) bindings, and more. + +For this tutorial, you can use the default settings. Click on **Validate** to continue. + +
+ + +#### Review + +The Review section allows you to review the cluster configuration before deploying the cluster. Review all the settings and click on **Finish Configuration** to deploy the cluster. + +![Configuration overview of newly created Azure cluster](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_profile_review.png) + + +
+ +Navigate to the left **Main Menu** and select **Clusters**. + +![Update the cluster](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_azure_create_cluster.png) + +Click on your cluster to review details such as deployment status, event logs, cluster profile, monitoring data, and other information about the cluster. + +
+ +![View of the cluster details page](/tutorials/deploy-clusters/azure/clusters_public-cloud_deploy-k8s-cluster_azure_create_cluster_details.png) + +
+ +
+ + +#### Create Cluster Profile (GCP) +[Cluster profiles](../../cluster-profiles/cluster-profiles.md) are templates you create with the following core layers and any add-on layers such as security, monitoring, logging, and more. + + - Operating System (OS) + - Kubernetes distribution and version + - Container Network Interface (CNI) + - Container Storage Interface (CSI) + + +You customize profiles by choosing the type of component and version. In this way, profiles offer a reproducible way to create clusters. + +Log in to [Palette](https://console.spectrocloud.com) and navigate to the left **Main Menu**. Select **Profiles** to view the cluster profile page. +You can view the list of available cluster profiles. To create a cluster profile, click the **Add Cluster Profile** button. + +![View of the cluster view page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_profile_list_view.png) + +Follow the wizard to create a new profile. + +In the **Basic Information** section, assign the name **gcp-profile**, provide a profile description, select the type as **Full**, and assign the tag **env:gcp**. You can leave the version empty if you want to. Just be aware that the version defaults to **1.0.0**. Click on **Next**. + +**Cloud Type** allows you to choose the infrastructure provider with which this cluster profile is associated. Select **Google Cloud** and click on **Next**. + +**Profile Layers** is the main configuration step where you specify the packs that compose the profile. You can choose from four required infrastructure packs and several optional add-on packs. Every pack requires you to select the **Pack Type**, **Registry**, and **Pack Name**. + + +For this tutorial, use the following packs: + +| Pack Name | Version | Layer | +|--------------------|--------------------------|--------------------| +| ubuntu-gcp LTS | 20.4.x | Operating System | +| Kubernetes | 1.24.x | Kubernetes | +| cni-calico | 3.24.x | Network | +| csi-gcp-driver | 1.7.x | Storage | + + +As you fill out the information for each layer, click on **Next** to proceed to the next layer. + +Click on **Confirm** after you have completed filling out all the core layers. + +![GCP cluster profile view](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_gcp_cluster_profile_stack_view.png) + +The review section gives an overview of the cluster profile configuration you selected. Click on **Finish Configuration** to create the cluster profile. + +You can modify cluster profiles after you create them by adding, removing, or editing the layers. + 
+ +Navigate to the left **Main Menu** and select **Cluster**. Click the **Add New Cluster** button. + +![Palette clusters overview page](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png) + +Click on **Deploy New Cluster** to access the cluster deployment wizard. Select **Google Cloud** and click the **Start Google Cloud Configuration** button. Use the following steps to create a host cluster in Google Cloud. + +
+ + +#### Basic information + +In the **Basic information** section, insert the general information about the cluster, such as the **Cluster name**, **Description**, **Tags**, and **Cloud account**. Click on **Next**. + +![Palette clusters basic information](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_basic_info.png) + +
+ + +#### Cluster Profile + +A list is displayed of available profiles you can choose to deploy to GCP. Select the cluster profile you created earlier and click on **Next**. + +![Palette clusters basic information](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_cluster_gcp_profile.png) + +
+ + +#### Parameters + +The **Parameters** section displays all the layers and add-on components in the cluster profile. + +![Palette clusters basic information](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_clusters_parameters.png) + +Each layer has a pack manifest file with the deploy configurations. The pack manifest file is in a YAML format. Each pack contains a set of default values. You can change the manifest values if needed. Click on **Next** to proceed. + +
+ + +#### Cluster Configuration + +The **Cluster config** section allows you to select the **Project**, **Region**, and **SSH Key** to apply to the host cluster. All clusters require you to assign an SSH key. Refer to the [SSH Keys](/clusters/cluster-management/ssh-keys) guide for information about uploading an SSH key. + + +
+ +After selecting a **Project**, **Region**, and **SSH Key**, click on **Next**. + +#### Nodes Configuration + +The **Nodes config** section allows you to configure the nodes that make up the control plane (master nodes) and data plane (worker nodes) of the host cluster. + +Refer to the [Node Pool](../cluster-management/node-pool.md) guide for a list and description of parameters. + +Before you proceed to the next section, review the following parameters. + +- **Number of nodes in the pool** - This option sets the number of master or worker nodes in the master or worker pool. For this tutorial, set the count to one for the master pool and two for the worker pool. + +- **Allow worker capability** - This option allows the master node to also accept workloads. This is useful when spot instances are used as worker nodes. You can check this box if you want to. + + +- **Instance Type** - Select the compute type for the node pool. Each instance type displays the amount of CPU, RAM, and hourly cost of the instance. Select **n1-standard-4**. + +- **Disk size** - Set the disk size to **60**. + + +- **Availability zones** - Used to specify the availability zones in which the node pool can place nodes. Select an availability zone. + +![Palette clusters nodes configuration](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_cluster_nodes_config.png) + 
+ +Select **Next** to proceed with the cluster deployment. + + +#### Settings + +In the **Settings** section, you can configure advanced options such as when to patch the OS, enable security scans, manage backups, add Role-Based Access Control (RBAC) bindings, and more. + +For this tutorial, you can use the default settings. Click on **Validate** to continue. + +#### Review + +The **Review** section allows you to review the cluster configuration before deploying the cluster. Review all the settings and click on **Finish Configuration** to deploy the cluster. + +![Newly created GCP cluster](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_profile_review.png) + +
+ +Navigate to the left **Main Menu** and select **Clusters**. + +
+ +![Update the cluster](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_new_cluster.png) + +Click on your cluster to review details such as deployment status, event logs, cluster profile, monitoring data, and other information about the cluster. + +
+ +![View of the cluster details page](/tutorials/deploy-clusters/gcp/clusters_public-cloud_deploy-k8s-cluster_profile_details.png) + +
+
+ + +The cluster deployment process can take 15 to 30 min. The deployment time varies depending on the cloud provider, cluster profile, cluster size, and the node pool configurations provided. You can learn more about the deployment progress by reviewing the event log. Click on the **Events** tab to view the log. + +![Update the cluster](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-event_log.png) + +
+ +While you wait for the cluster deployment process to complete, feel free to check out a video where we discuss the growing pains of using Kubernetes and how Palette can help you address these pain points. + 
+ + + + +--- + +### Update Cluster Profile + +In the following steps, you will learn how to update a cluster profile by adding a new layer to it that contains the application. + +
+ +#### Add a Manifest + +Navigate to the left **Main Menu** and select **Profiles**. Select the cluster profile you created earlier and which you applied to the host cluster. + +Click on **Add Manifest** at the top of the page and fill out the following input fields.

+ +- **Layer name** - The name of the layer. Assign the name **application**. + + +- **Manifests** - Add your manifest by giving it a name and clicking the **New Manifest** button. Assign a name to the internal manifest and click on the blue button. An empty editor will be displayed on the right side of the screen. + +![Screenshot of unopened manifest editor](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_manifest_blue_btn.png) + 
+ +In the manifest editor, insert the following content. + +
+ +```yaml +apiVersion: v1 +kind: Service +metadata: + name: hello-universe-service +spec: + type: LoadBalancer + ports: + - protocol: TCP + port: 8080 + targetPort: 8080 + selector: + app: hello-universe +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hello-universe-deployment +spec: + replicas: 2 + selector: + matchLabels: + app: hello-universe + template: + metadata: + labels: + app: hello-universe + spec: + containers: + - name: hello-universe + image: ghcr.io/spectrocloud/hello-universe:1.0.12 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 8080 +``` + +The code snippet you added will deploy the [*hello-universe*](https://github.com/spectrocloud/hello-universe) application. You may have noticed that the code snippet you added is a Kubernetes configuration. Manifest files are a method you can use to achieve more granular customization of your Kubernetes cluster. You can add any valid Kubernetes configuration to a manifest file. + +![Screenshot of manifest in the editor](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_manifest.png) + +The manifest defines a replica set for the application to simulate a distributed environment with a web application deployed to Kubernetes. The application is assigned a load balancer. Using a load balancer, you can expose a single access point and distribute the workload to both containers. + +Click on **Confirm & Create** to save your changes. + +
+ + +#### Deployment + +Navigate to the left **Main Menu** and select **Clusters**. Click on the host cluster you deployed to open its details page. + + +In the top right-hand corner is a blue **Updates Available** button. Click on the button to review the available updates. + +![The cluster details page with a view of pending updates](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_update_available.png) + + +Compare the new changes against the previous cluster profile definition. The only difference is the addition of a manifest that will deploy the Hello Universe application. + + +![Available updates details](/tutorials/deploy-clusters/deploy_app/clusters_public-cloud_deploy-k8s-cluster_update_details_compare.png) + +Click on **Confirm updates** to apply the updates to the host cluster. Depending on the scope of the change, this may take a few moments. + 
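+
+If you prefer the command line, you can also track the rollout with kubectl once the update is applied. The following is a minimal sketch, assuming you have downloaded the cluster's kubeconfig file from the cluster details page (the file path below is a placeholder) and that the manifest deploys to the default namespace.
+
+```shell
+# Point kubectl at the kubeconfig downloaded from Palette (placeholder path)
+export KUBECONFIG=~/Downloads/<cluster-name>-kubeconfig.yaml
+
+# Watch the Deployment and Service defined in the manifest
+kubectl rollout status deployment/hello-universe-deployment --namespace default
+kubectl get service hello-universe-service --namespace default
+```
+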
+ + +### Verify the Application + +Navigate to the cluster's details page and verify you are in the **Overview** tab. When the application is deployed and ready for network traffic, indicated in the **Services** field, Palette exposes the service URL. Click on the URL for port **:8080** to access the Hello Universe application. + +![Cluster details page with service URL highlighted](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_service_url.png) + + +
+ + +:::caution + +It can take up to three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request. + + +::: + +
+ +![Deployed application landing page with counter displayed](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_app.png) + +
+ +Welcome to Hello Universe, a demo application to help you learn more about Palette and its features. Feel free to click on the logo to increase the counter and for a fun image change. + +You have deployed your first application to a cluster managed by Palette. Your first application is a single container application with no upstream dependencies. + + +### Cleanup + +Use the following steps to remove all the resources you created for the tutorial. + +To remove the cluster, navigate to the left **Main Menu** and click on **Clusters**. Select the cluster you want to delete to access its details page. + +Click on **Settings** to expand the menu, and select **Delete Cluster**. + +![Delete cluster](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_delete-cluster-button.png) + +You will be prompted to type in the cluster name to confirm the delete action. Type in the cluster name to proceed with the delete step. The deletion process takes several minutes to complete. + +
+ +:::info + +If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for a force delete. To trigger a force delete, navigate to the cluster’s details page, click on **Settings**, then select **Force Delete Cluster**. Palette automatically removes clusters stuck in the cluster deletion phase for over 24 hours. + +::: + + +
+ +Once the cluster is deleted, navigate to the left **Main Menu** and click on **Profiles**. Find the cluster profile you created and click on the **three-dot Menu** to display the **Delete** button. Select **Delete** and confirm the selection to remove the cluster profile. + + + +## Terraform Workflow + +The [Spectro Cloud Terraform](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) provider enables you to create and manage Palette resources in a codified manner by leveraging Infrastructure as Code (IaC). Some notable reasons why you would want to utilize IaC are: + +- The ability to automate infrastructure. + +- Improved collaboration in making infrastructure changes. + +- Self-documentation of infrastructure through code. + +- Tracking of all infrastructure in a single source of truth. + +If you want to become more familiar with Terraform, we recommend you check out the [Terraform](https://developer.hashicorp.com/terraform/intro) learning resources from HashiCorp. + 
+ +### Prerequisites + +To complete this tutorial, you will need the following items: + +- Basic knowledge of containers. +- [Docker Desktop](https://www.docker.com/products/docker-desktop/) or another container management tool. +- Create a cloud account from one of the following providers. + - [AWS](https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account) + - [Azure](https://learn.microsoft.com/en-us/training/modules/create-an-azure-account) + - [GCP](https://cloud.google.com/docs/get-started) +- Register the [cloud account with Palette](https://console.spectrocloud.com/auth/signup). Use the following resources for additional guidance. + - [Register and Manage AWS Accounts](aws/add-aws-accounts.md) + - [Register and Manage Azure Cloud Accounts](azure/azure-cloud.md) + - [Register and Manage GCP Accounts](gcp/add-gcp-accounts.md) + 
+ +### Set Up Local Environment + +You can clone the tutorials repository locally or follow along by downloading a Docker image that contains the tutorial code and all dependencies. + +
+ +:::caution + +If you choose to clone the repository instead of using the tutorial container make sure you have Terraform v1.4.0 or greater installed. + + +::: + +
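+
+If you cloned the repository, you can confirm that your local Terraform installation meets the version requirement before continuing.
+
+```shell
+# Should report Terraform v1.4.0 or greater
+terraform version
+```
+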
+ + + + + + +Ensure Docker Desktop on your local machine is available. Use the following command and ensure you receive an output displaying the version number. + +
+ +```bash +docker version +``` + +Download the tutorial image to your local machine. +
+ +```bash +docker pull ghcr.io/spectrocloud/tutorials:1.0.7 +``` + +Next, start the container, and open a bash session into it. +
+ +```shell +docker run --name tutorialContainer --interactive --tty ghcr.io/spectrocloud/tutorials:1.0.7 bash +``` + +Navigate to the tutorial code. + +
+ +```shell +cd /terraform/iaas-cluster-deployment-tf +``` + + +
+ + +Open a terminal window and download the tutorial code from GitHub. + +
+ +```shell +git clone git@github.com:spectrocloud/tutorials.git +``` + +Change the directory to the tutorial folder. + 
+ +```shell +cd tutorials/ +``` + +Check out the following git tag. + +
+ +```shell +git checkout v1.0.7 +``` + +Change the directory to the tutorial code. + +
+ +```shell +cd terraform/iaas-cluster-deployment-tf/ +``` + +
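+
+Optionally, confirm that you are on the expected tag before continuing.
+
+```shell
+# Should print the tag you checked out, v1.0.7
+git describe --tags
+```
+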
+ + +
+ +--- + +### Create an API Key + +Before you can get started with the Terraform code, you need a Spectro Cloud API key. + +To create an API key, log in to [Palette](https://console.spectrocloud.com), click on the **User Menu**, and select **My API Keys**. + +![Image that points to the user drop-down Menu and points to the API key link](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_create_api_key.png) + +Next, click on **Add New API Key**. Fill out the required input fields, **API Key Name** and **Expiration Date**. Click on **Confirm** to create the API key. Copy the key value to your clipboard, as you will use it shortly. + 
+ +In your terminal session, issue the following command to export the API key as an environment variable. + +
+ +```shell +export SPECTROCLOUD_APIKEY=YourAPIKeyHere +``` + +The [Spectro Cloud Terraform](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) provider requires credentials to interact with the Palette API. +The Spectro Cloud Terraform provider will use the environment variable to authenticate with the Spectro Cloud API endpoint. + + +### Resources Review + +To help you get started with Terraform, the tutorial code is structured to support deploying a cluster to either Azure, GCP, or AWS. Before you deploy a host cluster to your target provider, take a few moments to review the following files in the folder structure. + +
+ +- **providers.tf** - This file contains the Terraform providers that are used to support the deployment of the cluster. + + +- **inputs.tf** - This file contains all the Terraform variables for the deployment logic. + + +- **data.tf** - This file contains all the query resources that perform read actions. + + +- **cluster_profiles.tf** - This file contains the cluster profile definitions for each cloud provider. + + +- **cluster.tf** - This file has all the required cluster configurations to deploy a host cluster to one of the cloud providers. + + +- **terraform.tfvars** - Use this file to customize the deployment and target a specific cloud provider. This is the primary file you will modify. + + +- **outputs.tf** - This file contains content that will be output in the terminal session upon a successful Terraform `apply` action. + +The following section allows you to review the core Terraform resources more closely. + +
+ +#### Provider + +The **provider.tf** file contains the Terraform providers and their respective versions. The tutorial uses two providers - the Spectro Cloud Terraform provider and the TLS Terraform provider. Note how the project name is specified in the `provider "spectrocloud" {}` block. You can change the target project by changing the value specified in the `project_name` parameter. + +
+ + +```hcl +terraform { + required_providers { + spectrocloud = { + version = ">= 0.13.1" + source = "spectrocloud/spectrocloud" + } + tls = { + source = "hashicorp/tls" + version = "4.0.4" + } + } +} + +provider "spectrocloud" { + project_name = "Default" +} +``` + +The next file you should become familiar with is the **cluster-profiles.tf** file. + +#### Cluster Profile + +The Spectro Cloud Terraform provider has several resources available for use. When creating a cluster profile, use `spectrocloud_cluster_profile`. +This resource can be used to customize all layers of a cluster profile. You can specify all the different packs and versions to use and add a manifest or Helm chart. + + +In the **cluster-profiles.tf** file, the cluster profile resource is declared three times. Each instance of the resource is for a specific cloud provider. Using the AWS cluster profile as an example, note how the **cluster-profiles.tf** file uses `pack {}` blocks to specify each layer of the profile. The order in which you arrange contents of the `pack {}` blocks plays an important role, as each layer maps to the core infrastructure in a cluster profile. + +The first listed `pack {}` block must be the OS, followed by Kubernetes, the container network interface, and the container storage interface. The first `pack {}` block in the list equates to the bottom layer of the cluster profile. Ensure you define the bottom layer of the cluster profile - the OS layer - first in the list of `pack {}` blocks. + +
+ +```hcl +resource "spectrocloud_cluster_profile" "aws-profile" { + name = "tf-aws-profile" + description = "A basic cluster profile for AWS" + tags = concat(var.tags, ["env:aws"]) + cloud = "aws" + type = "cluster" + + pack { + name = data.spectrocloud_pack.aws_ubuntu.name + tag = data.spectrocloud_pack.aws_ubuntu.version + uid = data.spectrocloud_pack.aws_ubuntu.id + values = data.spectrocloud_pack.aws_ubuntu.values + } + + pack { + name = data.spectrocloud_pack.aws_k8s.name + tag = data.spectrocloud_pack.aws_k8s.version + uid = data.spectrocloud_pack.aws_k8s.id + values = data.spectrocloud_pack.aws_k8s.values + } + + pack { + name = data.spectrocloud_pack.aws_cni.name + tag = data.spectrocloud_pack.aws_cni.version + uid = data.spectrocloud_pack.aws_cni.id + values = data.spectrocloud_pack.aws_cni.values + } + + pack { + name = data.spectrocloud_pack.aws_csi.name + tag = data.spectrocloud_pack.aws_csi.version + uid = data.spectrocloud_pack.aws_csi.id + values = data.spectrocloud_pack.aws_csi.values + } + + pack { + name = "hello-universe" + type = "manifest" + tag = "1.0.0" + values = "" + manifest { + name = "hello-universe" + content = file("manifests/hello-universe.yaml") + } + } +} +``` + +The last `pack {}` block contains a manifest file with all the Kubernetes configurations for the [Hello Universe](https://github.com/spectrocloud/hello-universe) application. Including the application in the profile ensures the application is installed during cluster deployment. If you wonder what all the data resources are for, head to the next section to review them. + + +#### Data Resources + +You may have noticed that each `pack {}` block contains references to a data resource. + +
+ + +```hcl + pack { + name = data.spectrocloud_pack.aws_csi.name + tag = data.spectrocloud_pack.aws_csi.version + uid = data.spectrocloud_pack.aws_csi.id + values = data.spectrocloud_pack.aws_csi.values + } +``` +
+ +[Data resources](https://developer.hashicorp.com/terraform/language/data-sources) are used to perform read actions in Terraform. The Spectro Cloud Terraform provider exposes several data resources to help you make your Terraform code more dynamic. The data resource used in the cluster profile is `spectrocloud_pack`. This resource enables you to query Palette for information about a specific pack. You can get information about the pack using the data resource such as unique ID, registry ID, available versions, and the pack's YAML values. + +Below is the data resource used to query Palette for information about the Kubernetes pack for version `1.24.10`. + +
+ +```hcl +data "spectrocloud_pack" "aws_k8s" { + name = "kubernetes" + version = "1.24.10" +} +``` + +Using the data resource, you avoid manually typing in the parameter values required by the cluster profile's `pack {}` block. + +
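+
+If you want to inspect exactly what a data source resolves to, you can temporarily add an `output` block and review it after an apply, or evaluate the expression in `terraform console`. The block below is a sketch for illustration only and is not part of the tutorial files.
+
+```hcl
+# Prints the unique ID and version Palette resolved for the Kubernetes pack
+output "aws_k8s_pack" {
+  value = {
+    id      = data.spectrocloud_pack.aws_k8s.id
+    version = data.spectrocloud_pack.aws_k8s.version
+  }
+}
+```
+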
+ +#### Cluster + +The **clusters.tf** file contains the definitions for deploying a host cluster to one of the cloud providers. To create a host cluster, you must use a cluster resource for the cloud provider you are targeting. + +In this tutorial, the following Terraform cluster resources are used. + +
+ +| Terraform Resource | Platform | +|---|---| +| [`spectrocloud_cluster_aws`](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_aws) | AWS | +| [`spectrocloud_cluster_azure`](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_azure) | Azure | +| [`spectrocloud_cluster_gcp`](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_gcp) | GCP | + +Using the `spectrocloud_cluster_azure` resource in this tutorial as an example, note how the resource accepts a set of parameters. When deploying a cluster, you can change the same parameters in the Palette user interface (UI). You can learn more about each parameter by reviewing the resource documentation page hosted in the Terraform registry. + +
+ +```hcl +resource "spectrocloud_cluster_azure" "cluster" { + name = "azure-cluster" + tags = concat(var.tags, ["env:azure"]) + cloud_account_id = data.spectrocloud_cloudaccount_azure.account[0].id + + cloud_config { + subscription_id = var.azure_subscription_id + resource_group = var.azure_resource_group + region = var.azure-region + ssh_key = tls_private_key.tutorial_ssh_key[0].public_key_openssh + } + + cluster_profile { + id = spectrocloud_cluster_profile.azure-profile[0].id + } + + machine_pool { + control_plane = true + control_plane_as_worker = true + name = "master-pool" + count = var.azure_master_nodes.count + instance_type = var.azure_master_nodes.instance_type + azs = var.azure_master_nodes.azs + is_system_node_pool = var.azure_master_nodes.is_system_node_pool + disk { + size_gb = var.azure_master_nodes.disk_size_gb + type = "Standard_LRS" + } + } + + machine_pool { + name = "worker-basic" + count = var.azure_worker_nodes.count + instance_type = var.azure_worker_nodes.instance_type + azs = var.azure_worker_nodes.azs + is_system_node_pool = var.azure_worker_nodes.is_system_node_pool + } + + timeouts { + create = "30m" + delete = "15m" + } +} +``` +### Deploy Cluster + +To deploy a cluster using Terraform, you must first modify the **terraform.tfvars** file. Open the **terraform.tfvars** file in the editor of your choice, and locate the cloud provider you will use to deploy a host cluster. + +To simplify the process, we added a toggle variable in the Terraform template, that you can use to select the deployment environment. Each cloud provider has a section in the template that contains all the variables you must populate. Variables to populate are identified with `REPLACE_ME`. + +In the example AWS section below, you would change `deploy-aws = false` to `deploy-aws = true` to deploy to AWS. Additionally, you would replace all the variables with a value `REPLACE_ME`. You can also update the values for nodes in the master pool or worker pool. + +
+ +```hcl +########################### +# AWS Deployment Settings +############################ +deploy-aws = false # Set to true to deploy to AWS + +aws-cloud-account-name = "REPLACE_ME" +aws-region = "REPLACE_ME" +aws-key-pair-name = "REPLACE_ME" + +aws_master_nodes = { + count = "1" + control_plane = true + instance_type = "m4.2xlarge" + disk_size_gb = "60" + availability_zones = ["REPLACE_ME"] # If you want to deploy to multiple AZs, add them here +} + +aws_worker_nodes = { + count = "1" + control_plane = false + instance_type = "m4.2xlarge" + disk_size_gb = "60" + availability_zones = ["REPLACE_ME"] # If you want to deploy to multiple AZs, add them here +} +``` + +When you are done making the required changes, issue the following command to initialize Terraform. + +
+ +```shell +terraform init +``` + +Next, issue the `plan` command to preview the changes. + +
+ +```shell +terraform plan +``` + + +Output: +```shell +Plan: 2 to add, 0 to change, 0 to destroy. +``` + +If you change the desired cloud provider's toggle variable to `true`, you will receive an output message that two new resources will be created. The two resources are your cluster profile and the host cluster. + +To deploy all the resources, use the `apply` command. + 
+ +```shell +terraform apply -auto-approve +``` + + +#### Verify the Profile + + +To check out the cluster profile creation in Palette, log in to [Palette](https://console.spectrocloud.com), and from the left **Main Menu** click on **Profiles**. Locate the cluster profile with the name pattern `tf-[cloud provider]-profile`. Click on the cluster profile to review its details, such as layers, packs, and versions. + +![A view of the cluster profile](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_cluster_profile_view.png) + + 
+ + +#### Verify the Cluster + + +You can also check the cluster creation process by navigating to the left **Main Menu** and selecting **Clusters**. + +![Update the cluster](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-cluster_aws_create_cluster.png) + +
+ +Select your cluster to review its details page, which contains the status, cluster profile, event logs, and more. + +
+ +The cluster deployment may take several minutes depending on the cloud provider, node count, node sizes used, and the cluster profile. You can learn more about the deployment progress by reviewing the event log. Click on the **Events** tab to check the event log. + +![Update the cluster](/tutorials/deploy-clusters/aws/clusters_public-cloud_deploy-k8s-event_log.png) + +
+ +While you wait for the cluster deployment process to complete, feel free to check out the following video where we discuss the growing pains of using Kubernetes and how Palette can help you address these pain points. + +
+ + + + +
+ +### Validate + +When the cluster deploys, you can access the Hello Universe application. +From the cluster's **Overview** page, click on the URL for port **:8080** next to the **hello-universe-service** in the **Services** row. This URL will take you to the application landing page. + +
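+
+If you prefer to verify from the terminal, you can query the load balancer endpoint directly with kubectl. This is a sketch that assumes you have downloaded the cluster's kubeconfig file from the cluster details page (the file path below is a placeholder) and that the service was created in the default namespace.
+
+```shell
+# Point kubectl at the kubeconfig downloaded from Palette (placeholder path)
+export KUBECONFIG=~/Downloads/<cluster-name>-kubeconfig.yaml
+
+# The EXTERNAL-IP column shows the public endpoint serving port 8080
+kubectl get service hello-universe-service --namespace default
+```
+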
+ + +:::caution + +It can take up to three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request. + +::: + + +![Deployed application](/tutorials/deploy-clusters/clusters_public-cloud_deploy-k8s-cluster_app.png) + +
+ +Welcome to Hello Universe, a demo application to help you learn more about Palette and its features. Feel free to click on the logo to increase the counter and for a fun image change. + +You have deployed your first application to a cluster managed by Palette through Terraform. Your first application is a single container application with no upstream dependencies. + + +### Cleanup + +Use the following steps to clean up the resources you created for the tutorial. Use the `destroy` command to remove all the resources you created through Terraform. + +
+ +```shell +terraform destroy --auto-approve +``` + +Output: +```shell +Destroy complete! Resources: 2 destroyed. +``` + +
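+
+To confirm that nothing remains under Terraform management, you can list the state after the destroy completes. An empty result means all tracked resources were removed.
+
+```shell
+terraform state list
+```
+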
+ +:::info + +If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for force delete. To trigger a force delete, navigate to the cluster’s details page and click on **Settings**. Click on **Force Delete Cluster** to delete the cluster. Palette automatically removes clusters stuck in the cluster deletion phase for over 24 hours. + +::: + + +If you are using the tutorial container and want to exit the container, type `exit` in your terminal session and press the **Enter** key. Next, issue the following command to stop the container. + +
+ +```shell +docker stop tutorialContainer && \ +docker rmi --force ghcr.io/spectrocloud/tutorials:1.0.7 +``` + + +## Wrap-up + +In this tutorial, you created a cluster profile, which is a template that contains the core layers required to deploy a host cluster. You then deployed a host cluster onto your preferred cloud service provider. After the cluster deployed, you updated the profile by adding the Hello Universe application and applied the updates to the host cluster. + +Palette assures consistency across cluster deployments through cluster profiles. Palette also enables you to quickly deploy applications to a Kubernetes environment with little or no prior Kubernetes knowledge. In a matter of minutes, you were able to provision a new Kubernetes cluster and deploy an application. + +We encourage you to check out the [Deploy an Application using Palette Dev Engine](/devx/apps/deploy-app) tutorial to learn more about Palette. Palette Dev Engine can help you deploy applications more quickly through the usage of [virtual clusters](/glossary-all#palettevirtualcluster). Feel free to check out the reference links below to learn more about Palette. + +
+ + +- [Palette Modes](../../introduction/palette-modes.md) + + +- [Cluster Profiles](../../cluster-profiles/cluster-profiles.md) + + +- [Palette Clusters](../clusters.md) + + +- [Hello Universe GitHub repository](https://github.com/spectrocloud/hello-universe) + diff --git a/docs/docs-content/clusters/public-cloud/gcp/_category_.json b/docs/docs-content/clusters/public-cloud/gcp/_category_.json new file mode 100644 index 0000000000..455b8e4969 --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/gcp/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 20 +} diff --git a/docs/docs-content/clusters/public-cloud/gcp/add-gcp-accounts.md b/docs/docs-content/clusters/public-cloud/gcp/add-gcp-accounts.md new file mode 100644 index 0000000000..c44b700e61 --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/gcp/add-gcp-accounts.md @@ -0,0 +1,90 @@ +--- +sidebar_label: "Register and Manage GCP Accounts" +title: "Register and Manage GCP Accounts" +description: "Learn how to add a GCP account to Palette." +hide_table_of_contents: false +sidebar_position: 10 +tags: ["public cloud", "gcp"] +--- + +Palette supports integration with Google Cloud Platform (GCP) accounts. This section explains how to create a GCP cloud account in Palette. + +## Prerequisites + +* You must have a GCP service account available for use with Palette. For detailed instructions on creating a service account, refer to [Creating and managing service accounts](https://cloud.google.com/iam/docs/creating-managing-service-accounts). + + + +* The service account must, at a minimum, have the following roles. + + - [Kubernetes Engine Admin](https://cloud.google.com/iam/docs/understanding-roles#kubernetes-engine-roles) + + - [Compute Admin](https://cloud.google.com/iam/docs/understanding-roles#compute.admin) + + - [Service Account User](https://cloud.google.com/iam/docs/understanding-roles#iam.serviceAccountUser) + + - [Storage Object Viewer](https://cloud.google.com/iam/docs/understanding-roles#storage.objectViewer) + +
+ + :::info + + Alternatively, you can create a custom role and assign Palette the required GCP permissions. Check out the [Required IAM Permission](required-permissions.md) for a detailed list of all permissions. + + ::: + + + + +* Ensure you have access to the JSON credential file for your service account. For additional guidance, refer to the [GCP Credentials](https://developers.google.com/workspace/guides/create-credentials) documentation. + +## Create Account + + +1. Log in to [Palette](https://console.spectrocloud.com) as Tenant admin. + + +2. Navigate to the left **Main Menu** and select **Tenant Settings**. + + +3. Select **Cloud Accounts** and click on **Add GCP Account**. + + +4. In the cloud account creation wizard, provide the following information: + * **Account Name:** Custom name for the cloud account. + + * **JSON Credentials:** The JSON credentials object. + +
+ + :::info + + You can use the **Upload** button to upload the JSON file you downloaded from the GCP console. + + ::: + + +5. Click the **Validate** button to validate the credentials. + + +6. When the credentials are validated, click on **Confirm** to save your changes. + +## Validate + +You can validate the account is available in Palette by reviewing the list of cloud accounts. + +
+ +1. Log in to [Palette](https://console.spectrocloud.com) as Tenant admin. + + +2. To review the list of cloud accounts, navigate to the left **Main Menu** and click on **Tenant Settings**. + + +3. Next, click on **Cloud Accounts**. Your newly added GCP account is listed under the GCP section. + + +## Next Steps + + +Now that you have added a GCP account to Palette, you can deploy clusters to your GCP account. To learn how to get started with deploying Kubernetes clusters to GCP, check out the [Create and Manage GCP IaaS Cluster](create-gcp-iaas-cluster.md) guide or the [Create and Manage GCP GKE Cluster](create-gcp-gke-cluster.md) guide. \ No newline at end of file diff --git a/docs/docs-content/clusters/public-cloud/gcp/architecture.md b/docs/docs-content/clusters/public-cloud/gcp/architecture.md new file mode 100644 index 0000000000..df674450f7 --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/gcp/architecture.md @@ -0,0 +1,24 @@ +--- +sidebar_label: "Architecture" +title: "Architecture" +description: "Learn about the architecture used to support Google Cloud using Palette." +hide_table_of_contents: false +tags: ["public cloud", "gcp", "architecture"] +sidebar_position: 0 +--- + +Palette supports Google Cloud Platform (GCP) as one of its public cloud environments. Using Palette, you can effectively manage the entire lifecycle of any combination of new or existing, simple or complex, small or large Kubernetes environments in GCP. Palette gives IT teams complete control, visibility, and production-scale efficiencies to provide developers with highly curated Kubernetes stacks and tools with enterprise-grade security. + +The following are some highlights of Palette-provisioned GCP clusters. + 
+ +- Control plane nodes and worker nodes are placed within a single private subnet that spans different availability zones within a region. + + +- A new Virtual Private Cloud (VPC) Network is created with all the network infrastructure components, such as Cloud NAT and a Cloud Router. In addition, firewall rules are created to protect all the API endpoints. + + +- The Kubernetes API server endpoint is exposed through a Global Load Balancer. Applications deployed into the cluster can use a Regional Load Balancer to expose internal Kubernetes services. + + ![gcp_cluster_architecture.png](/gcp_cluster_architecture.png) \ No newline at end of file diff --git a/docs/docs-content/clusters/public-cloud/gcp/create-gcp-gke-cluster.md b/docs/docs-content/clusters/public-cloud/gcp/create-gcp-gke-cluster.md new file mode 100644 index 0000000000..c570efe57c --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/gcp/create-gcp-gke-cluster.md @@ -0,0 +1,137 @@ +--- +sidebar_label: "Create and Manage GCP GKE Cluster" +title: "Create and Manage GCP GKE Cluster" +description: "Learn how to add and manage a GKE cluster deployed to GCP with Palette." +hide_table_of_contents: false +sidebar_position: 30 +tags: ["public cloud", "gcp"] +--- + +Palette supports creating and managing Kubernetes clusters using Google Kubernetes Engine (GKE). This section guides you to create a Kubernetes cluster that is deployed to GKE and that Palette manages. + +## Prerequisites + +Ensure the following requirements are met before you attempt to deploy a cluster to GCP. + +
+ +- Access to a GCP cloud account. + + +- You have added a GCP account in Palette. Review the [Register and Manage GCP Accounts](add-gcp-accounts.md) for guidance. + + +- An infrastructure cluster profile for GKE. Review the [Create Cluster Profiles](../../../cluster-profiles/task-define-profile.md) for guidance. + + +- Palette creates compute, network, and storage resources while provisioning Kubernetes clusters. Ensure there is sufficient capacity in the preferred GCP region to create the following resources: + - Virtual Private Cloud (VPC) Network + - Static External IP Address + - Network Interfaces + - Cloud NAT + - Cloud Load Balancing + - Persistent Disks + - Cloud Router + + +## Deploy a GKE Cluster + +1. Log in to [Palette](https://console.spectrocloud.com) and ensure you are in the correct project scope. + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + +3. Click on **Add New Cluster**. + + +4. A prompt displays to either deploy or import a new cluster. Click on **Deploy New Cluster**. + + +5. Select **GCP** and click on **Start GCP Configuration**. + + +6. Populate the wizard page with the cluster name, description, and tags. Tags assigned to a cluster are propagated to the VMs deployed to the computing environments. + +7. Select a GCP account, and click on **Next**. + + + +8. Select the **Managed Kubernetes** row and select one of your GKE cluster profiles. Click on **Next**. + + + +9. Review and customize pack parameters as desired. By default, parameters for all packs are set with values defined in the cluster profile. Click on **Next** to continue. + + +10. Fill out the following parameters, and click on **Next** when you are done. + +
+ + |Parameter|Description| + |---|---| + |**Project**|The project to which the cluster belongs.| + |**Region**|Choose the desired GCP region in which to deploy the cluster.| + + +11. The Node configuration page is where you can specify the availability zones (AZ), instance types, disk size, and the number of nodes. Configure the worker node pool. + +
+ + :::info + + You can add new worker pools to customize specific worker nodes to run specialized workloads. For example, the default worker pool may be configured with the c2.standard-4 instance types for general-purpose workloads. You can configure another worker pool with instance type g2-standard-4 to run GPU workloads. + + ::: + + +12. An optional taint label can be applied to a node pool during the cluster creation. You can edit the taint label on existing clusters. Review the [Node Pool](../../cluster-management/node-pool.md) management page to learn more. Toggle the **Taint** button to create a label. + + + +13. Enable or disable node pool taints. If tainting is enabled, then you need to provide values for the following parameters. + + |**Parameter**| **Description**| + |-------------|---------------| + |**Key** |Custom key for the taint.| + |**Value** | Custom value for the taint key.| + | **Effect** | Choose the preferred pod scheduling effect from the **drop-down Menu**. Review the [Effect Table](create-gcp-iaas-cluster#effect-table) below for more details. | + + #### Effect Table + + |**Parameter**| **Description**| + |-------------|---------------| + | **NoSchedule**| A pod that cannot tolerate the node taint and should not be scheduled to the node. + | **PreferNoSchedule**| The system will avoid placing a non-tolerant pod to the tainted node but is not guaranteed. + | **NoExecute**| New pods will not be scheduled on the node, and existing pods on the node will be evicted if they do not tolerate the taint. | + +14. Click on **Next** after configuring the node pool. + + + +15. The **Settings** page is where you can configure the patching schedule, security scans, backup settings, and set up Role Based Access Control (RBAC). Review cluster settings and make changes if needed. Click on **Validate**. + + +16. Review the settings summary and click on **Finish Configuration** to deploy the cluster. Be aware that provisioning GKE clusters can take 15 - 30 minutes depending on the cluster profile and the node pool configuration. + +You can monitor cluster deployment progress on the cluster details page. + + +## Validate + + +You can validate that your cluster is up and available by reviewing the cluster details page. + +1. Log in to [Palette](https://console.spectrocloud.com). + + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + + +3. The **Clusters** page lists the available clusters that Palette manages. Select your cluster to view its details page. + + + +4. From the cluster details page, verify the **Cluster Status** field displays **Running**. \ No newline at end of file diff --git a/docs/docs-content/clusters/public-cloud/gcp/create-gcp-iaas-cluster.md b/docs/docs-content/clusters/public-cloud/gcp/create-gcp-iaas-cluster.md new file mode 100644 index 0000000000..53c2e9d4db --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/gcp/create-gcp-iaas-cluster.md @@ -0,0 +1,148 @@ +--- +sidebar_label: "Create and Manage GCP IaaS Cluster" +title: "Create and Manage GCP IaaS Cluster" +description: "Learn how to add and manage an IaaS cluster deployed to GCP." +hide_table_of_contents: false +sidebar_position: 20 +--- + +Palette supports creating and managing Kubernetes clusters deployed to a Google Cloud Platform (GCP) account. This section guides you to create an IaaS Kubernetes cluster in GCP that Palette manages. 
+ +## Prerequisites + +Ensure the following requirements are met before you attempt to deploy a cluster to GCP: + +- Access to a GCP cloud account + + +- You have added a GCP account in Palette. Review the [Register and Manage GCP Accounts](add-gcp-accounts.md) for guidance. + + +- An infrastructure cluster profile for GCP. Review the [Create Cluster Profiles](../../../cluster-profiles/task-define-profile.md) for guidance. + + +- An SSH Key that is uploaded to Palette and available for usage. Refer to the [SSH Keys](/clusters/cluster-management/ssh-keys) guide to learn how to create an SSH key and upload the public key to Palette. + + +- Palette creates compute, network, and storage resources while provisioning Kubernetes clusters. Ensure there is sufficient capacity in the preferred GCP region to create the following resources: + - Virtual Private Cloud (VPC) Network + - Static External IP Address + - Network Interfaces + - Cloud NAT + - Cloud Load Balancing + - Persistent Disks + - Cloud Router + + +## Deploy a GCP Cluster + +1. Log in to [Palette](https://console.spectrocloud.com) and ensure you are in the correct project scope. + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + +3. Click on **Add New Cluster**. + + +4. A prompt displays to either deploy or import a new cluster. Click on **Deploy New Cluster**. + + +5. Select **GCP** and click on **Start GCP Configuration**. + + +6. Populate the wizard page with the cluster name, description, and tags. Tags assigned to a cluster are propagated to the VMs deployed to the computing environments. + + +7. Select a GCP account, and Click on **Next**. + + + +8. Select the **Infrastructure Provider** row and click on one of your GCP cluster profiles. Click on **Next**. + + + +9. Review and customize pack parameters as desired. By default, parameters for all packs are set with values defined in the cluster profile. Click on **Next** to continue. + + +10. Fill out the following parameters and click on **Next** when you are done. + +
+ + |Parameter|Description| + |---|---| + |**Project**|The project to which the cluster belongs.| + |**Region**|Choose the desired GCP region to deploy the cluster.| + |**SSH Key**|Choose the desired SSH key. Refer to the [SSH Keys](../../cluster-management/ssh-keys.md) guide to learn how to create an SSH key and upload the public key to Palette.| + |**Static Placement** | Check the **Static Placement** box to deploy resources into a pre-existing VPC. Review the [Static Placement](create-gcp-iaas-cluster.md#static-placement) table below to learn more about the required input fields.| + + #### Static Placement + + |Parameter|Description| + |---|---| + |**Virtual Network**| Select the virtual network from the **drop-down Menu**.| + |**Control plane subnet**| Select the control plane network from the **drop-down Menu**.| + |**Worker Network**| Select the worker network from the **drop-down Menu**. | + + + + +11. The Node configuration page is where you can specify the availability zones (AZ), instance types, disk size, and the number of nodes. Configure the master and worker node pools. A master and a worker node pool are configured by default. + +
+ + :::info + + You can add new worker pools to customize specific worker nodes to run specialized workloads. For example, the default worker pool may be configured with the c2.standard-4 instance types for general-purpose workloads. You can configure another worker pool with instance type g2-standard-4 to leverage GPU workloads. + + ::: + + +12. An optional taint label can be applied to a node pool during the cluster creation. You can edit the taint label on existing clusters. Review the [Node Pool](../../cluster-management/node-pool.md) management page to learn more. Toggle the **Taint** button to create a label. + + + +13. Enable or disable node pool taints. If tainting is enabled, then you need to provide values for the following parameters. + + |**Parameter**| **Description**| + |-------------|---------------| + |**Key** |Custom key for the taint.| + |**Value** | Custom value for the taint key.| + | **Effect** | Choose the preferred pod scheduling effect from the drop-down Menu. Review the [Effect Table](create-gcp-iaas-cluster#effect-table) below for more details. | + + #### Effect Table + + |**Parameter**| **Description**| + |-------------|---------------| + | **NoSchedule**| A pod that cannot tolerate the node taint and should not be scheduled to the node. + | **PreferNoSchedule**| The system will avoid placing a non-tolerant pod on the tainted node but is not guaranteed. + | **NoExecute**| New pods will not be scheduled on the node, and existing pods on the node will be evicted if they do not tolerate the taint. | + +14. Click on **Next** after configuring the node pool. + + + +15. The settings page is where you can configure the patching schedule, security scans, backup settings, and set up Role Based Access Control (RBAC). Review the cluster settings and make changes if needed. Click on **Validate**. + + + +16. Review the settings summary and click on **Finish Configuration** to deploy the cluster. Be aware that provisioning IaaS clusters can take approximately 15 - 30 min depending on the cluster profile and the node pool configuration. + +You can monitor cluster deployment progress on the cluster details page. + + +## Validate + +You can validate that your cluster is up and available by reviewing the cluster details page. + +1. Log in to [Palette](https://console.spectrocloud.com). + + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + +3. The **Clusters** page lists the available clusters that Palette manages. Select your cluster to review its details. + + +4. From the cluster details page, verify the **Cluster Status** field displays **Running**. \ No newline at end of file diff --git a/docs/docs-content/clusters/public-cloud/gcp/gcp.md b/docs/docs-content/clusters/public-cloud/gcp/gcp.md new file mode 100644 index 0000000000..607342646d --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/gcp/gcp.md @@ -0,0 +1,35 @@ +--- +sidebar_label: "GCP" +title: "Google Cloud Platform" +description: "The methods of creating clusters for a speedy deployment on any CSP" +hide_table_of_contents: false +tags: ["public cloud", "gcp"] +--- + +Palette supports integration with Google Cloud Platform (GCP). You can deploy and manage Host Clusters in GCP. To get started with GCP, start by adding your GCP account in Palette. Check out the [Register and Manage GCP Accounts](add-gcp-accounts.md). + + +## Get Started + +Learn how to deploy a cluster to a GCP by using Palette. Check out the [Deploy a Cluster with Palette](../deploy-k8s-cluster.md) tutorial to get started. 
+ +## Resources + +To learn more about Palette and GCP clusters, check out the following resources: + +- [Register and Manage GCP Accounts](add-gcp-accounts.md) + + +- [Create and Manage GCP IaaS Cluster](add-gcp-accounts.md) + + +- [Create and Manage GCP GKE Cluster](create-gcp-gke-cluster.md) + + +- [Architecture](architecture.md) + + +- [Required IAM Permissions](required-permissions.md) + + +- [Cluster Removal](../../cluster-management/remove-clusters.md) \ No newline at end of file diff --git a/docs/docs-content/clusters/public-cloud/gcp/required-permissions.md b/docs/docs-content/clusters/public-cloud/gcp/required-permissions.md new file mode 100644 index 0000000000..6ead2a186d --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/gcp/required-permissions.md @@ -0,0 +1,85 @@ +--- +sidebar_label: "Required IAM Permissions" +title: "Required IAM Permissions" +description: "A list of required IAM permissions that Palette requires for GCP deployments." +hide_table_of_contents: false +sidebar_position: 40 +tags: ["public cloud", "gcp", "iam"] +--- + + + +This table contains the required Google Cloud Platform (GCP) permissions to create a custom GCP role tailored for usage with Palette. When creating a custom role, ensure you include all the permissions listed below to prevent Palette from having issues when deploying a host cluster. + +| Permissions | Description | +|------------------------------------------|---------------------------------------------------------------| +| `compute.backendServices.create` | Create backend services | +| `compute.backendServices.delete` | Delete backend services | +| `compute.backendServices.get` | Get backend service information | +| `compute.backendServices.list` | List backend services | +| `compute.backendServices.update` | Update backend services | +| `compute.backendServices.use` | Use backend services | +| `compute.disks.create` | Create persistent disks | +| `compute.firewalls.create` | Create firewall rules | +| `compute.firewalls.delete` | Delete firewall rules | +| `compute.firewalls.get` | Get firewall rule information | +| `compute.firewalls.list` | List firewall rules | +| `compute.globalAddresses.create` | Create global addresses | +| `compute.globalAddresses.delete` | Delete global addresses | +| `compute.globalAddresses.get` | Get global address information | +| `compute.globalAddresses.list` | List global addresses | +| `compute.globalAddresses.use` | Use global addresses | +| `compute.globalForwardingRules.create` | Create global forwarding rules | +| `compute.globalForwardingRules.delete` | Delete global forwarding rules | +| `compute.globalForwardingRules.get` | Get global forwarding rule information | +| `compute.globalForwardingRules.list` | List global forwarding rules | +| `compute.healthChecks.create` | Create health checks | +| `compute.healthChecks.delete` | Delete health checks | +| `compute.healthChecks.get` | Get health check information | +| `compute.healthChecks.list` | List health checks | +| `compute.healthChecks.useReadOnly` | Use health checks in read-only mode | +| `compute.instanceGroups.create` | Create instance groups | +| `compute.instanceGroups.delete` | Delete instance groups | +| `compute.instanceGroups.get` | Get instance group information | +| `compute.instanceGroups.list` | List instance groups | +| `compute.instanceGroups.update` | Update instance groups | +| `compute.instanceGroups.use` | Use instance groups | +| `compute.instances.create` | Create instances | +| `compute.instances.delete` | Delete 
instances | +| `compute.instances.get` | Get instance information | +| `compute.instances.list` | List instances | +| `compute.instances.setLabels` | Set labels on instances | +| `compute.instances.setMetadata` | Set metadata on instances | +| `compute.instances.setServiceAccount` | Set service account on instances | +| `compute.instances.setTags` | Set tags on instances | +| `compute.instances.use` | Use instances | +| `compute.networks.create` | Create networks | +| `compute.networks.delete` | Delete networks | +| `compute.networks.get` | Get network information | +| `compute.networks.list` | List networks | +| `compute.networks.updatePolicy` | Update network policies | +| `compute.regions.get` | Get region information | +| `compute.regions.list` | List regions | +| `compute.routers.create` | Create routers | +| `compute.routers.delete` | Delete routers | +| `compute.routers.get` | Get router information | +| `compute.routes.delete` | Delete routes | +| `compute.routes.get` | Get route information | +| `compute.routes.list` | List routes | +| `resourcemanager.projects.get` | Get details of a specified Google Cloud project. | +| `resourcemanager.projects.list` | List all Google Cloud projects that the user has access to. | +| `storage.objects.get` | Get details of a specified object in Google Cloud Storage. | +| `storage.objects.list` | List all objects in a specified Google Cloud Storage bucket. | +| `iam.serviceAccounts.actAs` | Act as the service account specified, allowing access to its resources. | +| `iam.serviceAccounts.get` | Get details of a specified service account. | +| `iam.serviceAccounts.getAccessToken` | Get the Oauth2 access token for the service account. | +| `iam.serviceAccounts.list` | List all service accounts available to the user. | +| `serviceusage.quotas.get` | Get quota information for a specified Google Cloud service. | +| `serviceusage.services.get` | Get details of a specified Google Cloud service. | +| `serviceusage.services.list` | List all Google Cloud services available to the user. | +| `recommender.containerDiagnosisInsights.*` | Access insights about diagnosed issues with Google Kubernetes Engine containers. | +| `recommender.containerDiagnosisRecommendations.*` | Access recommendations for resolving diagnosed issues with Google Kubernetes Engine containers. | +| `recommender.locations.*` | Access details about locations in Google Cloud Recommender. | +| `recommender.networkAnalyzerGkeConnectivityInsights.*`| Access insights about network connectivity for Google Kubernetes Engine clusters. | +| `recommender.networkAnalyzerGkeIpAddressInsights.*` | Access insights about IP address usage for Google Kubernetes Engine clusters. | + diff --git a/docs/docs-content/clusters/public-cloud/public-cloud.md b/docs/docs-content/clusters/public-cloud/public-cloud.md new file mode 100644 index 0000000000..4d969e653e --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/public-cloud.md @@ -0,0 +1,32 @@ +--- +sidebar_label: "Public Cloud Clusters" +title: "Public Cloud Clusters" +description: "The methods of creating clusters for a speedy deployment on any CSP" +hide_table_of_contents: false +sidebar_custom_props: + icon: "server" +--- + +Palette supports provisioning new workload clusters on public clouds using cloud providers' infrastructure. It achieves this by provisioning new virtual machines (VMs) for the control plane and worker pools and uses their managed Kubernetes services such as EKS, AKS, GKE, TKE, and more. 
+ +Workload clusters are instantiated from cloud-specific [Cluster Profiles](/cluster-profiles) templates that are created with pre-configured layers and components required for cluster deployments. You can use one of the cluster profiles provided or create a new one. + +## Get Started + +Learn how to deploy a cluster to a public cloud provider by using Palette. Check out the [Deploy a Cluster with Palette](deploy-k8s-cluster.md) tutorial to get started. + + +## Supported Environments + +The following pages provide detailed instructions for setting up new workload clusters in the various environments. + +* [Amazon Web Services](aws/aws.md) + +* [Azure](azure/azure.md) + +* [Cox Edge](cox-edge/cox-edge.md) + +* [Google Cloud](gcp/gcp.md) + +* [Tencent](tke.md) + diff --git a/docs/docs-content/clusters/public-cloud/tke.md b/docs/docs-content/clusters/public-cloud/tke.md new file mode 100644 index 0000000000..bf393c65a5 --- /dev/null +++ b/docs/docs-content/clusters/public-cloud/tke.md @@ -0,0 +1,283 @@ +--- +sidebar_label: "Tencent TKE" +title: "Tencent TKE" +description: "The methods of creating clusters for a speedy deployment on Tencent-TKE" +hide_table_of_contents: false +tags: ["public cloud", "tencent", "tke"] +sidebar_position: 40 +--- + + +Palette supports the deployment of tenant workloads with Tencent Kubernetes Engine (TKE). The following highlights apply to TKE cluster provisioning through Palette: + +1. Palette enables the effortless deployment and management of containerized applications with fully managed TKE. + + +2. TKE is fully compatible with the native Kubernetes APIs and extends Kubernetes plugins such as CBS and CLB on the Tencent Cloud. + + +3. The diagram below shows the TKE architecture that Palette supports: + + ![tencent-diagram.png](/tencent-diagram.png) + +## Prerequisites + +* A Tencent Cloud account with appropriate [permissions](#tencent-cloud-account-permissions). + + +* Create a Cloud API **Secret ID** and **Secret Key**. + + +* Create the **Virtual Private Network** and **Subnet** in the region where the workload cluster needs to be deployed. + + +* A [**NAT Gateway**](https://intl.cloud.tencent.com/document/product/457/38369) created to support IP address translation and to enable internet access for resources in Tencent Cloud. + + +* A route table set to accept external traffic so that the nodes created in the associated subnets have internet access. + + +* Create a security group for network security isolation and add an inbound traffic rule that allows the TCP/HTTPS protocol on port 443 from all IPv4 and IPv6 sources through this security group. 
+ + +## Tencent Cloud Account Permissions + +**Last Update**: April 26, 2022 + +```yaml +{ + "version": "2.0", + "statement": [ + { + "effect": "allow", + "action": [ + "as:CreateLaunchConfiguration", + "as:CreateAutoScalingGroup", + "as:DescribeLaunchConfigurations", + "as:DescribeAutoScalingInstances", + "as:DescribeAutoScalingActivities", + "as:DescribeAutoScalingGroups", + "as:ModifyDesiredCapacity", + "as:ModifyAutoScalingGroup", + "as:DescribeAutoScalingGroups", + "as:DescribeAutoScalingGroupLastActivities", + "cam:GetRole", + "cam:GetPolicy", + "cam:DeletePolicyVersion", + "cam:CreatePolicyVersion", + "cam:ListGroupsForConsole", + "cam:ListPolicies", + "cam:ListMaskedSubAccounts", + "cvm:DescribeSecurityGroupLimits", + "cvm:DescribeSecurityGroups", + "cvm:CreateSecurityGroup", + "cvm:DescribeInstances", + "cvm:DescribeInstancesStatus", + "cvm:DescribeSecurityGroupAssociateInstances", + "cvm:DescribeSecurityGroupLimits", + "cvm:DescribeSecurityGroupPolicys", + "cvm:DescribeImages", + "cvm:DescribeCbsStorages", + "cvm:RunInstances", + "cvm:DescribeKeyPairs", + "cvm:DescribeAddresses", + "cvm:ModifySingleSecurityGroupPolicy", + "cvm:CreateSecurityGroupPolicy", + "cvm:DeleteSecurityGroupPolicy", + "clb:DescribeLoadBalancers", + "cloudaudit:DescribeEvents", + "cloudaudit:DescribeEvents", + "ecdn:PurgePathCache", + "ecdn:PurgeUrlsCache", + "ecdn:PushUrlsCache", + "monitor:DescribeDashboardMetricData", + "tke:CreateCluster", + "tke:DescribeClusters", + "tke:DescribeClusterEndpointStatus", + "tke:DescribeClusterEndpointVipStatus", + "tke:DescribeClusterSecurity", + "tke:CreateClusterEndpointVip", + "tke:CreateClusterEndpoint", + "tke:DeleteClusterEndpointVip", + "tke:DeleteClusterEndpoint", + "tke:DeleteCluster", + "tke:DescribeClusterAsGroupOption", + "tke:DescribeClusterInstances", + "tag:DescribeResourceTagsByResourceIds", + "tag:DescribeTagValues", + "tag:TagResources", + "tag:DescribeTagKeys", + "vpc:DescribeSubnetEx", + "vpc:DescribeVpcEx", + "vpc:DescribeVpcLimits", + "vpc:DescribeRouteTable", + "vpc:DescribeNatGateways", + "vpc:DescribeCcns", + "vpc:DescribeCcnAttachedInstances", + "vpc:DescribeLocalGateway", + "vpc:DescribeHaVips", + "vpc:DescribeVpnGw", + "vpc:DescribeDirectConnectGateways", + "vpc:DescribeVpcPeeringConnections", + "vpc:DescribeCustomerGateways", + "vpc:DescribeRoutes", + "vpc:ModifyNatGatewayAttribute", + "vpc:ResetNatGatewayConnection", + "vpc:DescribeAddress", + "vpc:DescribeTemplateLimits", + "vpc:DescribeAddressGroups", + "vpc:DescribeService", + "vpc:DescribeServiceGroups", + "vpc:DescribeNetworkAcls", + "vpc:DescribeNetworkInterfaces" + ], + "resource": [ + "*" + ] + } + ] +} +``` + + +## Create a Tencent Cloud Account + +Create a Tencent Cloud account in Palette from the Tenant Admin or Project Admin scope. To create the cloud account: + +1. Log in to the Palette and from the **Tenant Admin Settings**, select the **Cloud Accounts** tab. + + +2. Click **+ Tencent Account** to open the cloud account creation wizard and fill in the following details: + + |**Parameter** | **Description**| + |-----------------|----------------| + | **Account Name**| A custom name to identify the cloud account on the Palette Console.| + | **Optional Description**| Add a description, if any about the cloud account. + | **Secret ID**| The Secret ID of the Tencent cloud account. + | **Secret Key**| The secret key of the Tencent cloud account.| + + +3. Click the **Validate** button to validate credentials. + + +4. Click **Confirm** button to complete the cloud account create wizard. 
+ + +**Note**: The cloud account can be created during the first step of cluster creation when you fill in the basic information by clicking the **+** next to **Cloud Account**. + +## Deploy a Tencent Cluster + +Use the following steps to provision a new TKE cluster: + +1. Provide the basic cluster information such as: + * **Name**, **Description**, and **Tags**. Tags on a cluster are propagated to the VMs deployed on the cloud or data center environments. + * Select the desired [Tencent cloud account](#create-a-tencent-cloud-account). The Tencent credentials must be pre-configured in the Project/Tenant Admin settings. + + + **Note**: The cloud account can be created during cluster creation by clicking **+** next to the **Cloud Account**. +
+ +2. Select the cluster profile created for Tencent Cloud. The profile definition will be used as the cluster deployment template. + + +3. Review and override pack parameters as desired. By default, parameters for all packs are set with values defined in the cluster profile. While configuring the Operating System layer of the TKE cluster profile, configure the value of the OS pack file with any one of the following images: + + ```yaml + "OsName": "centos7.6.0_x64" + ``` + ```yaml + "OsName": "centos7.6.0_x64 GPU" + ``` + ```yaml + "OsName": "ubuntu18.04.1x86_64" + ``` + ```yaml + "OsName": "ubuntu18.04.1x86_64 GPU" + ``` + + :::caution + + While adding Add-on packs to the Cluster Profile, make sure that Persistent Volume Claim size is >=10 GB and in multiples of 10. + + Example: + + ```yaml + master: + persistence: + enabled: true + accessModes: + - ReadWriteOnce + size: 20Gi + ``` + + ::: + +4. Provide the Tencent Cloud account and placement information: + + |**Parameter** | **Description**| + |--------------|----------------| + | **Cloud Account**| Select the desired cloud account. + | **Tencent Cloud Accounts** | The Tencent credentials need to be pre-configured in the **Project**/**Tenant Admin** settings. + ||**Note**: The cloud account can be created during this step of
cluster creation by clicking **+** next to the **Cloud Account**. | + | **Region** | Choose the desired Tencent region where you
would like the clusters to be provisioned. + | **SSH Key Pair Name**| Choose the desired SSH keypair. You must preconfigure SSH key pairs on TKS for the desired regions. The selected key is inserted into the provisioned VMs. + | **VPCID**|The ID of the Virtual Private Cloud (VPC) that the stack is to be launched into. The VPC must be in the specified region. All cluster instances will be launched into this VPC. | + |**Cluster Endpoint Access**| Select Public, or Private & Public, based on how you want to establish the communication with the endpoint for the managed Kubernetes API server and your cluster.| + |**Public Security Group**|A security group to controls the traffic that is allowed to reach and leave the resources that it is associated with. For example, after you associate a security group with the cluster, it controls the inbound and outbound traffic to the cluster. | + + :::info + Palette encourages its uses to go with the Public Cluster endpoint access as of now. Other options will be supported in the near future. + ::: + +5. Public Access CIDRs - To enable access restrictions. + + +6. Update Worker Pools in parallel - Patch updates to all Worker Pools simultaneously. + + +7. Configure one or more worker node pools. A single worker node will be configured by default. To learn more about the configuration options, review the [Node Pool](../cluster-management/node-pool.md) documentation page. Click on **Next** when you are done with node pool configurations. + + +8. Review settings and deploy the cluster. Provisioning status with details of ongoing provisioning tasks is available to track progress. + +# Delete a Tencent Cluster + +The deletion of a Tencent cluster results in the removal of all Virtual Machines and associated Storage Disks created for the cluster. The following tasks need to be performed to delete a Tencent cluster: + +1. Ensure you are in the correct project scope. + + +2. Navigate to the left **Main Menu** and click on **Clusters** + + +3. Click on the cluster that you want to remove. + + +4. Click on the **Settings** drop-down menu. + + +5. Click on **Delete Cluster** + + +6. Type in the name of the cluster and click on **OK** + +The cluster status is updated to **Deleting** while cluster resources are being deleted. Once all resources are successfully deleted, the cluster status is updated to **Deleted** and is removed from the list of clusters. + +## Force Delete a Cluster + +In Tenant Admin and Project Admin scope, Palette allows you to force the deletion of a cluster that's been stuck in **Deletion** state for a minimum of **15 minutes**. + +1. Log in to the Palette Management Console. + + +2. Navigate to the **Cluster Details** page of the cluster stuck in deletion. + + - If the deletion is stuck for more than 15 minutes, click the **Force Delete Cluster** button from the **Settings** dropdown. + + - If the **Force Delete Cluster** button is not enabled, wait for 15 minutes. The **Settings** dropdown will give the estimated time for the auto-enabling of the **Force Delete** button. + + :::caution + + If any resources remain in the cloud, you should clean them up before initiating a forced delete. 
+ + ::: diff --git a/docs/docs-content/compliance.md b/docs/docs-content/compliance.md new file mode 100644 index 0000000000..922debd194 --- /dev/null +++ b/docs/docs-content/compliance.md @@ -0,0 +1,44 @@ +--- +sidebar_label: "Compliance" +title: "Certification of Compliance" +description: "Certification of Compliance" +hide_table_of_contents: false +sidebar_position: 220 +sidebar_custom_props: + icon: "user-shield" +tags: ["compliance", "soc2", "fips"] +--- + +We have two System and Organization Controls (SOC) certifications and a FIPS certificate for a Cryptographic Module. + +## SOC 2 Type II + +![soc2.png](/soc2.png "#width=180px") + +Spectro Cloud is certified against SOC 2 Type II in compliance with the AICPA’s (American Institute of Certified Public Accountants) TSC (Trust Services Criteria). +* The Spectro Cloud SOC 2 Type II audit report provides assurance of our organization’s: + * Security + * Availability + * Processing integrity + * Confidentiality + * Privacy +* SOC 2 audits are an important component in regulatory oversight, vendor management programs, internal governance, and risk management. +* These reports help users and their auditors understand the Spectro Cloud controls established to support operations and compliance. +* The annual SOC 2 certification is performed by an independent third-party auditor. +* The Spectro Cloud SOC 2 Type II report is available upon request to customers and prospects with a signed MNDA. + +## FIPS 140-2 + + +![FIPS-Compliance](/docs_compliance_compliance_fips-logo.png "#width=180px") + +Spectro Cloud is certified against FIPS 140-2 with [Certificate number 4349](https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/4349) in compliance with the Cryptographic Module Validation Program (CMVP). + +Our Spectro Cloud Cryptographic Module is a general-purpose cryptographic library. The FIPS-enforced Palette VerteX edition incorporates the module in the Kubernetes Management Platform and the infrastructure components of target clusters to protect the sensitive information of regulated industries. Palette VerteX supports FIPS at the tenant level. For more information about the FIPS-enforced Palette edition, check out [Palette VerteX](vertex/vertex.md). + +The module is tested against these configurations: + +* Red Hat Enterprise Linux 8 on Dell PowerEdge R440 with Intel Xeon Silver 4214R _with and without_ PAA +* SUSE Linux Enterprise Server 15 on Dell PowerEdge R450 with Intel Xeon Silver 4309Y _with and without_ PAA +* Ubuntu 18.04 on Dell PowerEdge R450 with Intel Xeon Silver 4309Y _with and without_ PAA +* Ubuntu 20.04 on Dell PowerEdge R450 with Intel Xeon Silver 4309Y _with and without_ PAA diff --git a/docs/docs-content/component.md b/docs/docs-content/component.md new file mode 100644 index 0000000000..7fc84a7b14 --- /dev/null +++ b/docs/docs-content/component.md @@ -0,0 +1,161 @@ +--- +sidebar_label: "Compatibility Matrix" +title: "Palette Components Compatibility Matrix" +description: "Learn what Palette components are compatible with what versions." +hide_table_of_contents: false +sidebar_position: 230 +sidebar_custom_props: + icon: "audits" +tags: ["components"] +--- + + + + +This page lists the version details of various Palette components and their respective Palette releases. Visit the [Downloads](spectro-downloads.md) resource to access the download URLs. 
+ +## Palette CLI Versions + +|Palette Release| Recommended CLI Version| +|---------------------------|----| +|Release 4.0.0 |4.0.0 | +|Release 3.4.0 |3.4.0 | +|Release 3.3.0 |3.3.0 | + + +## Palette Edge CLI Versions + +|Palette Release|CLI Version| +|---|-------| +|Release 4.0.0 |v4.0.0 | +|Release 3.4.0 |v3.4.2 | +|Release 3.3.0 |v3.3.0 | +|Release 3.2.0 |v3.2.0 | + + + +## On-Premises Installer Version + +|Palette Release|On-Prem Installer Version| +|--|---| +|3.4|2.8.0| +|3.3|2.6.0| +|3.2|2.4.0| +|3.1|2.4.0| +|3.0|2.1.0| +|2.8|2.1.0| +|2.7|2.1.0| +|2.6|2.1.0| +|2.5|2.0.2| +|2.3|2.0.2| + +## Latest Air Gapped OVA Version + +|Palette Release|Air Gapped Version| +|--|---| +|3.3|2.8.0| +|3.2|2.4.0| +|3.1|2.0.1| +|3.0|2.0.1| +|2.8|2.0.1| +|2.7|2.0.1| +|2.6|2.0.1| +|2.5|2.0.1| +|2.3|2.0.1| + +## Private Cloud Gateways (PCG) Images + +## vSphere PCG Image Version + +|Palette Release|vSphere PCG Version| +|--|---| +|3.4|1.8.0| +|3.3|1.6.0| +|3.2|1.4.0| +|3.1|1.2.0| +|3.0|1.2.0| +|2.8|1.2.0| +|2.7|1.2.0| +|2.6|1.2.0| +|2.5|1.1.9| +|2.3|1.1.9| +------ + +## MAAS PCG Image Version + +|Palette Release|MAAS PCG Version| +|--|---| +|3.4|1.0.12| +|3.3|1.0.12| +|3.2|1.0.12| +|3.1|1.0.11| +|3.0|1.0.11| +|2.8|1.0.11| +|2.7|1.0.11| +|2.6|1.0.11| +|2.5|1.0.9| +|2.3|1.0.9| +--------- + +## OpenStack PCG Image Version + +|Palette Release|OpenStack PCG Version| +|--|---| +|3.4|1.0.12| +|3.3|1.0.12| +|3.2|1.0.12| +|3.1|1.0.11| +|3.0|1.0.11| +|2.8|1.0.11| +|2.7|1.0.11| +|2.6|1.0.11| +|2.5|1.0.9| +|2.3|1.0.9| +------- + +## Kubernetes Versions + +- Kubernetes: Refer to the Kubernetes [pack documentation](integrations/kubernetes.md). + +## Operating System Layer Versions + +|Operating System |Versions| +|--|--| +|Ubuntu| 22.04| +|Ubuntu| 20.04| +|Ubuntu| 18.04| +|CentOS| 8.0| +|CentOS| 7.9| +|CentOS| 7.7| +|OpenSuSE|15.4| + + +## Network Layer Versions + +- Calico: Refer to the Calico [pack documentation](integrations/calico.md). + +- Cilium: Refer to the Cilium [pack documentation](integrations/cilium.md). + + +|Cilium Enterprise|Versions| +|--|--| +|1.10.x|1.10.8| + + +## Storage Layer Version + +- Azure Disk: Refer to the [pack documentation](integrations/azure-disk.md). +- GCE Persistent Disk: Refer to the [pack documentation](integrations/gce.md). +- NFS Subdir External Provisioner: Refer to the [pack documentation](integrations/nfs-subdir-external.md). +- Open Stack Cinder: Refer to the [pack documentation](integrations/openstack-cinder.md). +- Portworx: Refer to the [pack documentation](integrations/portworx.md). +- Rook Ceph: Refer to the [pack documentation](integrations/rook-ceph.md). +- vSphere CSI: Refer to the [pack documentation](integrations/vsphere-csi.md). +- vSphere Volume: Refer to the [pack documentation](integrations/vsphere-volume.md). 
+ +## Resources + +- [Packs List](integrations/integrations.mdx) + + +- [Downloads](spectro-downloads.md) diff --git a/docs/docs-content/devx/_category_.json b/docs/docs-content/devx/_category_.json new file mode 100644 index 0000000000..0b49ba8465 --- /dev/null +++ b/docs/docs-content/devx/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 70 +} diff --git a/docs/docs-content/devx/app-profile/_category_.json b/docs/docs-content/devx/app-profile/_category_.json new file mode 100644 index 0000000000..094470741d --- /dev/null +++ b/docs/docs-content/devx/app-profile/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 10 +} diff --git a/docs/docs-content/devx/app-profile/app-profile-cloning.md b/docs/docs-content/devx/app-profile/app-profile-cloning.md new file mode 100644 index 0000000000..89e0dfecc7 --- /dev/null +++ b/docs/docs-content/devx/app-profile/app-profile-cloning.md @@ -0,0 +1,61 @@ +--- +sidebar_label: "App Profile Cloning" +title: "App Profile Cloning" +description: "Palette Dev Engine App Profile Cloning" +hide_table_of_contents: false +sidebar_position: 30 +tags: ["devx", "app mode", "pde"] +--- + +Palette supports the cloning of App Profiles across multiple projects. For example, you can clone an app profile created under a specific project to another project within the same [tenant](../../glossary-all.md#tenant). The ability to clone App Profiles can be useful for the following use cases. + +* Share system scope App Profiles to projects scope. + + +* Share App Profiles amongst different projects. + +## Prerequisites + +* An App Profile created in Palette. Check out the [Create an App Profile](create-app-profile.md) for guidance. + +## Clone an App Profile + +To clone an App Profile follow the steps below: + +1. Login to [Palette](https://console.spectrocloud.com) + + +2. Select **App Profiles** from the left **Main Menu**. Identify the App Profile you want to clone and click on the three dots at the right handside of the row. Click on the **Clone** button from the drop down. + + +4. You will be prompted to fill out the following information: + * **Name:** Name of the new app profile. + * **Profile Version:** Version number for the new app profile. + * **Source Profile Version:** The version number of the source app profile getting cloned. + * **Target Project:** The target project to which the profile is to be cloned. Select the project name from the drop-down menu. + + +5. Click **Confirm** to conclude the cloning of the App Profile. + +In the target project specified during the clone process, you can now use the App Profile for app deployments. + + +## Validate + +To validate the App Profile is cloned and available in the target project conduct the following steps: + + +1. Login to [Palette](https://console.spectrocloud.com) + + +2. Select the **App Profiles** option from the left **Main Menu**. + + +3. This page will list all the App Profiles available to you. In addition, this should list all the cloned App Profiles as well. Use the cloned App Profile for App deployment under the target scope. 
+ + + + + + + diff --git a/docs/docs-content/devx/app-profile/app-profile-macros.md b/docs/docs-content/devx/app-profile/app-profile-macros.md new file mode 100644 index 0000000000..1b1e5f11df --- /dev/null +++ b/docs/docs-content/devx/app-profile/app-profile-macros.md @@ -0,0 +1,83 @@ +--- +sidebar_label: "Output Variables" +title: "Output Variables" +description: "Explore Palette Dev Engine App Profile Macros" +hide_table_of_contents: false +sidebar_position: 20 +tags: ["devx", "app mode", "pde"] +--- + + +Palette Dev Engine output variables are defined in the [app profile](../../glossary-all.md#appprofile) and are only resolved at cluster deployment time. The output variables have the following properties: + +* Output variables may be referenced by other service layers during app profile creation. + +* Output variables are inherited from the lower tiers of the app profile. + +* Each service type exposes a set of unique output variables. + + +The variables are generated when the service layer is deployed. Output variables can be consumed by the higher layers in the app profile. + +Check out the [Services Connectivity](services/connectivity.md) page to learn how to use output variables for establishing network connectivity between services. + + +
+ + +```hideClipboard +{{.spectro.app.$appdeploymentName..}} +``` + +## System Output Variables + +The following output variables are globally available for all services. + +| Output Variable | Description | +| --- | --- | +| `spectro.system.user.name` | The user name of the logged in user. | +| `spectro.system.user.uid` | The id of the logged in user.| +| `spectro.system.user.email` | The email address of the logged in user. | +| `spectro.system.tenant.uid `| The id of the current tenant or organization. | +| `spectro.system.project.uid` | The id of the current project. | +| `spectro.system.project.name` | The name of the current project. | +| `spectro.system.cluster.uid` | The id of the current cluster. | +| `spectro.system.cluster.name` | The name of the current cluster. | +| `spectro.system.kubernetes.version` | The current version of Kubernetes. | +| `spectro.system.reverseproxy.server` | The hostname of the Spectro Cloud reverse proxy server. This value is empty when not enabled. | +| `spectro.system.reverseproxy.port` | The port of the Spectro Cloud reverse proxy server. This value is empty when not enabled. | +| `spectro.system.reverseproxy.vhostport` | The port of the virtual host that is hosting the reverse proxy. | +| `spectro.system.reverseproxy.protocol` | The protocol used for the Spectro Cloud reverse proxy. | +| `spectro.system.cloud.type` | The type of cloud environment where the cluster is deployed, such as EKS, AKS, and GKE. | +| `spectro.system.cloud.region` | The cloud provider region where the cluster is deployed.| +| `spectro.system.apptier.name` | The name of the service layer from the context of the app profile. | +| `spectro.system.apptier.uid` | The id of the service layer. | +| `spectro.system.appprofile.name` | The name of the app profile. | +| `spectro.system.appprofile.uid` | The id of the app profile. | +| `spectro.system.appdeployment.uid` | The id of the app deployment. | +| `spectro.system.appdeployment.name` | The name of the app deployment. | +| `spectro.system.appdeployment.tiername` | The name of the service layer from the context of the app deployment. | +| `spectro.system.appdeployment.ingress.host` | The ingress host pattern for a cluster group with ingress enabled. This value is dynamically generated. | + +## Container Service Output Variables + +The container service type exposes the following output variables. Replace **[service-name]** with the respective name of the service layer. + +| Output Variable | Description | +| --- | --- | +| `.spectro.app.$appDeploymentName.[service-name].CONTAINER_NAMESPACE` | The Kubernetes namespace of the deployed container. | +|`.spectro.app.$appDeploymentName.[service-name].CONTAINER_SVC` | The Kubernetes DNS hostname of the service. | +|`.spectro.app.$appDeploymentName.[service-name].CONTAINER_SVC_PORT` | The exposed port of the service. | +| `spectro.app.$appDeploymentName.[service-name].CONTAINER_SVC_EXTERNALHOSTNAME`| The Kubernetes DNS hostname of the load balancer. This value is available if the service's to **Public** and deployed to a public cloud provider environment. | +|`spectro.app.$appDeploymentName.[service-name].CONTAINER_SVC_EXTERNALIP`| The public URL of the load balancer. This value is available if the service's access is set to **Public** and deployed to a private cloud provider environment.| + +## Database Service Output Variables + +Each database service exposes a set of output variables. Review each database service for more details. 
You can find the details for each database service in the [Available Services](service-listings/service-listings.mdx) resource. + +## Resources + +* [Palette System Macros](../../registries-and-packs/pack-constraints.md#pack-macros) + +* [Palette User Macros](../../clusters/cluster-management/macros.md) +
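+
+## Example: Referencing Output Variables
+
+As a minimal, hypothetical sketch, the snippet below shows how a layer placed above a container service could consume that service's output variables as environment variables. The layer name `api` and the variable names `API_HOST` and `API_PORT` are placeholders for illustration only.
+
+```yaml
+# Environment variables for a layer deployed above a container service named "api".
+env:
+  - name: API_HOST
+    value: "{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC}}"
+  - name: API_PORT
+    value: "{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC_PORT}}"
+```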
+ diff --git a/docs/docs-content/devx/app-profile/app-profile.md b/docs/docs-content/devx/app-profile/app-profile.md new file mode 100644 index 0000000000..fe28917723 --- /dev/null +++ b/docs/docs-content/devx/app-profile/app-profile.md @@ -0,0 +1,37 @@ +--- +sidebar_label: "App Profiles" +title: "App Profiles" +description: "Learn how to create and manage App Profiles in Palette Dev Engine." +hide_table_of_contents: false +tags: ["devx", "app mode", "pde"] +--- + +App Profiles are templates created with pre-configured services required for Palette Virtual Cluster deployment. App Profiles provide a way to drive consistency across virtual clusters. + +You create App Profiles to meet specific types of workloads on your Palette [virtual clusters](../palette-virtual-clusters/palette-virtual-clusters.md). You can use containers, Helm Charts, custom manifests, and other out-of-the-box services such as databases, message queue systems, and object storage. Check out the Palette Dev Engine [Services](services/services.md) documentation to learn more about the available services. + +You can review all the Palette Dev Engine services that offer an out-of-the-box experience in the [Service Listings](../app-profile/services/services.md). + + +:::caution + +When adding a manifest-type layer to an App Profile, make sure to specify a namespace. Otherwise, the manifest is deployed to the `Default` namespace. + +```yaml +namespace: yourNameHere +``` +::: + +
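+
+The snippet below is a minimal, hypothetical example of a manifest that sets an explicit namespace. The resource kind, name, and namespace value are placeholders only; substitute the namespace your workload should use.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: example-config
+  # Setting metadata.namespace keeps the resource out of the Default namespace.
+  namespace: my-app-namespace
+data:
+  greeting: "hello"
+```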
+ +## Get Started + +Get started today by learning how to create your [App Profile](create-app-profile.md). + +## Resources +- [Create an App Profile](create-app-profile.md) +- [Container Deployment](container-deployment.md) +- [App Profile Macros](app-profile-macros.md) +- [App Profile Cloning](app-profile-cloning.md) +- [App Profile Versioning](versioning-app-profile.md) + diff --git a/docs/docs-content/devx/app-profile/container-deployment.md b/docs/docs-content/devx/app-profile/container-deployment.md new file mode 100644 index 0000000000..fd71b22727 --- /dev/null +++ b/docs/docs-content/devx/app-profile/container-deployment.md @@ -0,0 +1,110 @@ +--- +sidebar_label: "Container Deployment" +title: "Container Deployment" +description: "Palette Dev Engine App Profile Container Deployment" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["devx", "app mode", "pde"] +--- + +Palette App Mode supports the use of containers, a standard unit of software that packages code and all its dependencies to run applications quickly and reliably from one computing environment to another. Containers contain all the required executables, binary codes, libraries, and configuration files. As a result, containers are lightweight and portable with less overhead. To add a container tier to Palette Dev Engine App Profile, follow the steps below. + + +## Prerequisite + +* Access to Palette Dev Engine App Mode. + + +## Add Container to App Profile + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Select **App Profiles** from the left **Main Menu** and click on the **New App Profile** button at the top right-hand side of the main screen. + + +3. Provide the wizard with the following information and click on **Next** after you have filled out the following basic information. + + | Parameter | Description | + |-------------------------------|-----------------| + |**Application Profile Name** | A custom name for the App Profile| + |**Description (optional)** | Description of the App Profile, if any | + |**Tag (optional)** | Tags on a cluster group are propagated to the infrastructure environment environments.| + + +4. Next, select **Container Deployment** from the available services list. + + +5. Provide the following information to the wizard. + + **General Settings**: + + | Parameter | Description | + | ---------------- | ------------------------------------------------------------------------------------------------------ | + | **Container Name** | A unique name for the container deployment. | + | **Registry** | Select the registry from which the image will be downloaded. If specifying a non-Docker Hub registry, ensure you provide the full URL of the image. | + | **Image** | Image of the container to be deployed. | + | **Replicas** | The number of application instances to be deployed. This option follows the same behavior as a [*ReplicaSet*](https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/) in the Kubernetes configuration file. A max of 10 replicas is supported. + +
+ + :::info + + + + When adding a container image from a public [DockerHub registry](https://hub.docker.com/), you can skip the registry hostname. For instance, to download the Nginx image, specify `nginx` and it will be downloaded correctly during the provisioning process. + + ::: + +
+ + **Network Access**: + + | Parameter | Description | + | -------------- | ------------------------------------------------------------------------------------------------------------- | + | **Private** | To establish connectivity to a container service through a private network. | + | **Public** | To establish connectivity to a container service through the public network. | + | **Port number** | Exposes the container for external communication. | + +
+ + **Environment Variables**: + + | Parameter | Description | + | ----------------------- | ------------------------------------------------------------------------------------------------------ | + | **Environment Variables** | Environment variables can be specified as **Key-Value** pairs during the container deployment. | + +
+ + **Volume**: + + | Parameter | Description | + | ------------- | --------------------------------------------------------------- | + | **Volume** | To persist the data generated by and used by the container. | + | **Name** | Volume name. | + | **Size** | The size of the volume in GiB. | + | **Mount Path** | The path to the volume. | + + +
+ + * **Runtime Settings**: The command and arguments you define here will override the default command and arguments provided by the container image. + + +6. Click the **Review** button when you have filled out the information and are ready to conclude the wizard. + +Once the container is added as a layer to the App Profile, continue with the remaining steps of the [App Profile creation](create-app-profile.md) wizard. You can add more services as layers if needed. + +## Validate + +1. Login to [Palette](https://console.spectrocloud.com). + + +2. Select the **App Profiles** option from the left **Main Menu**. + + +3. In the App Profiles page, you will find your App Profile listed. Click the name of the App Profile to view the profile details. The app profile tier details will show the container added to the profile. + + + + diff --git a/docs/docs-content/devx/app-profile/create-app-profile.md b/docs/docs-content/devx/app-profile/create-app-profile.md new file mode 100644 index 0000000000..33b4d64296 --- /dev/null +++ b/docs/docs-content/devx/app-profile/create-app-profile.md @@ -0,0 +1,93 @@ +--- +sidebar_label: "Create an App Profile" +title: "Create an App Profile" +description: "This document provides guidance on how to create a Palette App Profile" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["devx", "app mode", "pde"] +--- + + +You can create as many App Profiles as needed to fit various types of workloads on your Palette Virtual Clusters. Each App Profile can contain multiple services, also called layers in the App Profile stack. You can also create multiple versions of an App Profile. For more information, visit [App Profile Versioning](versioning-app-profile.md). + +Use the following steps to create an App Profile. + + +:::info + +A tutorial is available to help you learn how to use Palette Dev Engine by deploying an application. Check out [Deploy an Application using Palette Dev Engine](../apps/deploy-app.md) to get started with Palette Dev Engine. + +::: + + +## Prerequisites + +* A Spectro Cloud [account](https://www.spectrocloud.com/get-started/). +
+ +## App Profile Creation + +To create an App Profile: + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. In App Mode, select **App Profiles** from the **Main Menu**, and click the **New App Profile** button. + + +3. Provide the following basic information for your App Profile and click **Next**. + + +| Parameter | Description | +|-------------------------------|-----------------| +|Application Profile Name | A custom name for the App Profile| +|Version (optional) | The default value is 1.0.0. You can create multiple versions of an App Profile using the format **`major.minor.patch`**. +|Description (optional) | Description of the App Profile. | +|Tag (optional) | Assign tags to the app profile.| + + +4. Select one of the available services to start configuring your App Profile. Refer to [App Profiles](../app-profile/app-profile.md) for a list of available services. + + +5. Provide configuration information for the service. + + +6. You can add more services to the App Profile as needed. To do this, click the **Actions** button next to the **Configure tier** pane. To rearrange layers in the profile, select a service and drag it up or down in the pane. Each service becomes a layer in the App Profile stack in the order shown in this pane. + + +7. When you've provided the required configuration information for services, click **Review**. Your App Profile is now created and can be deployed. + +## Validate + +To validate your App Profile is available and ready for use, use the following steps. + +1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. + + +2. Navigate to the left **Main Menu** and click on **App Profiles**. + + +3. Select the cluster profile you created to review its details. + + +4. Hover your cursor over each app layer to learn more about the layers, including the pack name, version, and registry. + + ![A view of a cursor triggering the info box for each app profile layer.](/devx_app-profile_create-app-profile_app-layer-infoboxes.png) + +
+ + :::info + + Use the pop-up information box for each layer to help you gather the required information when creating Terraform templates for [app profiles](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/application_profile). + + ::: + + +5. Deploy your application to a virtual cluster to verify all the required configurations and dependencies are correct. Review the [Create and Manage Apps](../apps/create-app.md) to learn how to deploy an app to a virtual cluster. Check out the [Deploy an Application using Palette Dev Engine](../apps/deploy-app.md) tutorial for a more in-depth guide. + + +
+ +## Next Steps + +Start exploring the various [out-of-the-box](services/services.md) services Palette exposes to application authors. Use these services to quickly deploy applications without the overhead of managing and configuring the infrastructure required for common third-party services such as databases, message queues, and more. diff --git a/docs/docs-content/devx/app-profile/services/_category_.json b/docs/docs-content/devx/app-profile/services/_category_.json new file mode 100644 index 0000000000..ae9ddb024d --- /dev/null +++ b/docs/docs-content/devx/app-profile/services/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 50 +} diff --git a/docs/docs-content/devx/app-profile/services/connectivity.md b/docs/docs-content/devx/app-profile/services/connectivity.md new file mode 100644 index 0000000000..36646ef709 --- /dev/null +++ b/docs/docs-content/devx/app-profile/services/connectivity.md @@ -0,0 +1,33 @@ +--- +sidebar_label: "Service Connectivity" +title: "Service Connectivity" +description: "Palette Dev Engine Database Connectivity" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["devx", "app mode", "pde"] +--- + + +Using the exposed output variables, you can connect different service layers. For example, assume you have one application and database defined in an app profile. You can connect the two using the exposed output variable containing the Kubernetes service hostname. + +It's important to consider the order of service layers. Using the previous example, you must add the application after the database service layer to use the output variables of the database service. In other words, the database service should be at the app profile's bottom-most layer. + +The order of the service layers is important because the output variables used in services follow a usage hierarchy. The output variables for a service are only available if the service comes after the service that exposes the output variable. Output variables from the first services you add, which become the first layer in the app profile stack, can be consumed by other services after it. However, output variables cannot be passed downwards from the top service layers. + + +## Connectivity Example + +The following diagram is an example of an app profile containing three different service layers. The bottom layer is a Postgres database, the second layer is a container application, and the top layer is a Helm application. + + +The API server communicates with the database, and the application sends requests to the API. For each service to establish network connectivity, each layer needs to reference the output variable of the lower layer. The API will use the output variable Postgres exposes that contains the Kubernetes hostname. + +![Output Variables example](/devx-services-connectivity-output-variables-example.png) + +The API server can consume the output variable `{{.spectro.app.$appDeploymentName.postgresql-3.POSTGRESMSTR_SVC}}` from the Postgres service layer to connect to the database. The output variable would be consumed as an environment variable. + +![The API layer consuming the DB output variable](/devx-services-connectivity-container-env-example.png) + +The application would use the output variable `{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC}}` from the API service layer to connect to the API. The output variable value can be referenced as a YAML value in the Helm manifest file. 
+ +![The App layer consuming the API output variable](/devx-services-connectivity-helm-env-example.png) \ No newline at end of file diff --git a/docs/docs-content/devx/app-profile/services/db-services.md b/docs/docs-content/devx/app-profile/services/db-services.md new file mode 100644 index 0000000000..ca09576adc --- /dev/null +++ b/docs/docs-content/devx/app-profile/services/db-services.md @@ -0,0 +1,70 @@ +--- +sidebar_label: "Databases" +title: "Databases" +description: "Explore Palette Dev Engine Database Services" +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["devx", "app mode", "pde"] +--- + +Palette Dev Engine facilitates database service setup, operation, and scaling without installing physical hardware, software, or performance configurations. Instead, Palette takes care of all the administrative and maintenance tasks so that you can use and access the database quickly. + +For a list of all the supported databases, refer to the [Available Services](service-listings/service-listings.mdx) resource and select the **Database** filter. + +## Database Deployment + +Palette leverages several Kubernetes built-in workload resources such as Deployment, ReplicaSet, DaemondSet, StatefulSet, etc. To take advantage of the persistence of the data storage, Palette deploys database services as [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/). + +StatefulSet lets you run one or more related pods that track the state. The database service workload records data persistently through a StatefulSet that matches each pod with a [PersistentVolume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/). The database service running in the pods that belong to the StatefulSet can replicate data to other pods in the same StatefulSet to improve the overall resilience of the service. + +## Storage + +You must allocate storage to the database service based on the available storage within the Virtual Cluster. + +:::caution + + +By default, cluster groups are configured not to back up the disk storage. This default behavior affects database services because a backup would not include the storage disk. To learn more, refer to [Enable Disk Backup on Virtual Clusters](../../../clusters/cluster-groups/cluster-group-backups.md). + +::: + +## Version Update + +You can make changes to the app profile services, such as version updates, manifest updates, app service additions, and removals. [App Profile Service update](../versioning-app-profile.md#update-an-app-profile) +will generate an update notification on all the apps created from the app profile. Update notifications include all the changes applied to the profile since the initial creation or the previous update. You can apply the update to the apps individually at any time. + +## Output Variables + +Each database service has a set of exposed output variables. These output variables can be used to establish service connectivity with other service layers of the app profile by consuming the information. + +The following code snippet is an example of the output variables exposed by the MongoDB service. Check out the [service listings](service-listings/service-listings.mdx) page to learn more about each service. + +
+ + +```hideClipboard +env: + - name: USER_NAME + value: "{{.spectro.app.$appDeploymentName.mongodb-1.USERNAME}}" + - name: PASSWORD + value: "{{.spectro.app.$appDeploymentName.mongodb-1.PASSWORD}}" + - name: MONGO_URI + value: "{{.spectro.app.$appDeploymentName.mongodb-1.MONGO_URI}}" + - name: MONGO_URI_SRV + value: "{{.spectro.app.$appDeploymentName.mongodb-1.MONGO_URI_SRV}}" +``` + +
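+
+Once a container layer consumes these variables, you can sanity-check the connection from inside that container. The following is a minimal sketch and assumes the `mongosh` client is available in the container image; it is not part of the service configuration itself.
+
+```shell hideClipboard
+# Confirm the connection string was injected and the database answers a ping.
+echo "Connecting to ${MONGO_URI}"
+mongosh "${MONGO_URI}" --eval 'db.runCommand({ ping: 1 })'
+```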
+
+:::info
+Service connectivity follows a fixed hierarchy in Palette. Higher-level services, meaning services added to the app profile after the database service, establish connectivity by consuming the database service's output variables.
+:::
+
+
+ + +## Connect to a DB Service + +Applications and clients can connect to a Palette database service by using the information exposed in the output variables. Check out the [Service Connectivity](connectivity.md) documentation to learn more about connecting to a database service. \ No newline at end of file diff --git a/docs/docs-content/devx/app-profile/services/service-listings/_category_.json b/docs/docs-content/devx/app-profile/services/service-listings/_category_.json new file mode 100644 index 0000000000..455b8e4969 --- /dev/null +++ b/docs/docs-content/devx/app-profile/services/service-listings/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 20 +} diff --git a/docs/docs-content/devx/app-profile/services/service-listings/cockroach-db.md b/docs/docs-content/devx/app-profile/services/service-listings/cockroach-db.md new file mode 100644 index 0000000000..1a219df59c --- /dev/null +++ b/docs/docs-content/devx/app-profile/services/service-listings/cockroach-db.md @@ -0,0 +1,156 @@ +--- +sidebar_label: "CockroachDB" +title: "CockroachDB" +description: "Learn how to use CockroachDB with Palette Dev Engine." +hide_table_of_contents: false +type: "appTier" +category: ['databases'] +hiddenFromNav: false +sidebar_position: 50 +logoUrl: "https://upload.wikimedia.org/wikipedia/en/thumb/3/31/Cockroach_Labs_Logo.png/220px-Cockroach_Labs_Logo.png" +tags: ["devx", "app mode", "pde", "databases"] +--- + +CockroachDB is a [distributed SQL database](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) designed for cloud-native environments. CockroachDB provides a reliable and scalable solution for managing data across multiple nodes and regions. Its architecture automates data replication, sharding, and rebalancing, By simplifying operational tasks, Cockroach enables developers to concentrate on building their applications. + +With a focus on strong consistency and horizontal scalability, CockroachDB supports fast transactions and real-time data insights. Its fault-tolerant and self-healing capabilities help reduce downtime and ensure data accuracy. As a result, CockroachDB offers a stable and efficient database solution for developers looking to build robust applications in today's demanding digital landscape. + + + +## Deploy CockroachDB + +Palette users can deploy CockroachDB to a virtual cluster by using the following steps. + +### Prerequisite + +- A Virtual Cluster with the following minimum resources. + - 8 CPU + - 8 GB of Memory + - 8 GB of Storage. + + +### Enablement + +1. Log in to [Palette](https://console.spectrocloud.com). + + + +2. On the right side of the window, click on the **User Menu** and select **Switch to App Mode**. + + + +3. Navigate to the left **Main Menu** and click on **App Profiles** to create a new app profile. Review [Create an App Profile](../../create-app-profile.md) for more information. Provide the following basic information and click **Next**. + +| Parameter | Description | +|-----------------------------|-----------------| +|Application Profile Name | A custom name for the app profile.| +|Version (optional) | The default value is 1.0.0. You can create multiple versions of an app profile using the format **`major.minor.patch`**. +|Description (optional) | Description of the app profile. | +|Tag (optional) | Assign tags to the app profile.| + + +4. Select the **CockroachDB** service and start the configuration. + + + +5. Provide the following information to the wizard: + * **Name**: The application name. 
+ + + * **Username**: The user name for database access control. + + + * **dbPassword**: Security password for the DB service. + + + * **Database Name**: The name of the database to target. + + + * **PersistentVolumeClaim Size (GiB)**: Select the volume according to the storage volume available in the cluster group and virtual clusters. Ensure you do not exceed the maximum storage size for your virtual cluster. + + +6. Save your changes. + + +7. Deploy the app profile to a Palette Virtual Cluster. Use the [Deploy a Virtual Cluster](../../../../clusters/palette-virtual-clusters/deploy-virtual-cluster.md#deploy-a-virtual-cluster) guide for additional guidance or check out the [Deploy an Application using Palette Dev Engine](../../../apps/deploy-app.md) tutorial. + + +### Validate + +1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. + + +2. Navigate to the left **Main Menu** and select **Apps**. + + + +3. Select the application that contains CockroachDB. + + + +4. Validate your application is displaying the green status. The color code in the app profile box shows the status of the service deployment. + +|**Color Code**| **Description**| +|--------------|--------------| +|Green| Successfully Deployed| +|Blue | Under Deployment| +|Red | Error State| + + +## Output Variables + +The exposed output variables of this service layer may be used in other service layers. These output variables are typically used for connectivity purposes: + +| Parameter | Output Variable | Description | +|------------------------|-------------------------------------------------------------------------------------|-------------------------------------------------| +| Database Username | `{{.spectro.app.$appDeploymentName.database-.COCKROACHDBMSTR_USERNAME}}` | The database user name. | +| Database User Password | `{{.spectro.app.$appDeploymentName.database-.COCKROACHDBMSTR_PASSWORD}}` | The password of the database user name. | +| Database Name | `{{.spectro.app.$appDeploymentName..COCKROACHDBMSTR_DB_NAME}}` | The name of the database. +| Service Hostname | `{{.spectro.app.$appDeploymentName.database-.COCKROACHDBMSTR_SVC}}` | The Kubernetes service hostname for the database. | +| Service Port | `{{.spectro.app.$appDeploymentName.database-.COCKROACHDBMSTR_SVC_PORT}}` | The exposed ports for the database service. | +| Service Namespace | `{{.spectro.app.$appDeploymentName.database-.COCKROACHDBMSTR_SVC_NAMESPACE}}` | The namespace of the service. | + + +## Database Password + +You can get the database secret by reading the content of the Kubernetes secret created for the database user. To retrieve the password for the Redis database, use the following command format. + +
+ +```shell +kubectl get secret --user \ + --namespace --ns --output jsonpath='{.data.password}' | base64 --decode +``` + +Replace the values with the respective names. + + * app-name: represents the name of the app provided during the app creation process. + * service-name: The name of the service layer in the app profile. + + +### Example + +- App Name: `app-tion-medon` + +- Service Name: `cockroachdb-1` + + +```shell +kubectl get secret app-tion-medon-cockroachdb-1-user \ + --namespace app-tion-medon-cockroachdb-1-ns --output jsonpath='{.data.password}' | base64 --decode +``` +Output: +```shell +.Hr1}%DrA2MFf +``` +## Next Steps + +To learn more about developing with CockroachDB, check out the [CockroachDB Developer Guide](https://www.cockroachlabs.com/docs/stable/developer-guide-overview.html). The developer guide is a great resource for understanding how to get started with CockroachDB and build applications that are scalable, resilient, and secure. + + +## Resources + +- [CockroachDB Official Documentation](https://www.cockroachlabs.com/docs/) + + +- [Developer Guide](https://www.cockroachlabs.com/docs/stable/developer-guide-overview.html) \ No newline at end of file diff --git a/docs/docs-content/devx/app-profile/services/service-listings/mongo-db.md b/docs/docs-content/devx/app-profile/services/service-listings/mongo-db.md new file mode 100644 index 0000000000..273e460d0b --- /dev/null +++ b/docs/docs-content/devx/app-profile/services/service-listings/mongo-db.md @@ -0,0 +1,145 @@ +--- +sidebar_label: "MongoDB" +title: "MongoDB" +description: "Palette Dev Engine MongoDB Service" +hide_table_of_contents: false +type: "appTier" +category: ['databases'] +sidebar_position: 0 +logoUrl: "https://newrelic.com/sites/default/files/styles/800w/public/2021-10/mongo_logo.jpg?itok=Z1PabBZB" +tags: ["devx", "app mode", "pde", "databases"] +--- + + +[MongoDB](https://www.mongodb.com/) is a developer data platform that quickly builds applications with optimal performance and scalability. It provides data distribution and mobility across multiple cloud environments. In addition, this multi-cloud database service provides you with resilience, data privacy, and security. + +## Add MongoDB to an App Profile + +Use the following steps to add MongoDB to an app profile. + +
+
+### Prerequisite
+
+- A Spectro Cloud [account](https://www.spectrocloud.com/get-started/).
+
+ +### Enablement + +You can use the following steps to learn how to add MongoDB to your app profile. + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. On the right side of the window, click the **User Menu** to expand it and select **Switch to App Mode**. + + +3. From the **Main Menu** click **App Profiles** to create a new profile. Check out the [Create an App Profile](../../create-app-profile.md) guide to learn how. Provide the following basic information and click **Next**. + +| **Parameter** | **Description** | +|-------------------------|-----------------| +|Application Profile Name | A custom name for the App Profile.| +|Version (optional) | The default value is 1.0.0. You can create multiple versions of an App Profile using the format **`major.minor.patch`**. +|Description (optional) | Description of the App Profile. | +|Tag (optional) | Assign tags to the app profile.| + + +4. Select **MongoDB** from the database services and start the configuration. + + +5. Provide the following information to the wizard: + * **Name:** The DB name. You can use the default Palette-generated name or create a custom name. + * **Username:** The user name for database access control. + * **Password:** The password for the username. + * **Database Volume Size:** Select the volume size for the database. Ensure you stay within the storage amount available in the cluster group and virtual clusters. + + * **Version:** Select the version from the **Version** drop-down. + +6. Click **Save Changes**. +### Validate + +1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. + + +2. Navigate to the left **Main Menu** and select **Apps**. + + + +3. Select the application that contains MongoDB. + + + +4. Validate your application is displaying the green status. The color code in the app profile box shows the status of the service deployment. + +|**Color Code**| **Description**| +|--------------|--------------| +|Green| Successfully Deployed| +|Blue | Under Deployment| +|Red | Error State| + + +## Output Variables + +The exposed output variables of this service layer may be used in other service layers. These output variables are typically used for connectivity purposes. + +| Parameter | Output Variable | Description | +|------------------------|-------------------------------------------------------------------------------------|-------------------------------------------------| +| Database Username | `{{.spectro.app.$appDeploymentName..USERNAME}}` | The database user name. | +| Database User Password | `{{.spectro.app.$appDeploymentName..PASSWORD}}` | The password of the database user name. | +| Connection String | `{{.spectro.app.$appDeploymentName..MONGO_URI}}` | The MongoDB connection string that contains the Kubernetes service hostname of the database. The connection string is prefixed with `mongodb://` +| DNS Seed | `{{.spectro.app.$appDeploymentName..MONGO_URI_SRV}}` | Represents the MongoDB DNS seed list connection format. The SRV indicates to the client that the host name that follows corresponds to a DNS SRV record. Contains the prefix `mongodb+srv` | + + +## Database Password + +You can get the database password by reading the content of the Kubernetes secret created for the database user. To retrieve the password for the MongoDB database user, use the following command format. + +```shell +kubectl get secret -- \ + --namespace --ns --output jsonpath='{.data.password}' | base64 --decode +``` + +Replace the values with the respective names. 
+ + * app-name: represents the name of the app provided during the Palette app creation process. + * service-name: The name of the service layer in the app profile. + * user-name: The name of the database user. + + +### Example: + +- App Name: `app-tarfful` + +- Service Name: `mongodb-1` + +- Database User: `myuser` + +```shell +kubectl get secret app-tarfful-mongodb-1-myuser \ + --namespace app-tarfful-mongodb-1-ns --output jsonpath='{.data.password}' | base64 --decode +``` +#### Output: +```shell hideClipboard +.Hr1}%DrA2MFf +``` + + +## Next Steps + +Palette Dev Engine removes the traditional challenges encountered when deploying a MongoDB instance. You can add MongoDB to your application profile and get started with MongoDB today. Check out the [MongoDB Tutorials](https://www.mongodb.com/docs/manual/tutorial/) to learn how to integrate MongoDB with your applications. + + +## Resources + + +- [MongoDB Documentation](https://www.mongodb.com/docs/) + + +- [MongoDB Tutorials](https://www.mongodb.com/docs/manual/tutorial/) + + +- [MongoDB Libraries](https://www.mongodb.com/docs/drivers/) + + + diff --git a/docs/docs-content/devx/app-profile/services/service-listings/mysql.md b/docs/docs-content/devx/app-profile/services/service-listings/mysql.md new file mode 100644 index 0000000000..7fb5f0b1a8 --- /dev/null +++ b/docs/docs-content/devx/app-profile/services/service-listings/mysql.md @@ -0,0 +1,141 @@ +--- +sidebar_label: "MySQL" +title: "MySQL" +description: "Palette Dev Engine MySQL Service" +hide_table_of_contents: false +type: "appTier" +category: ['databases'] +sidebar_position: 10 +logoUrl: "https://registry.dev.spectrocloud.com/v1/mysql-operator/blobs/sha256:2d59bc428916752528280eac03330d712164163e2f3c476409f5c25d8a7c2778?type=image/png" +tags: ["devx", "app mode", "pde", "databases"] +--- + + +[MySQL](https://www.mysql.com/) is an open-source relational database management system commonly used in web applications and other software that requires a database. It is known for its reliability, ease of use, and flexibility. MySQL is covered under the GNU license and uses structured query language (SQL) to manage data with the following properties: + +* Creates a database for storing and manipulating data and defining the relationship of each table. + + +* Clients can retrieve and manipulate data by creating SQL queries and submitting them to the MySQL instance. + + +# Add MySQL to App Profile + +Use the following steps to add MySQL to an app profile. + +
+
+### Prerequisite
+
+- Access to Palette Dev Engine.
+
+ +### Enablement + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. On the right side of the window, click on the **User Menu**, then select **Switch to App Mode**. + + +3. Navigate to the left **Main Menu** and click **App Profiles** to create a [new App Profile](../../create-app-profile.md). Provide the following basic information and click **Next**. + +| **Parameter** | **Description** | +|-----------------------------|-----------------| +|Application Profile Name | A custom name for the app profile.| +|Version (optional) | The default value is 1.0.0. You can create multiple versions of an App Profile using the format **`major.minor.patch`**.| +|Description (optional) | Description of the app profile.| +|Tag (optional) | Assign tags to the app profile.| + + +4. Select **MySQL** from the database services and start the configuration. + + +5. Provide the following information to the wizard: + * **Name:** The database name. You can use the auto-generated name or create a custom name. + * **Root Password:** The root password for the database service. + + * Database Volume Size (GiB): Select the volume size for the database. Ensure you stay within the storage amount available in the cluster group and virtual clusters. + + * Select the version from the **Version** drop-down menu. + +6. Click on **Save Changes**. + +### Validate + +1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. + + +2. Navigate to the left **Main Menu** and select **Apps**. + + + +3. Select the application that contains MySQL. + + + +4. Validate your application is displaying the green status. The color code in the app profile box shows the status of the service deployment. + +|**Color Code**| **Description**| +|--------------|--------------| +|Green| Successfully Deployed| +|Blue | Under Deployment| +|Red | Error State| + + +## Output Variables + +The exposed output variables of this service layer may be used in other service layers. These output variables are typically used for connectivity purposes: + +| Parameter | Output Variable | Description | +|------------------------|-------------------------------------------------------------------------------------|-------------------------------------------------| +| Database Root Password | `{{.spectro.app.$appDeploymentName..ROOT_PASSWORD}}` | The root password of the MySQL database. | +| Service Hostname | `{{.spectro.app.$appDeploymentName..MYSQLMSTR_SVC}}` | The Kubernetes service hostname for the database. | +| Service Port | `{{.spectro.app.$appDeploymentName..MYSQLMSTR_SVC_PORT}}` | The exposed ports for the database service. | +| Namespace | `{{.spectro.app.$appDeploymentName..MYSQLMSTR_SVC_NAMESPACE}}` | The Kubernetes namespace the MySQL database is deployed to. | + + + +## Database Password + +You can get the database password by reading the content of the Kubernetes secret created for the database. To retrieve the password for the MySQL database root user, use the following command format. + +```shell +kubectl get secret --user \ + --namespace --ns --output jsonpath='{.data.ROOT_PASSWORD}' | base64 --decode +``` + +Replace the values with the respective names. + + * app-name: represents the name of the app provided during the app creation process. + * service-name: The name of the service layer in the app profile. 
+ +#### Example + +- App Name: `app-tarfful` + +- Service Name: `mysql-2` + + +```shell +kubectl get secret app-tarfful-mysql-2-user \ + --namespace app-tarfful-mysql-2-ns --output jsonpath='{.data.ROOT_PASSWORD}' | base64 --decode +``` +#### Output +```shell hideClipboard +,U31nQ@T2tN4uM +``` + +## Next Steps + +You can add MySQL to your application profile and start integrating MySQL with your applications. To learn more about integrating MySQL with your applications, check out the [MySQL](https://redis.io/docs/manual/) documentation from Oracle. + + + +## Resources + +- [MySQL Documentation](https://dev.mysql.com/doc/) + + +- [MySQL Tutorial](https://dev.mysql.com/doc/refman/8.0/en/tutorial.html) \ No newline at end of file diff --git a/docs/docs-content/devx/app-profile/services/service-listings/postgresql-db.md b/docs/docs-content/devx/app-profile/services/service-listings/postgresql-db.md new file mode 100644 index 0000000000..1881b7f764 --- /dev/null +++ b/docs/docs-content/devx/app-profile/services/service-listings/postgresql-db.md @@ -0,0 +1,162 @@ +--- +sidebar_label: "Postgres" +title: "Postgres" +description: "Palette Dev Engine Postgres Service" +hide_table_of_contents: false +type: "appTier" +category: ['databases'] +sidebar_position: 20 +logoUrl: 'https://upload.wikimedia.org/wikipedia/commons/2/29/Postgresql_elephant.svg' +tags: ["devx", "app mode", "pde", "databases"] +--- + + +Palette supports [Postgres](https://www.postgresql.org/) as a database service. Postgres is a powerful open-source object-relational database system with over 35 years of active deployment with a strong reputation for reliability, feature robustness, and performance. Postgres uses and extends the SQL language combined with many features that safely store and scale the most complicated data workloads. + +
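+
+Clients connect to the provisioned instance with a standard Postgres connection string. The example below is an illustrative sketch only; the placeholders are hypothetical, the hostname, port, and credentials map to the output variables documented later on this page, and `sslmode=require` (or stricter) is mandatory, as explained in the prerequisites that follow.
+
+```shell hideClipboard
+# Hypothetical connection using values from the Postgres service output variables.
+psql "postgresql://<user-name>:<password>@<service-hostname>:<service-port>/<database-name>?sslmode=require"
+```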
+ +## Prerequisites + +The following are the requirements for using Postgres in Palette: + +* Do not use the Postgres user names `postgres` and `admin`. These user names are reserved for internal system operations and will cause internal conflicts. + + +* The user name format does not support the special character hyphen(-). For example, `name-1` is not supported. + + +* Clients must set `sslMode=require` or a stricter setting, as the server instance requires encryption for all connections. Review the [Postgres SSL documentation](https://www.postgresql.org/docs/current/libpq-ssl.html) to learn more about the SSL modes. + + +## Add Postgres to an App Profile + + +You can use the following steps to learn how to add Postgres to your app profile. + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. On the right side of the window, click the **User Menu** to expand it and select **Switch to App Mode**. + + +3. Navigate to the left **Main Menu** and click on **App Profiles** to create a [new App Profile](../../create-app-profile.md). Provide the following basic information and click **Next**. + +| **Parameter** | **Description** | +|-------------------------|-----------------| +|Application Profile Name | A custom name for the App Profile.| +|Version (optional) | The default value is 1.0.0. You can create multiple versions of an App Profile using the format **`major.minor.patch`**. +|Description (optional) | Description of the app profile. | +|Tag (optional) | Assign tags to the app profile.| + + +4. Select **Postgres** from the database services and start the configuration. + + +5. Provide the following information to the wizard: + + * **Name:** The database service name. You can use the auto-generated name or create a custom name. + + + * **Username:** The user name for database access control. + + + * **Password:** Security password for the DB service. + +:::info + + +You can use the default system-generated password. If the default password is used, it can be retrieved from the Postgres secret that is created for the password. Review the [Database Password](../../services/service-listings/postgresql-db.md#database-password) section for guidance. + +::: + + + * **Database Volume Size (GiB):** Select the volume size for the database. Ensure you stay within the storage amount available in the cluster group and virtual clusters. + + * **Version: **Select the version from the **Version** **drop-down Menu**. + +6. Save your changes. + +## Validate + +1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. + + +2. Navigate to the left **Main Menu** and select **Apps**. + + + +3. Select the application that contains Postgres. + + + +4. Validate your application is displaying the green status. The color code in the app profile box shows the status of the service deployment. + +|**Color Code**| **Description**| +|--------------|--------------| +|Green| Successfully Deployed| +|Blue | Under Deployment| +|Red | Error State| + + +## Output Variables + +The exposed output variables of this service layer may be used in other service layers. These output variables are typically used for connectivity purposes: + +| Parameter | Output Variable | Description | +|------------------------|-------------------------------------------------------------------------------------|-------------------------------------------------| +| Database Username | `{{.spectro.app.$appDeploymentName.database-.USERNAME}}` | The database user name. 
| +| Database User Password | `{{.spectro.app.$appDeploymentName.database-.PASSWORD}}` | The password of the database user name. | +| Service Hostname | `{{.spectro.app.$appDeploymentName.database-.POSTGRESMSTR_SVC}}` | The Kubernetes service hostname for the database. | +| Service Port | `{{.spectro.app.$appDeploymentName.database-.POSTGRESMSTR_SVC_PORT}}` | The exposed ports for the database service. | + + +## Database Password + +You can get the database password by reading the content of the Kubernetes secret created for the database user. To retrieve the password for the Postgres database user, use the following command format. + +```shell +kubectl get secret --postgres--credentials \ + --namespace --ns --output jsonpath='{.data.password}' | base64 --decode +``` + +Replace the values with the respective names. + + * app-name: represents the name of the app provided during the app creation process. + * service-name: The name of the service layer in the app profile. + * user-name: The name of the database user. + + +#### Example + +- App Name: `app-tarfful` + +- Service Name: `postgresql-3` + +- Database User: `pguser` + +```shell +kubectl get secret app-tarfful-postgresql-3-postgres-pguser-credentials \ + --namespace app-tarfful-postgresql-3-ns --output jsonpath='{.data.password}' | base64 --decode +``` +#### Output +```shell hideClipnoard +zFniawyxEVdFtSF9uPfDsjFlOnAeDcrpndi3ReaUbqSGTMSnZ1gawSWkJCLabZR9 +``` + +## Next Steps + +Add Postgres to your application profile and explore all the capabilities Postgres has to offer. The official Postgres documentation has several [tutorials](https://www.postgresql.org/docs/online-resources/) to help you learn more about Postgres and how to leverage Postgres with your applications. + + +## Resources + +- [Postgres Documentation](https://www.postgresql.org/docs/) + + +- [Community Postgres Tutorials](https://www.postgresqltutorial.com/) + + +- [Postgres Tutorials](https://www.postgresql.org/docs/online-resources/) + + +- [Postgres SSL documentation](https://www.postgresql.org/docs/current/libpq-ssl.html) diff --git a/docs/docs-content/devx/app-profile/services/service-listings/redis-db.md b/docs/docs-content/devx/app-profile/services/service-listings/redis-db.md new file mode 100644 index 0000000000..66b8c1ff3e --- /dev/null +++ b/docs/docs-content/devx/app-profile/services/service-listings/redis-db.md @@ -0,0 +1,126 @@ +--- +sidebar_label: "Redis" +title: "Redis" +description: "Palette Dev Engine Redis Database Service" +hide_table_of_contents: false +type: "appTier" +category: ['databases'] +hiddenFromNav: false +sidebar_position: 30 +logoUrl: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSjxG5Qb38rX39m1M2p1W4t8H70OKpRY2breg&usqp=CAU" +tags: ["devx", "app mode", "pde", "databases"] +--- + + +[Redis](https://redis.io/docs/about/) is an open-source (BSD licensed), in-memory data structure store used as a data cache store or database service. Redis has built-in replication, Lua scripting, least recently used eviction, transactions, and different levels of on-disk persistence capabilities. In addition, Redis provides high availability via Redis Sentinel and automatic partitioning with Redis Cluster. + +## Add Redis to an App Profile + +Use the following steps to add Redis to an app profile. + +### Prerequisite + +- Access to Palette Dev Engine. + +### Enablement + +1. Log in to [Palette](https://console.spectrocloud.com) + + +2. On the right side of the window, click on the **User Menu** and select **Switch to App Mode**. + + +3. 
Navigate to the left **Main Menu** and click on **App Profiles** to create a [new App Profile](../../create-app-profile.md). Provide the following basic information and click **Next**. + +| Parameter | Description | +|-----------------------------|-----------------| +|Application Profile Name | A custom name for the app profile.| +|Version (optional) | The default value is 1.0.0. You can create multiple versions of an app profile using the format **`major.minor.patch`**. +|Description (optional) | Description of the app profile. | +|Tag (optional) | Assign tags to the app profile.| + + +4. Select the **Redis DB** service and start the configuration. + + +5. Provide the following information to the wizard: + * **Name:** The database name. + * **Password:** The password for the database service. + * **Database Volume Size (GiB):** Select the volume as per the storage volume available in the cluster group and virtual clusters. + +6. Save your changes. +### Validate + +1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. + + +2. Navigate to the left **Main Menu** and select **Apps**. + + + +3. Select the application that contains Redis. + + + +4. Validate your application is displaying the green status. The color code in the app profile box shows the status of the service deployment. + +|**Color Code**| **Description**| +|--------------|--------------| +|Green| Successfully Deployed| +|Blue | Under Deployment| +|Red | Error State| + + +## Output Variables + +The exposed output variables. Use these variables when connecting higher-level services with the database: + + + +| Parameter | Output Variable | Description | +|------------------------|-------------------------------------------------------------------------------------|-------------------------------------------------| +| Database Username | `{{.spectro.app.$appDeploymentName..USERNAME}}` | The database user name. | +| Database User Password | `{{.spectro.app.$appDeploymentName..PASSWORD}}` | The password of the database user name. | +| Service Hostname | `{{.spectro.app.$appDeploymentName..REDISMSTR_SVC}}` | The Kubernetes service hostname for the database. | +| Service Port | `{{.spectro.app.$appDeploymentName..REDISMSTR_SVC_PORT}}` | The exposed port for the database service. | +| Namespace | `{{.spectro.app.$appDeploymentName..REDISMSTR_NS}}` | The Kubernetes namespace the Redis database is deployed to. | + + +## Database Password + +You can get the database secret by reading the content of the Kubernetes secret created for the database user. To retrieve the password for the Redis database, use the following command format. + +```shell +kubectl get secret --redis-auth \ + --namespace --ns --output jsonpath='{.data.password}' | base64 --decode +``` + +Replace the values with the respective names. + + * app-name: represents the name of the app provided during the app creation process. + * service-name: The name of the service layer in the app profile. + +#### Example + +- App Name: `app-tarfful` + +- Service Name: `redis-4` + + +```shell +kubectl get secret app-tarfful-redis-4-redis-auth \ + --namespace app-tarfful-redis-4-ns --output jsonpath='{.data.password}' | base64 --decode +``` +#### Output +```shell hideClipboard + .Hr1}%DrA2MFf +``` + +## Next Steps + +You can add Redis to your application profile and start integrating Redis with your applications. To learn more about integrating Redis with your applications, check out the [Using Redis](https://redis.io/docs/manual/) documentation from Redis. 
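+
+As a quick connectivity check, you can run a temporary Redis client pod inside the virtual cluster and ping the service. The command below is a minimal sketch; the image tag is an assumption, and the host, port, and password placeholders map to the output variables and Kubernetes secret described above.
+
+```shell hideClipboard
+# Hypothetical connectivity test from inside the virtual cluster.
+kubectl run redis-check --rm --tty --stdin --image=redis:7 --restart=Never -- \
+  redis-cli -h <service-hostname> -p <service-port> -a <database-password> ping
+```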
+ + +## Resources + +- [Using Redis](https://redis.io/docs/manual/) diff --git a/docs/docs-content/devx/app-profile/services/service-listings/service-listings.mdx b/docs/docs-content/devx/app-profile/services/service-listings/service-listings.mdx new file mode 100644 index 0000000000..cbbd447cd5 --- /dev/null +++ b/docs/docs-content/devx/app-profile/services/service-listings/service-listings.mdx @@ -0,0 +1,22 @@ +--- +sidebar_label: "Available Services" +title: "Out-of-the-box Service" +description: "Reference documentation for each service available in Palette App Mode." +hide_table_of_contents: true +--- + +Palette Dev Engine contains a set of services commonly used by application authors to support the capabilities of an application. You can use the following service types out of the box with minimal configuration. + +
+ +- **Messaging System Services**: A messaging system service is a platform that enables the exchange of messages between users. It allows people to send and receive messages in real time using different devices and communication channels. + +- **Object Storage Services**: Object storage is a data storage solution for unlimited, unstructured data like images, videos, and backups. It's managed as objects, not files or blocks, and is scalable and durable. + +- **Database Services**: A database stores structured data electronically for fast search and retrieval. It's commonly used for applications and websites to store information such as user data, transactions, and analytics. + +- **Security Services**: Security services are used to protect your application from unauthorized access and to ensure that your application is compliant with security standards. + +Select a category to narrow the list of available services and learn more about a specific offering. + + diff --git a/docs/docs-content/devx/app-profile/services/service-listings/vault.md b/docs/docs-content/devx/app-profile/services/service-listings/vault.md new file mode 100644 index 0000000000..c39d85bdff --- /dev/null +++ b/docs/docs-content/devx/app-profile/services/service-listings/vault.md @@ -0,0 +1,227 @@ +--- +sidebar_label: "Vault" +title: "Vault" +description: "Learn how to use Vault with Palette Dev Engine." +hide_table_of_contents: false +type: "appTier" +category: ['security'] +hiddenFromNav: false +sidebar_position: 40 +logoUrl: "https://icon-library.com/images/padlock-icon-png/padlock-icon-png-29.jpg" +--- + + + + +# Vault + +Palette Dev Engine (PDE) users can deploy Vault onto their virtual cluster using the out-of-the-box Vault offering. Vault deployed through PDE is using Banzai Cloud Bank-Vaults. Bank-Vaults is a wrapper for the official [Vault](https://www.vaultproject.io/) client. Vault is a tool that helps you securely manage and protect sensitive information, like passwords, API keys, and encryption keys. The Bank-Vaults client enhances the official Vault client by adding automatic token renewal, built-in Kubernetes support, and a dynamic database credential provider. + +Vault keeps these secrets safe by locking them in a virtual "vault" and only allows authorized users to access them. Vault also tracks who has accessed which secrets and when, making it easier to maintain security. You can use Vault to govern access to secrets, automate application delivery, and consume secrets programmatically. + +Vault is deployed behind the scenes through the use of the [Bank-Vaults Vault Operator Helm Chart](https://github.com/banzaicloud/bank-vaults/tree/main/charts/vault-operator). + +
+ +:::info + +Vault is deployed as a single container in the virtual cluster, and the container is not tied to any particular node. + +::: + + +# Deploy Vault + +Use the following steps to learn how to deploy Vault to your virtual cluster. + +## Prerequisites + +- A Spectro Cloud [account](https://www.spectrocloud.com/get-started/). + + +- A Virtual Cluster with at least the following minimum resources. + - 4 CPU + - 6 GB Memory + - 6 GB Storage + + +- Kubernetes 1.6.x or greater. + +
+ +## Enablement + +1. Log in to [Palette](https://console.spectrocloud.com). + + + +2. Click on the **User Menu** at top right, and select **Switch to App Mode**. + + + +3. Navigate to the left **Main Menu** and click on **App Profiles** to create a [new App Profile](../../create-app-profile.md). Provide the following basic information and click **Next**. + +| Parameter | Description | +|-----------------------------|-----------------| +|Application Profile Name | A custom name for the app profile.| +|Version (optional) | The default value is 1.0.0. You can create multiple versions of an app profile using the format `major.minor.patch`. +|Description (optional) | Description of the app profile. | +|Tag (optional) | Assign tags to the app profile.| + + +4. Select the **Vault** service and start the configuration. + + + +5. Provide the following information to the wizard: + * **Name:** The application name. + * **PersistentVolumeClaim Size (GiB):** Select the volume as per the storage volume available in the cluster group and virtual clusters. Ensure you do not exceed the maximum storage size for your virtual cluster. + + +6. Save your changes. + + +7. Deploy the app profile to a Palette Virtual Cluster. Use the [Deploy a Virtual Cluster](../../../../clusters/palette-virtual-clusters/deploy-virtual-cluster.md) guide for additional guidance or check out the [Deploy an Application using Palette Dev Engine](../../../apps/deploy-app.md) tutorial. + + +## Validate + +You can validate the Vault instance deployed successfully by using the following steps. + +1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. + + +2. Navigate to the left **Main Menu** and select **Apps**. + + +3. Select your application that contains Vault to view its details page. + + +4. Ensure the **Status** is **Deployed** and that the **Vault** service has a green dot next to it. + + +5. Next, click on the **Virtual Cluster** link in the App details page. + + +6. Click the URL to download the **kubeconfig**. + + +7. Set up your local kubectl environment to use the **kubeconfig** file you downloaded. Review the [Access Cluster with CLI](../../../../clusters/cluster-management/palette-webctl.md) guide for additional guidance. + + +8. Export the following environment variables to prepare your environment to interact with Vault. + +
+ + ```shell + export VAULT_ADDR=https://127.0.0.1:8200 + ``` + +
+ + ```shell + export VAULT_SKIP_VERIFY=true + ``` + +9. Configure port forwarding between your local workstation and the pod hosting Vault. Use the following commands to configure the port forward. + +
+ + ```shell + VAULT_NAMESPACE=$(kubectl get pods --selector app.kubernetes.io/name=vault --all-namespaces --output jsonpath='{.items[0].metadata.namespace}') && \ + kubectl port-forward $(kubectl get pods --selector app.kubernetes.io/name=vault --all-namespaces --output jsonpath='{.items[0].metadata.name}') 8200:8200 --namespace $VAULT_NAMESPACE + ``` + +
+ + ```shell + kubectl port-forward $(kubectl get pods --selector app.kubernetes.io/name=vault --all-namespaces --output jsonpath='{.items[0].metadata.name}') 8200:8200 --namespace $VAULT_NAMESPACE + ``` + +10. Open your browser and visit [https://localhost:8200/ui](https://localhost:8200/ui) to access the Vault UI. You will receive a warning due to the usage of a self-signed certificate but you can ignore this warning. + +To acquire the Vault root token, review the [Vault Credentials](vault.md#vault-credentials) section. + + +# Output Variables + +The exposed output variables. Use these variables when connecting higher-level services with Vault: + + + +| Parameter | Output Variable | Description | +|------------------------|-------------------------------------------------------------------------------------|-------------------------------------------------| +| Vault Root Token | `{{.spectro.app.$appDeploymentName..VAULT_ROOT_TOKEN}}` | The root token of the Vault instance. | +| Service Hostname | `{{.spectro.app.$appDeploymentName..VAULTMSTR_SVC}}` | The Kubernetes service hostname for the Vault service. | +| Service Port | `{{.spectro.app.$appDeploymentName..VAULTMSTR_SVC_PORT}}` | The exposed port for the Vault service. | +| Namespace | `{{.spectro.app.$appDeploymentName..VAULTMSTR_SVC_NAMESPACE}}` | The Kubernetes namespace the Vault instance is deployed to. | + + +# Vault Credentials + +The Vault root token and the unseal keys are stored as a Kubernetes secret inside the virtual cluster. You can retrieve the Vault root token by following these steps.

+ + +1. Log in to [Palette](https://console.spectrocloud.com) and switch to **App Mode**. + + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + + +3. Select the cluster that has Vault installed to view its details page. + + + +4. Download the cluster **kubeconfig** file. + + + +5. Set up your local kubectl environment to use the **kubeconfig** file you downloaded. Review the [Access Cluster with CLI](../../../../clusters/cluster-management/palette-webctl.md) guide for additional guidance. + + +6. You need to get the Vault namespace and application name. Issue the following command to get the unique values. + +
+
+   ```shell
+   VAULT_NAMESPACE=$(kubectl get pods --selector app.kubernetes.io/name=vault --all-namespaces --output jsonpath='{.items[0].metadata.namespace}') && \
+   APP_NAME=$(echo "$VAULT_NAMESPACE" | sed 's/-ns$//')
+   ```
+
+7. Next, issue the command below to retrieve the Vault root token.
+
+
+   ```shell
+   kubectl get secret $APP_NAME-unseal-keys --output jsonpath='{.data.vault-root}' --namespace $VAULT_NAMESPACE | base64 --decode
+   ```
+
+8. To acquire all five unseal keys, use the following command.
+
+ + ```shell + kubectl get secret $APP_NAME-unseal-keys --namespace $VAULT_NAMESPACE --output json \ + | jq -r '.data | to_entries | .[] | select(.key | startswith("vault-unseal-")) | .value | @base64d + "\n"' + ``` + + + + +# Next Steps + +You can add Vault to your application profile and start integrating Vault with your applications. To learn more about integrating Vault with your applications, check out the [Vault App Integrations](https://developer.hashicorp.com/vault/tutorials/app-integration) tutorials from HashiCorp. + + +# Resources + + +- [Vault Documentation](https://developer.hashicorp.com/vault/docs) + + +- [HashiCorp Vault Tutorial](https://developer.hashicorp.com/vault/tutorials) + + +- [Bank-Vaults Vault Operator Helm Chart](https://github.com/banzaicloud/bank-vaults/tree/main/charts/vault-operator) \ No newline at end of file diff --git a/docs/docs-content/devx/app-profile/services/services.md b/docs/docs-content/devx/app-profile/services/services.md new file mode 100644 index 0000000000..c666d0ec80 --- /dev/null +++ b/docs/docs-content/devx/app-profile/services/services.md @@ -0,0 +1,51 @@ +--- +sidebar_label: "Services" +title: "Services" +description: "Palette Dev Engine App Services" +hide_table_of_contents: false +tags: ["devx", "app mode", "pde"] +--- + +Palette offers you different types of services to help you model all the dependencies and resources required to deploy an application. You can choose from several different service types in Palette. + + +## Container Deployment + +[Containers](https://www.docker.com/resources/what-container/) are methods of building, packaging, and deploying an application. A container includes the code, run-time, libraries, and all the dependencies required by a containerized workload. Containers are deployed to their target environment. For steps on how to deploy a container in Palette, refer to [Container Deployment](../container-deployment.md). + + +## Helm + +Palette provides out-of-the-box Helm registries and allows you to add registries. For more information, visit [Palette Helm Registry](../../../registries-and-packs/helm-charts.md). + + +## Manifest + +You can construct App Profile layers using raw manifests to provision Kubernetes resources that are unavailable in Palette or Helm Charts. Pack Manifests provide a pass-through mechanism to orchestrate Kubernetes resources in a cluster. For example, specific integrations may require the creation of secrets or Custom Resource Definitions (CRDs). To achieve this, you can attach a Manifest file to the layer. + +## Out-of-the-box Services + +Palette also offers a set of common services or resources that application authors frequently use to expand or add capabilities to an application. These services are managed by Palette and help reduce the burden of maintaining and deploying resources required by your application. + +### Messaging System Services + +A messaging system service is a platform that enables the exchange of messages between users. It allows people to send and receive messages in real time using different devices and communication channels. + +
+ +### Object Storage Services + +Object storage is a data storage solution for unlimited, unstructured data like images, videos, and backups. Uploaded data is managed as objects, not files or blocks, and is scalable and durable. + +
+ + +### Database Services + +A database stores structured data electronically for fast search and retrieval. It's commonly used for applications and websites to store information such as user data, transactions, and analytics. + +
+ +## Available Services + +Check out the available service offerings in Palette by visiting the [Service Listings](service-listings/service-listings.mdx) resource. diff --git a/docs/docs-content/devx/app-profile/versioning-app-profile.md b/docs/docs-content/devx/app-profile/versioning-app-profile.md new file mode 100644 index 0000000000..c3cd2c0e2e --- /dev/null +++ b/docs/docs-content/devx/app-profile/versioning-app-profile.md @@ -0,0 +1,167 @@ +--- +sidebar_label: "App Profile Versioning" +title: "App Profile Versioning" +description: "Learn about App Profile Versioning, what it is, how to create a version, and how to manage a version." +hide_table_of_contents: false +sidebar_position: 40 +tags: ["devx", "app mode", "pde"] +--- + + +Palette enables users to create multiple versions of an App Profile within the scope of a single profile name. The **Version** field of the app profile takes a semantic versioning format (only numbers supported) as below: + + **`major.minor.patch`** represented as: Version 1.1.2 + +App versioning is an optional field with a default value of **1.0.0** . The users can create multiple versions of an app profile under a single profile name and each of these versions can have its own pack configurations. + +Cluster profile versions are grouped under their unique names and their uniqueness is decided by the name and version within the scope and promotes backward compatibility to profile changes. + + **Example:** Profile-1 can have multiple versions like 1.0.0 and 2.0.1. These versions are grouped under the **App Profile Name** Profile-1. The menu next to the app profile name contains the different versions under that name. + + The version numbers can be edited from the **Settings > Edit Info** option from the App Profile page. While deleting the profile, select the version to be deleted. + +The new versions of the App Profile may: + +* Contain additional tiers + +* Drop existing tiers + +* Contain new versions of a tier + +* Update the configuration of the existing tiers + +:::info + +The following attributes are non-editable during versioning: + +* App Profile name and version number. New version numbers are created and existing version number can be deleted. + +* App Profile tier name and type. + +::: + + +## Apply Version to a Profile + + +### Prerequisites + +- An App Profile + +### Create Version + +1. Log in to [Palette](https://console.spectrocloud.com) + + +2. Select the **App Profiles** option from the left **Main Menu**. + + +3. Select the App Profile to be versioned. + + +4. From the drop-down menu next to the App Profile name, select the **Create New Version**. + + +5. Give the version number per the semantic format described above. + + +6. Click on **Confirm** to complete the wizard. The UI will return a versioning successful message. + +### Validate + +To validate the App Profile is versioned and available in the target project conduct the following steps: + +1. Log in to [Palette](https://console.spectrocloud.com) + + +2. Select the **App Profiles** option from the left **Main Menu**. + + +3. This page will list all the App Profiles available to you. In addition, this should list all the versioned App Profiles as well. Use the versioned App Profile for App deployment under the target scope. + +## Delete an App Profile + +### Prerequisites + +- An App Profile + +### Delete Profile + +1. Log in to [Palette](https://console.spectrocloud.com) + + +2. Select the **App Profiles** option from the left **Main Menu**. + + +3. 
This page will list all the App Profiles available to you. Select the App Profile to be deleted. + + +4. From the drop-down menu next to the App Profile Name, select the version to be deleted and click **Delete** to delete the profile. + + +5. The selected App Profile version will be deleted. + +### Validate + + +To validate the App Profile is removed and not available in the target project, conduct the following steps: + +1. Log in to [Palette](https://console.spectrocloud.com) + + +2. Select the **App Profiles** option from the left **Main Menu**. + + +3. Verify the app profile is not in the list of available profiles. + + +## Update an App Profile + +You can make changes to the app profile, such as version updates, manifest updates, app tier additions and removals. + +App Profile changes will generate an update notification on all the Apps that are created from the app profile. Update notifications include information about all the changes applied to the profile since the initial creation or since the previous update. You can apply the update to the Apps individually at any time. + +### Prerequisites + +- An App Profile + +### Apply Updates to the App + +To apply updates to an App follow the below steps: + +1. Log in to [Palette](https://console.spectrocloud.com) + + +2. Select the **App Profiles** option from the left **Main Menu**. + + +3. This page will list all the App Profiles available to you. Select the App Profile you want to update. + + +4. Make the desired changes. You can add or delete layers, change pack versions, change pack values, etc. and save your changes. + +5. Navigate to the left **Main Menu** and click on **Apps** + + +5. On the App page, apps eligible for an update will have an **Updates Available** badge. + + +* Click on the App with the update notification to start the **Apply** updates wizard. Click on **Apply** button. + + +* An **Apply Updates** wizard will open up with the update notification. The notification contains details about the updates that will be applied. Click the **Confirm** button to apply the updates to the app. + +### Validate + +To validate that the App profile updates are implemented on the target app, conduct the following steps: + +1. Log in to [Palette](https://console.spectrocloud.com) + + +2. Select the **Apps** option from the left **Main Menu**. + + +3. This page will list all the Apps. Click open the updated App. + + +4. Review the app profile details, which will include the applied updates. diff --git a/docs/docs-content/devx/apps/_category_.json b/docs/docs-content/devx/apps/_category_.json new file mode 100644 index 0000000000..455b8e4969 --- /dev/null +++ b/docs/docs-content/devx/apps/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 20 +} diff --git a/docs/docs-content/devx/apps/apps.md b/docs/docs-content/devx/apps/apps.md new file mode 100644 index 0000000000..ae1c969233 --- /dev/null +++ b/docs/docs-content/devx/apps/apps.md @@ -0,0 +1,24 @@ +--- +sidebar_label: "Apps" +title: "Apps" +description: "Explore Palette Dev Engine as Free Developers" +hide_table_of_contents: false +tags: ["devx", "app mode", "pde"] +--- + + +Applications are the combination of an [App Profile](../app-profile/app-profile.md) and a [Palette Virtual Cluster](../palette-virtual-clusters/palette-virtual-clusters.md). When you specify an application profile and deploy it to a virtual cluster, you create an application. + +Check out the resource links below to learn more about Apps. 
+ + + +## Resources + +- [Create and Manage Apps](create-app.md) + + +- [App Logs](logs.md) + + +- [Deploy an Application using the Palette Dev Engine](deploy-app.md) \ No newline at end of file diff --git a/docs/docs-content/devx/apps/create-app.md b/docs/docs-content/devx/apps/create-app.md new file mode 100644 index 0000000000..9f0ea23199 --- /dev/null +++ b/docs/docs-content/devx/apps/create-app.md @@ -0,0 +1,81 @@ +--- +sidebar_label: "Create and Manage Apps" +title: "Create and Manage Apps" +description: "Learn how to create and manage an app in Palette Dev Engine." +hide_table_of_contents: false +sidebar_position: 0 +tags: ["devx", "app mode", "pde"] +--- + +Use the following steps to create and deploy an app to a virtual cluster. + + +## Prerequisite + +- An application profile. Use the guide [Create an App Profile](../app-profile/create-app-profile.md) to learn how to create an app profile. + + +:::info + +A tutorial is available to help you learn how to use Palette Dev Engine by deploying an application. Check out [Deploy an Application using Palette Dev Engine](deploy-app.md) to get started with Palette Dev Engine. + +::: + +## Create a New App + + +1. Login to [Palette](https://console.spectrocloud.com). + + + +2. Navigate to the top right **User Menu** and select **Switch to App Mode**. + + + +3. Select the **Apps** from the left **Main Menu** and click on **New App**. + + + +4. Next, provide the following information to the app creation wizard. + + * **Application name:** A custom name for the application. + + * **App Profile**: Select an app profile from the existing list by clicking **Select App Profile**. + + + +5. Choose a Virtual Cluster deployment option. You have two options available. + + - **Deploy In A Palette Virtual Cluster** + + - **Deploy In An Existing Palette Virtual Cluster** + + Create a new virtual cluster or select an existing one from the available list, depending on your choice + + +6. Click on **Create an application** to complete the application wizard. + + + +The application will begin the deployment process. This may take a few minutes, depending on the number of layers and types of applications specified in the app profile. + + + +## Validate + +1. Log in to [Palette](https://console.spectrocloud.com). + + + +2. Navigate to the top right **User Menu** and select **Switch to App Mode**. + + + +3. Select the **Apps** from the left **Main Menu** and click on **New App**. + + + +4. Review the list and select your application to view the details page. + + +5. Ensure the **Status** is marked as **Deployed**. diff --git a/docs/docs-content/devx/apps/deploy-app.md b/docs/docs-content/devx/apps/deploy-app.md new file mode 100644 index 0000000000..a4895f4dcf --- /dev/null +++ b/docs/docs-content/devx/apps/deploy-app.md @@ -0,0 +1,1288 @@ +--- +sidebar_label: "Deploy an Application using Palette Dev Engine" +title: "Deploy an Application using Palette Dev Engine" +description: "Learn how to deploy applications to a Kubernetes cluster without the traditional overhead accompanied by Kubernetes. Palette’s App Mode reduces the deployment time and complexity when deploying applications to Kubernetes. Learn how to get started with Palette’s App Mode in this tutorial. Get started with the free tier of Palette App Mode" +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["devx", "app mode", "pde", "tutorial"] +--- + +Palette’s mission is to reduce the challenges you, as a user, face when interacting with Kubernetes. 
Whether you are a system administrator or an application developer, Kubernetes can introduce overhead that slows down the development process. One of Palette’s core components, *Dev Engine*, focuses on reducing the application development time by enabling builders to deploy applications to Kubernetes with minimal friction. + +This tutorial will teach you how to deploy single and multiple applications to Kubernetes through Palette’s Dev Engine experience. You will learn about *App Mode*, *App Profiles*, and *Palette Virtual Clusters* and understand how they enable you to deploy applications to Kubernetes quickly with minimal overhead. + +## Prerequisites + +To complete this tutorial, you will need the following items. + +- A Spectro Cloud account +- Basic knowledge about containers. + +If you select the Terraform workflow, you will need the following software installed. +- [Docker Desktop](https://www.docker.com/products/docker-desktop/) or another container management tool. + +There are no expenses associated with this tutorial as everything falls under the Palette Free Tier. + + +## Architecture + +The tutorial includes two scenarios, and for each scenario, you will deploy a separate Kubernetes environment. The following diagram illustrates the different layers that will power the tutorial environment. + +![Architecture diagram depicting two virtual clusters](/tutorials/deploy-app/devx_apps_deploy-apps_architecture-diagram.png) + +The top layer is Palette, which is the product platform. Palette can be used in two modes: app mode or cluster mode. Each mode is intended for different use cases and personas, but for this tutorial, you will use app mode. For an in-depth explanation of each mode’s differences, check out the [App Mode and Cluster Mode](../../introduction/palette-modes.md) documentation. + +## Deploy The Environment + +The following steps will guide you through deploying the two scenarios. You will start with the single application scenario to build up your knowledge before deploying the multiple applications scenario. + +From Palette, you will deploy two Palette Virtual Clusters. Palette Virtual Clusters will be referred to as virtual clusters for the rest of the tutorial. Each virtual cluster will be hosted on a host cluster group managed by us, Spectro Cloud, called *beehive*. You can deploy up to two virtual clusters in the beehive group for free. Each scenario’s virtual cluster will sit on the beehive host cluster group. + +
+ +:::info + + +Virtual clusters are standalone Kubernetes environments that sit on top of what you would consider a traditional Kubernetes cluster or host cluster. Palette Virtual Clusters are Kubernetes clusters that run as nested clusters within an existing host cluster and share the host cluster resources, such as CPU, memory, and storage. Palette Virtual Clusters use k3s, a highly available, certified Kubernetes distribution designed for production workloads. Palette Virtual Clusters are also powered by vCluster. + +::: + +You can complete this tutorial by using the Palette console, simulating a manual workflow. Or you may leverage infrastructure as code and complete the tutorial using Terraform. + +- [UI Workflow](#ui-workflow) + +- [Terraform Workflow](#terraform-workflow) + +
+ + +## UI Workflow + + +Start by logging in to Palette. From the landing page, click on the user **drop-down Menu** and click on **App Mode**. + + + +![Image with an arrow pointing to the user drop-down Menu](/tutorials/deploy-app/devx_apps_deploy-apps_toggle-app-mode.png) + + + + +From the app mode landing page, navigate to the left **Main Menu** and click on **Virtual Clusters**. Next, click on the button **New Virtual Cluster**. + + + +![View of the virtual cluster list](/tutorials/deploy-app/devx_apps_deploy-apps_virtual-cluster-list.png) + + + +In the following screen, you will be prompted for the cluster group, virtual cluster name, and the cluster size in terms of CPU, memory, and storage. Select beehive for the cluster group, name the cluster `cluster-1`, and allocate 4 CPU, 4 GiB memory, and 2 GiB of storage. Click on **Deploy Virtual Cluster** after you have filled out all the required information. + + + +Palette Dev Engine allows you to deploy up to two virtual clusters into the beehive cluster group. Each virtual cluster requires a minimum of 4 CPU, 4 GiB memory, and 2 GiB storage. When using the beehive cluster, you can allocate a maximum of 12 CPU, 16 Gib memory, and 20 GiB of storage. Check out the [Palette Dev Engine and Quotas](../manage-dev-engine/resource-quota.md) documentation to learn more about limits. + + + +It will take a few minutes for the virtual cluster to deploy. In the meantime, navigate to the left **Main Menu** and click on **App Profiles**. + + + + +![The App Profile page with arrows guiding](/tutorials/deploy-app/devx_apps_deploy-apps_app-profiles.png) + + + + +App Profiles are templates that contain all the configurations and settings required to deploy applications to virtual clusters. App Profiles provide a way to drive consistency across virtual clusters as you can re-use app profiles and deploy them to different virtual clusters. You can think of app profiles as declarative templates that inform the Kubernetes cluster of the desired application or set of applications. + + + +Click on the **New App Profile** button to start creating your first app profile. Give the app profile the name `hello-universe-ui` and add the tag `scenario-1`. Click on **Next**. The following screen is the service type selection page. You have the option to deploy applications through containers, Helm, or Manifests. You can also consume services such as databases and more. Click on **Container Deployment**. + + + +Name the container `ui`, select a public registry, and provide the image URL `ghcr.io/spectrocloud/hello-universe:1.0.12`. Change the network access to **Public** and add the port `8080`. + + + +![App Profile container creation page with details](/tutorials/deploy-app/devx_apps_deploy-apps_app-profile-creation.png) + + + + +Click on **Review** once you have filled out the provided information. On the next page, click on the **Deploy New App** button. + + + +It’s time to deploy your application to a virtual cluster. Name the application `single-scenario`. For the **App profile** input field, click on the button to the right of the input field to get a list of all your available app profiles. Select the **hello-universe-ui profile** and click on **Confirm**. + + + +Next, click the radio button **Deploy in An Existing Palette Virtual Cluster**. Select **cluster-1** and click on **Create App** to deploy the app profile onto the virtual cluster. + + + +
+ + + +:::caution + + + +If no clusters are displayed, then **cluster-1** is not yet available. Wait a few more moments and return to the above steps. You can refresh the page, but you must fill out all the required input fields. + + + +::: + + + +The app profile deployment takes a few moments to finish. You can review the application's deployment progress by navigating to the left **Main Menu** and selecting **Virtual Clusters**. Click on **cluster-1** to view its details page. You can review cluster information, log events, access a remote shell session in the cluster, and more from the cluster details page. + + + +![Cluster details view displaying exposed services](/tutorials/deploy-app/devx_apps_deploy-apps_cluster-details-view.png) + + + +When the application is deployed and ready for use, the **Services** row on the details page will automatically be updated by Palette with the app's public-facing URL. Click on the **:8080** link to view the application. + + + +
+ + + + +:::caution + + + + +It takes between one to three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request. + +::: + + + + +![Hello Universe landing page displaying global clicks](/tutorials/deploy-app/devx_apps_deploy-apps_hello-universe.png) + + + +Welcome to [Hello Universe](https://github.com/spectrocloud/hello-universe), a demo application to help you learn more about Palette and its features. Feel free to click on the logo to increase the global counter and for a fun image change. + + + +You have deployed your first application to Palette. Your first application is a single container application with no upstream dependencies. In a production environment, you often deploy applications that consume other services and require connectivity with other resources. The next scenario expands on the single application scenario by adding an API server and Postgres database to simulate a common application architecture encountered in a production environment. + + + +### Deploy Multiple Applications + + + +Create another virtual cluster for the multi-application scenario. From the app mode landing page, navigate to the left **Main Menu** and click on **Virtual Clusters**. Next, click on the **New Virtual Cluster** button. + + + +Add the following details. Select beehive for the cluster group, name the cluster **cluster-2**, add the tag **scenario-2**, and allocate 8 CPU, 12 GiB memory, and 12 GiB of storage. Click on **Deploy Virtual Cluster** after you have filled out all the required information. + + + +It will take a few minutes for the new virtual cluster to deploy. In the meantime, go ahead and navigate to the left **Main Menu** and click on **App Profiles**. + + + +### Postgres + + + +Click on the **New App Profile** button to create your second app profile. Give the app profile the name `hello-universe-complete` and add the tag `scenario-2`. Click on **Next**. This application profile will contain three different applications, and you will create a service configuration for each. The three layers or tiers will together make up the entire application deployment. The order in which you create each layer plays an important role, as it dictates the deployment order. For this scenario, you will deploy the database, the API, and the UI. To create the first layer, select the database service Postgres. + + + + +In the next screen, assign the following values to the Postgres database. + + + +- Name: `postgres-db` + +- Username: `pguser` + +- Database Name: `counter` + +- Database Volume Size: `2` + +- Version: `14` + + + +![Postgres service creation page](/tutorials/deploy-app/devx_apps_deploy-apps_postgres-service-create.png) + + + +Take note of the **Output Variables** section. The Postgres service exposes several output variables to help other applications connect with the database. In the next section, you will use these output variables and other output variables that Palette exposes for each service. You can learn more about output variables by reviewing the app profile [output variables](../app-profile/app-profile-macros.md) documentation. + + + +Next, navigate to the top left side of the wizard screen and click on the **Actions** button **+**. Go ahead and select **Container Deployment**. + + + +### API + + + +The API is available as a container image. 
To deploy the API successfully, you need to provide the API server with information about the database such as hostname, database user, database name, and password. The required information can be retrieved using Palette's global output variables and the output variables the database service exposes. + + + +Provide the container service with the following information: + + + +- Container Name: `api` + +- Registry: Public + +- Image: `ghcr.io/spectrocloud/hello-universe-api:1.0.8` + +- Network Access: Public + +- Ports: `3000` + + + +Assign the following environment variables to the API service: + + + +| Parameter | Value | +|------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `DB_NAME` | `counter` | +| `DB_HOST` | `{{.spectro.app.$appDeploymentName.postgres-db.POSTGRESMSTR_SVC}}` | +| `DB_PASSWORD` | `{{.spectro.app.$appDeploymentName.postgres-db.PASSWORD}}` | +| `DB_INIT` | `true` | +| `DB_USER` | `{{.spectro.app.$appDeploymentName.postgres-db.USERNAME}}` | +| `DB_ENCRYPTION` | `require` | +| `AUTHORIZATION` | `true` | + + + + + +You can learn more about each environment variable's purpose by reviewing the API server's [documentation](https://github.com/spectrocloud/hello-universe-api#environment-variables). One variable that you should understand in greater detail is the `DB_HOST.` The value of this environment variable is constructed using the output variables the Postgres service exposed. The `{{.spectro.app.$appDeploymentName.postgres-db.POSTGRESMSTR_SVC}}` variable contains the Kubernetes DNS value of the Postgres service container. + +
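+
+For a concrete picture of what this substitution produces, the following is a rough sketch of the environment the API container might end up with at runtime. The service hostname and generated password shown here are hypothetical placeholders; Palette resolves the actual values from the Postgres layer when the app is deployed.
+
+```shell
+# Hypothetical resolved environment inside the api container (values differ per deployment)
+DB_HOST=postgres-db-svc.postgres-db-ns.svc.cluster.local   # resolved from POSTGRESMSTR_SVC
+DB_USER=pguser                                             # resolved from USERNAME
+DB_PASSWORD=<password-generated-by-palette>                # resolved from PASSWORD
+DB_NAME=counter
+DB_INIT=true
+DB_ENCRYPTION=require
+AUTHORIZATION=true
+```
+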
+ +:::info + +To learn more about connecting different service layers, refer to the [Service Connectivity](../app-profile/services/connectivity.md) resource. + +::: + + + +A virtual cluster is a Kubernetes environment, and because it’s a Kubernetes environment, you can use the [Kubernetes DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) record created for each service and pod. You will have another opportunity to practice this concept when you deploy the UI. + + + +When you have filled out all the required information, navigate to the top left side of the wizard screen and click on the **Actions** button **+**. Select the **Container Deployment** to add the final service layer, the UI. + + + +### UI + + + +This time the UI will point to the API server that you manage. The API server has authentication enabled, so to ensure all API requests are accepted you will provide the UI with the anonymous token. + + + +![A diagram of the reverse proxy architecture](/tutorials/deploy-app/devx_apps_deploys-apps_reverse-proxy-diagram.png) + + + +Provide the UI container with the following information. + +- Container Name: `ui` + +- Registry: Public + +- Image: `ghcr.io/spectrocloud/hello-universe:1.0.12` + +- Network Access: Public + +- Ports: `8080` + + + +Assign the following environment variables to the UI service: + + + +| Parameter | Value | +|-----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `API_URI` | `http://{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC_EXTERNALHOSTNAME}}:3000` | +| `TOKEN` | `931A3B02-8DCC-543F-A1B2-69423D1A0B94` | + + + +If you want to explore the UI service's environment variables in greater detail, you can review the UI [documentation](https://github.com/spectrocloud/hello-universe). The `API_URI` contains the address of the application load balancer that will be deployed for the API service. + +The output variable `{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC_EXTERNALHOSTNAME}}` is used to retrieve the load balancer URL value. + + +Click on the **Review** button at the bottom of the screen to finalize the app profile. Click on **Deploy New App** in the following screen to deploy the new app profile to cluster-2. + + + +Name the app `multiple-app-scenario`, select the app profile **hello-universe-complete**, pick version **1.0.0** and toggle the radio button **Deploy In An Existing Palette Virtual Cluster**. Select **cluster-2** and click on **Create App**. + + +
+
+
+
+:::caution
+
+
+If cluster-2 is not displayed, wait a few more moments and return to the above steps. You can refresh the page, but you must fill out all the required input fields.
+
+:::
+
+
+
+
+![App deployment cluster-2](/tutorials/deploy-app/devx_app_deploy-apps_cluster-2-deploy-app.png)
+
+
+
+
+The app profile deployment takes a few moments to finish. You can review the application's deployment progress by navigating to the left **Main Menu** and selecting **Virtual Clusters**. Click on **cluster-2** to view its details page.
+
+Once the app is successfully deployed, the cluster details page will expose the public-facing URLs of the services.
+
+
+
+![Cluster 2's details page](/tutorials/deploy-app/devx_apps_deploy-apps_cluster-2-details-page.png)
+
+
+
+Click on the UI’s service URL for port **8080** to access the Hello Universe application in a three-tier configuration.
+
+
+
+
+
+![View of the self-hosted version of Hello Universe](/tutorials/deploy-app/devx_apps_deploy-app_self-hosted-hello-universe.png)
+
+
+
+
+The global counter is no longer available. Instead, you have a counter that starts at zero. Each time you click on the center image, the counter is incremented and stored in the Postgres database along with metadata. Also, remember that the reverse proxy injects the Bearer token value in each request sent to the API.
+
+
+
+### Cleanup
+
+
+
+To remove all resources created in this tutorial, begin by navigating to the left **Main Menu** and selecting **Apps**. For each application, click on the **three-dots Menu** to expand the options menu and click on the **Delete** button.
+
+
+
+![Apps view with an arrow pointing towards the delete button](/tutorials/deploy-app/devx_apps_deploy-apps_delete-apps-view.png)
+
+
+
+Next, in the left **Main Menu**, click on **Virtual Clusters** to access the clusters page.
+
+Click on **cluster-1** to access its details page. Click on **Settings** from the details page to expand the settings menu. Click on **Delete** to delete the cluster. You will be asked to enter the cluster name to confirm the delete action. Type the cluster name to proceed with the delete step. Repeat this process for cluster-2.
+
+![Delete a cluster view with arrow](/tutorials/deploy-app/devx_apps_deploy-apps_delete-cluster-view.png)
+
+:::info
+
+If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for **Force Delete**. To trigger a force delete, navigate to the respective cluster’s details page and click on **Settings**. Click on **Force Delete Cluster** to delete the cluster. Palette will automatically remove clusters stuck in the cluster deletion phase for over 24 hours.
+
+:::
+
<br />
+ + +## Terraform Workflow + + +The [Spectro Cloud Terraform](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) provider enables you to create and manage Palette resources in a codified manner by leveraging Infrastructure as Code (IaC). There are many reasons why you would want to utilize IaC. A few reasons worth highlighting are: the ability to automate infrastructure, improve collaboration related to infrastructure changes, self-document infrastructure through codification, and track all infrastructure in a single source of truth. If you need to become more familiar with Terraform, check out the [Why Terraform](https://developer.hashicorp.com/terraform/intro) explanation from HashiCorp. + +
+
+:::info
+
+The Terraform workflow does not cover Palette's high-level concepts in depth. The focus is on the Terraform concepts that apply to Palette. To better understand the Palette concepts mentioned here, review the UI workflow, where they are explained in greater detail.
+
+:::
+
<br />
+ + + + + + +Ensure Docker Desktop on your local machine is available. Use the following command and ensure you receive an output displaying the version number. + +
+ +```bash +docker version +``` + +Download the tutorial image to your local machine. +
+ +```bash +docker pull ghcr.io/spectrocloud/tutorials:1.0.4 +``` + +Next, start the container, and open a bash session into it. + +
+ +```shell +docker run --name tutorialContainer --interactive --tty ghcr.io/spectrocloud/tutorials:1.0.4 bash +``` + +Navigate to the tutorial code. + +
+ +```shell +cd terraform/hello-universe-tf/ +``` + +
+ + + + +Open a terminal window to begin the tutorial and download the tutorial code from GitHub. + +
+
+```shell
+git clone git@github.com:spectrocloud/tutorials.git
+```
+
+Change directory to the tutorial folder.
+
<br />
+ +```shell +cd tutorials/ +``` + +Check out the following git tag. + +
+ +```shell +git checkout v1.0.4 +``` + +Change directory to the tutorial code. + +
+ +```shell +cd terraform/hello-universe-tf/ +``` + + +
+
+
+Before you can get started with the Terraform code, you need a Spectro Cloud API key.
+
+### API Key
+
+To create an API key, log in to Palette, click on the **User Menu**, and select **My API Keys**.
+
+![Image that points to the user drop-down Menu and points to the API key link](/tutorials/deploy-app/devx_apps_deploy-app_create-api-key.png)
+
+Next, click on **Add New API Key**. Fill out the required **API Key Name** and **Expiration Date** fields. Click on **Confirm** to create the API key. Copy the key value to your clipboard, as you will use it shortly.
+
+
+### Initialize Terraform
+
+The tutorial folder contains several Terraform files that you should review and explore. Each file is named after the respective type of Palette resource it supports. Use the following list to gain a high-level overview of the files.
+
<br />
+ +- **provider.tf** - the provider configuration and version of the provider. +- **inputs.tf** - contains all the Terraform variables and the default values used in the tutorial. +- **outputs.tf** - contains the output variables that are used to expose information. +- **data.tf** - all the data resources that are used to dynamically retrieve data from Palette. +- **virtual-clusters.tf** - the code for the virtual clusters that will be deployed in Palette. +- **application-profiles.tf** - contains the configurations that make up all the app profiles. +- **application.tf** - the configuration that creates a Spectro Cloud app and deploys the app into a virtual cluster. + +The [Spectro Cloud Terraform](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) provider requires credentials to interact with the Palette API. Export the API key as an environment variable so that the Spectro Cloud provider can authenticate with the Palette API. + +```shell +export SPECTROCLOUD_APIKEY=YourAPIKeyHere +``` + +Next, initialize the Terraform provider by issuing the following command. + +```shell +terraform init +``` + +```shell +Initializing the backend... + +Initializing provider plugins... + +Terraform has been successfully initialized! + +You may now begin working with Terraform. Try running "terraform plan" to see +any changes that are required for your infrastructure. All Terraform commands +should now work. + +If you ever set or change modules or backend configuration for Terraform, +rerun this command to reinitialize your working directory. If you forget, other +commands will detect it and remind you to do so if necessary. +``` + +The `init` command downloads all the required plugins and providers specified in **provider.tf** file. In the provider configuration, the scope or context of Palette is set. The provider is configured for the `Default` project, but you can change this value to point to any other projects you may have in Palette. + +
+ +```hcl +terraform { + required_providers { + spectrocloud = { + version = ">= 0.11.1" + source = "spectrocloud/spectrocloud" + } + } +} + +provider "spectrocloud" { + project_name = "Default" +} +``` + +To deploy the first scenario, a single application container, you must first create a configuration for the virtual cluster. Look at the virtual cluster resources in **virtual-clusters.tf**, and check out the "cluster-1" resource. The resource specifies the cluster name, the cluster group id, the resource limits, and the tags that will apply to the cluster. + +
+ +```hcl +resource "spectrocloud_virtual_cluster" "cluster-1" { + name = var.scenario-one-cluster-name + cluster_group_uid = data.spectrocloud_cluster_group.beehive.id + + resources { + max_cpu = 4 + max_mem_in_mb = 4096 + min_cpu = 0 + min_mem_in_mb = 0 + max_storage_in_gb = "2" + min_storage_in_gb = "0" + } + + tags = concat(var.tags, ["scenario-1"]) + + timeouts { + create = "15m" + delete = "15m" + } +} +``` + +The cluster group id is retrieved from the data resource `spectrocloud_cluster_group.beehive`. The data resource will query the Palette API and retrieve information about the specified cluster group, which is the *beehive* cluster group made available for all Palette users. This resource will create a new virtual cluster that is hosted in the *beehive* cluster group. + +
+ +```hcl +data "spectrocloud_cluster_group" "beehive" { + name = var.cluster-group-name + context = "system" +} +``` + +Next, take a look at the **application-profiles.tf** file. The resource `spectrocloud_application_profile.hello-universe-ui` is the resource responsible for creating the app profile for the first scenario. There are several points of interest in this resource that you should be familiar with. Focus on these five key points: + +
+ +1. The pack object represents a single tier or layer in the app profile. Inside the pack object, you define all the attributes that make up the specific layer of the app profile. + + +2. The type of app layer. This application is hosted on a container image. Therefore a container pack is specified. Instead of hard coding the value, the data resource `data.spectrocloud_pack_simple.container_pack` is specified. + + +3. A pack requires a registry id. To create the app profile, Terraform needs to know what registry is hosting the pack. For containers, you can use the `Public Repo` hosting most of the Palette packs. This time the data resource `data.spectrocloud_registry.public_registry` is specified to avoid hardcoding values. + + +4. The attribute `source_app_tier` is used to specify the unique id of the pack. All packs are assigned a unique id, including different versions of a pack. To ensure the correct pack is selected, the data resource `data.spectrocloud_pack_simple.container_pack` is used. + + +5. The `values` attribute is used to specify the properties of the specific service. In this case, the properties of the container such as the image name, ports, and service type, are specified. These properties can be provided as an extended string using the [Terraform Heredoc strings](https://developer.hashicorp.com/terraform/language/expressions/strings#heredoc-strings), or you can specify these values as a stringified JSON object. + + + + +```hcl +resource "spectrocloud_application_profile" "hello-universe-ui" { + name = "hello-universe-ui" + description = "Hello Universe as a single UI instance" + version = "1.0.0" + pack { + name = "ui" + type = data.spectrocloud_pack_simple.container_pack.type + registry_uid = data.spectrocloud_registry.public_registry.id + source_app_tier = data.spectrocloud_pack_simple.container_pack.id + values = <<-EOT + pack: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + releaseNameOverride: "{{.spectro.system.appdeployment.tiername}}" + postReadinessHooks: + outputParameters: + - name: CONTAINER_NAMESPACE + type: lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: metadata.namespace + - name: CONTAINER_SVC + type: lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: metadata.annotations["spectrocloud.com/service-fqdn"] + - name: CONTAINER_SVC_EXTERNALHOSTNAME + type: lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: status.load balancer.ingress[0].hostname + conditional: true + - name: CONTAINER_SVC_EXTERNALIP + type: lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: status.load balancer.ingress[0].ip + conditional: true + - name: CONTAINER_SVC_PORT + type: 
lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: spec.ports[0].port + containerService: + serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" + registryUrl: "" + image: ${var.single-container-image} + access: public + ports: + - "8080" + serviceType: load balancer + EOT + } + tags = concat(var.tags, ["scenario-1"]) +} +``` + + + + +A tip for gathering the required values to provide the `values` attribute is to visit the Palette console and create the app profile through the UI. During the app profile creation process, click on the API button to display the API payload. Review the payload's `values` attribute to find all of the properties of the service. You can copy the entire string and pass it to the resource `spectrocloud_application_profile` as an input for the `values` attribute. + + +![UI's ability to display the API object](/tutorials/deploy-app/devx_apps_deploy-apps_ui-api-display.png) + + +The last Terraform resource to review before deploying the application is located in the **application.tf** file. The resource `spectrocloud_application.hello-universe-ui` is what creates the *app*. In Palette, an app combines a virtual cluster and an app profile. When you deploy an app profile into a virtual cluster, you create an app. This resource points to the app profile `spectrocloud_application_profile.hello-universe-ui` and the cluster resource `spectrocloud_virtual_cluster.cluster-1`. The two resources are required to create an app. + +
+ + + + + +```hcl +resource "spectrocloud_application" "scenario-1" { + name = "single-scenario" + application_profile_uid = spectrocloud_application_profile.hello-universe-ui.id + + config { + cluster_name = spectrocloud_virtual_cluster.cluster-1.name + cluster_uid = spectrocloud_virtual_cluster.cluster-1.id + } + tags = concat(var.tags, ["scenario-1"]) +} +``` + + + +You can preview the resources Terraform will create by issuing the following command. + +```shell +terraform plan +``` + +``` +// Output condensed for readability +Plan: 3 to add, 0 to change, 0 to destroy. +``` + +The output displays the resources Terraform will create in an actual implementation. If you review the output, you will find the three resources previously discussed in great detail. + +Go ahead and deploy the application by using the `terraform apply` command. + +```shell +terraform apply -auto-approve +``` + +``` +// Output condensed for readability +Apply complete! Resources: 3 added, 0 changed, 0 destroyed. +``` + +Log in to [Palette](https://console.spectrocloud.com), navigate to the left **Main Menu**, and select **Apps**. Click on the **scenario-1** row, which takes you to the application’s overview page. Once you are on the scenario-1 overview page, click on the exposed URL for the service. A hyperlink for port 8080 is available. + + +![scenario-1 overview page with an arrow pointing to the URL](/tutorials/deploy-app/devx_app_deploy-apps_scenario-1-overview.png) + +
+ +:::caution + + +It takes between one to three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request. + +::: + +Welcome to Hello Universe, a demo application to help you learn more about Palette and its features. Feel free to click on the logo to increase the global counter and for a fun image change. + + +![Hello Universe landing page displaying global clicks](/tutorials/deploy-app/devx_apps_deploy-apps_hello-universe.png) + + +You have deployed your first app profile to Palette. Your first application is a single container application with no upstream dependencies. In a production environment, you often deploy applications that consume other services and require connectivity with other resources. The following scenario expands on the single application scenario by adding an API server and Postgres database to simulate a common application architecture encountered in a production environment. + + +### Deploy Multiple Applications + + +The second scenario contains two additional microservices, an API, and a Postgres database. This time, instead of using a the global API for storing clicks, you will instead deploy your own API server and Postgres database. The following diagram illustrates the network connectivity path and behavior discussed. + + +![A diagram of the three-tier architecture where the load balancer forwards all requests to the UI container OR the API container](/tutorials/deploy-app/devx_apps_deploys-apps_reverse-proxy-diagram.png) + +To deploy the second scenario, you will again deploy the same three resource types previously discussed but another instance of them. + +- `spectrocloud_virtual_cluster` - `cluster-2` - this resource will create the second virtual cluster. + + +- `spectrocloud_application_profile` - `hello-universe-complete` - the application profile that will contain the three different services, database, API, and UI. + + +- `spectrocloud_application` - `scenario-2` - the application that will be deployed into cluster-2 that uses the `spectrocloud_application_profile.hello-universe-complete` app profile. + + +You can review all the resources for the second scenario in the respective Terraform files. You can find the second scenario code after the comment block in all of the files that have resources specific to the second scenario. + +```hcl +########################################## +# Scenario 2: Multiple Applications +########################################## +``` + + +From a Terraform perspective, there are no significant differences in the authoring experience. The main difference in the second scenario lies in the application profile resource `spectrocloud_application_profile.hello-universe-complete`. The other difference is that the virtual cluster you will deploy in the second scenario, cluster-2, is much larger than cluster-1. + +You can add multiple services to an app profile, but you must add a `pack {}` block for each service in the `spectrocloud_application_profile` resource. Take a close look at the `spectrocloud_application_profile.hello-universe-complete` resource below. + +
+ +```hcl +resource "spectrocloud_application_profile" "hello-universe-complete" { + count = var.enable-second-scenario == true ? 1 : 0 + name = "hello-universe-complete" + description = "Hello Universe as a three-tier application" + version = "1.0.0" + pack { + name = "postgres-db" + type = data.spectrocloud_pack_simple.postgres_service.type + source_app_tier = data.spectrocloud_pack_simple.postgres_service.id + properties = { + "dbUserName" = var.database-user + "databaseName" = var.database-name + "databaseVolumeSize" = "8" + "version" = var.database-version + } + } + pack { + name = "api" + type = data.spectrocloud_pack_simple.container_pack.type + registry_uid = data.spectrocloud_registry.public_registry.id + source_app_tier = data.spectrocloud_pack_simple.container_pack.id + values = <<-EOT +pack: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + releaseNameOverride: "{{.spectro.system.appdeployment.tiername}}" +postReadinessHooks: + outputParameters: + - name: CONTAINER_NAMESPACE + type: lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: metadata.namespace + - name: CONTAINER_SVC + type: lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: metadata.annotations["spectrocloud.com/service-fqdn"] + - name: CONTAINER_SVC_EXTERNALHOSTNAME + type: lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: status.load balancer.ingress[0].hostname + conditional: true + - name: CONTAINER_SVC_EXTERNALIP + type: lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: status.load balancer.ingress[0].ip + conditional: true + - name: CONTAINER_SVC_PORT + type: lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: spec.ports[0].port +containerService: + serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" + registryUrl: "" + image: ${var.multiple_container_images["api"]} + access: public + ports: + - "3000" + serviceType: load balancer + env: + - name: DB_HOST + value: "{{.spectro.app.$appDeploymentName.postgres-db.POSTGRESMSTR_SVC}}" + - name: DB_USER + value: "{{.spectro.app.$appDeploymentName.postgres-db.USERNAME}}" + - name: DB_PASSWORD + value: "{{.spectro.app.$appDeploymentName.postgres-db.PASSWORD}}" + - name: DB_NAME + value: counter + - name: DB_INIT + value: "true" + - name: DB_ENCRYPTION + value: "${var.database-ssl-mode}" + - name: AUTHORIZATION + value: "true" + EOT + } + pack { + name = "ui" + type = data.spectrocloud_pack_simple.container_pack.type + registry_uid = 
data.spectrocloud_registry.public_registry.id + source_app_tier = data.spectrocloud_pack_simple.container_pack.id + values = <<-EOT + pack: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + releaseNameOverride: "{{.spectro.system.appdeployment.tiername}}" + postReadinessHooks: + outputParameters: + - name: CONTAINER_NAMESPACE + type: lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: metadata.namespace + - name: CONTAINER_SVC + type: lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: metadata.annotations["spectrocloud.com/service-fqdn"] + - name: CONTAINER_SVC_EXTERNALHOSTNAME + type: lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: status.load balancer.ingress[0].hostname + conditional: true + - name: CONTAINER_SVC_EXTERNALIP + type: lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: status.load balancer.ingress[0].ip + conditional: true + - name: CONTAINER_SVC_PORT + type: lookupSecret + spec: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + secretName: "{{.spectro.system.appdeployment.tiername}}-custom-secret" + ownerReference: + apiVersion: v1 + kind: Service + name: "{{.spectro.system.appdeployment.tiername}}-svc" + keyToCheck: spec.ports[0].port + containerService: + serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" + registryUrl: "" + image: ${var.multiple_container_images["ui"]} + access: public + ports: + - "8080" + env: + - name: "API_URI" + value: "http://{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC_EXTERNALHOSTNAME}}:3000" + - name: "TOKEN" + value: "${var.token}" + serviceType: load balancer + EOT + } + tags = concat(var.tags, ["scenario-2"]) +} +``` + +Each service has its own `pack {}` and a set of unique properties and values. + +The database service block uses a different data resource, `data.spectrocloud_pack_simple.postgres_service`, to find the Postgres service. If you review the data resource, you will find a different type, `operator-instance`. The Postgres service uses a Postgres [operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) to manage the database inside the virtual cluster. + +
+
+```hcl
+data "spectrocloud_pack_simple" "postgres_service" {
+  name         = "postgresql-operator"
+  type         = "operator-instance"
+  version      = "1.8.2"
+  registry_uid = data.spectrocloud_registry.public_registry.id
+}
+```
+
+Inside the `pack {}` block, the database service uses the `properties` attribute instead of the `values` attribute. The `properties` values provided are the same properties you must fill out when creating the database service through the UI workflow.
+
<br />
+ +```hcl + pack { + name = "postgres-db" + type = data.spectrocloud_pack_simple.postgres_service.type + source_app_tier = data.spectrocloud_pack_simple.postgres_service.id + properties = { + "dbUserName" = var.database-user + "databaseName" = var.database-name + "databaseVolumeSize" = "8" + "version" = var.database-version + } + } +``` + +If you go further down the app profile stack, you will find the `pack {}` object for the API. A good part of the content provided to the `values` attribute will be removed in the following code snippet to improve readability. Take a closer look at the `env` block inside the `containerService` section. The API server requires a set of environment variables to start properly, such as the database hostname, user, password, and more. The Postgres service lower in the app profile stack exposes output variables you can use to provide information to other services higher up in the app profile stack. + +The `env` section uses the output variables exposed by the Postgres service. Other environment variables specified will be populated during Terraform runtime because they reference Terraform variables. Palette will populate the environment variables referencing a Palette output variable at runtime inside the virtual cluster. + +
+ +```hcl +pack { + name = "api" + type = data.spectrocloud_pack_simple.container_pack.type + registry_uid = data.spectrocloud_registry.public_registry.id + source_app_tier = data.spectrocloud_pack_simple.container_pack.id + values = <<-EOT +pack: + namespace: "{{.spectro.system.appdeployment.tiername}}-ns" + releaseNameOverride: "{{.spectro.system.appdeployment.tiername}}" +postReadinessHooks: + outputParameters: + #.... + #... +containerService: + serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" + registryUrl: "" + image: ${var.multiple_container_images["api"]} + access: public + ports: + - "3000" + serviceType: load balancer + env: + - name: DB_HOST + value: "{{.spectro.app.$appDeploymentName.postgres-db.POSTGRESMSTR_SVC}}" + - name: DB_USER + value: "{{.spectro.app.$appDeploymentName.postgres-db.USERNAME}}" + - name: DB_PASSWORD + value: "{{.spectro.app.$appDeploymentName.postgres-db.PASSWORD}}" + - name: DB_NAME + value: counter + - name: DB_INIT + value: "true" + - name: DB_ENCRYPTION + value: "${var.database-ssl-mode}" + - name: AUTHORIZATION + value: "true" + EOT + } +``` + +The last `pack {}` block in the app profile resource `spectrocloud_application_profile.hello-universe-complete` is for the UI. Like the API service, environment variables are used to initialize the UI and the reverse proxy. The UI service requires the URL of the API service and the URL of the public-facing load balancer. Palette output variables are used to populate these two environment variables. A Terraform variable will populate the authentication token required for all API requests. + +
+ +```hcl +pack { + name = "ui" + type = data.spectrocloud_pack_simple.container_pack.type + registry_uid = data.spectrocloud_registry.public_registry.id + source_app_tier = data.spectrocloud_pack_simple.container_pack.id + values = <<-EOT + # .... + # .... + containerService: + serviceName: "{{.spectro.system.appdeployment.tiername}}-svc" + registryUrl: "" + image: ${var.multiple_container_images["ui"]} + access: public + ports: + - "8080" + env: + - name: "API_URI" + value: "http://{{.spectro.app.$appDeploymentName.api.CONTAINER_SVC_EXTERNALHOSTNAME}}:3000" + - name: "TOKEN" + value: "${var.token}" + serviceType: load balancer + EOT + } +``` + + + +:::info + +All container services expose their service address, Kubernetes hostname, and the exposed service ports as output variables. +You will use output variables frequently when creating app profiles in the future. You can learn more about connecting services by referring to the [Service Connectivity](../app-profile/services/connectivity.md) documentation. + +::: + + +Open the **inputs.tf** file and set the `variable enable-second-scenario"` default value to `true`. + +
+ +```terraform +variable "enable-second-scenario" { + type = bool + description = "Whether to enable the second scenario" + default = true +} +``` + +Next, issue the command `terraform apply` to deploy the second scenario. Notice how the `-var` flag is included with the token value in the command. + +
+
+```shell
+terraform apply -var="token=931A3B02-8DCC-543F-A1B2-69423D1A0B94" -auto-approve
+```
+
+```
+// Output condensed for readability
+Apply complete! Resources: 3 added, 0 changed, 0 destroyed.
+```
+
+Log in to [Palette](https://console.spectrocloud.com), navigate to the left **Main Menu**, and click on **Apps**. Select the **scenario-2** row. When you are on the scenario-2 overview page, click on the exposed URL for the service. Hyperlinks for port 8080 and port 3000 are available.
+
+![A view of the scenario-2 overview page](/tutorials/deploy-app/devx_apps_deploy_scenario-2-overview.png)
+
+Click on the UI’s service URL for port **8080** to access the Hello Universe application in a three-tier configuration.
+
<br />
+ +:::caution + + +It takes between one to three minutes for DNS to properly resolve the public load balancer URL. We recommend waiting a few moments before clicking on the service URL to prevent the browser from caching an unresolved DNS request. + +::: + +![View of the self-hosted hello universe app](/tutorials/deploy-app/devx_apps_deploy-app_self-hosted-hello-universe.png) + +The global counter is no longer available. Instead, you have a counter that starts at zero. Each time you click on the center image, the counter is incremented and stored in the Postgres database along with metadata. + + +### Cleanup + +To remove all resources created in this tutorial, issue the `terraform destroy` command. + +
+ +```shell +terraform destroy -var="token=931A3B02-8DCC-543F-A1B2-69423D1A0B94" -auto-approve +``` + +```shell +Destroy complete! Resources: 6 destroyed. +``` + +
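+
+If you want to confirm that nothing was left behind, you can list the resources Terraform still tracks. After a successful destroy, the command below should produce no output.
+
+```shell
+terraform state list
+```
+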
+ +If you are using the tutorial container and want to exit the container, type `exit` in your terminal session and press the **Enter** key. Next, issue the following command to stop the container. + +
+ +```shell +docker stop tutorialContainer && \ +docker rmi --force ghcr.io/spectrocloud/tutorials:1.0.4 +``` + +:::info + +If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for **Force Delete**. To trigger a force delete, navigate to the respective cluster’s details page and click on **Settings**. Click on the **Force Delete Cluster** to delete the cluster. Palette will automatically remove clusters stuck in the cluster deletion phase for over 24 hours. + +::: + + + +## Wrap-Up + +In this tutorial, you learned about Palette’s Dev Engine and App Mode. You deployed two virtual clusters, each containing a different architecture and configuration of the Hello Universe application. Palette’s Dev Engine enables developers to quickly deploy applications into a Kubernetes environment without requiring Kubernetes knowledge. In a matter of minutes, you deployed a new Kubernetes cluster and all its applications without having to write Kubernetes configuration files. + +To learn more about Palette Dev Engine and its capabilities, check out the references resource below. + + +- [Palette Modes](../../introduction/palette-modes.md) +- [Spectro Cloud Terraform Provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) +- [App Profiles](../app-profile/app-profile.md) +- [App Services](../app-profile/services/services.md) +- [Palette Virtual Clusters](../palette-virtual-clusters/palette-virtual-clusters.md) +- [Hello Universe GitHub respository](https://github.com/spectrocloud/hello-universe) diff --git a/docs/docs-content/devx/apps/logs.md b/docs/docs-content/devx/apps/logs.md new file mode 100644 index 0000000000..4826bc197b --- /dev/null +++ b/docs/docs-content/devx/apps/logs.md @@ -0,0 +1,66 @@ +--- +sidebar_label: "App Logs" +title: "App Logs" +description: "Download Palette application logs." +hide_table_of_contents: false +sidebar_position: 20 +tags: ["devx", "app mode", "pde"] +--- + +Palette Dev Engine (PDE) provides access to application configuration and status logs for each application. The following files are available for download. + +| **File** | **Description** | +|---------------------------|-----------------------------------------------------------------------------------------------------------------------------| +| **cloudconfig.yaml** | The cluster configuration file, which contains Kubernetes specifications for the cluster. | +| **manifest.yaml** | The generated manifest file, which contains pack details. | +| **spec_description.yaml** | A file that contains metadata about clusters and applications. This file captures status probes and their results. | + + +:::info + +To download cluster logs, navigate to the cluster's **Overview** page and select **Settings** > **Download Logs**. Select the log files you want to review. + +::: + + +## Download Application Logs + +Use the steps below to download application logs. The download bundle is a zip file containing all the log files. + + +### Prerequisites + +* A deployed application in app mode. + +* Access to view the application and its logs. + + +### Download Logs + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. If you are not already in App Mode, navigate to the **User Menu** and select **Switch to App Mode**. + + +3. On the left **Main Menu**, select **Apps**. + + +4. Select you application. + + +5. Click the **Actions** drop-down menu. + + +6. Select **Download Logs**. + + +7. A message displays with the download link. + + +8. 
Click the download link and review the files. + + +### Validate + +To review the logs, locate a zip file with file name format `[clusterNameHere]-logs-[currentTimeStamp]` in the downloads folder on your device. \ No newline at end of file diff --git a/docs/docs-content/devx/devx.md b/docs/docs-content/devx/devx.md new file mode 100644 index 0000000000..704f246660 --- /dev/null +++ b/docs/docs-content/devx/devx.md @@ -0,0 +1,93 @@ +--- +sidebar_label: "Palette Dev Engine" +title: "Palette Dev Engine (PDE)" +description: "Explore Palette Dev Engine" +hide_table_of_contents: false +sidebar_custom_props: + icon: "users" +tags: ["devx", "app mode", "pde"] +--- + + +Palette provides two different modes for deploying and managing applications. The first mode is *Cluster Mode* - this mode enables you to create, deploy, and manage Kubernetes clusters and applications. The second mode is *App Mode* - a mode optimized for a simpler and streamlined developer experience that allows you to only focus on the building, maintenance, testing, deployment, and monitoring of your applications. + +You can leverage Spectro Cloud's complementary managed Kubernetes cluster when using App Mode. The complementary resources have a limit of 12 vCPU, 16 GiB of memory, and 20 GiB of free storage. Alternatively, you may deploy applications on Kubernetes clusters that belong to your organization and are managed by Palette. + +:::info +Check out the in-depth explanation of [App Mode and Cluster Mode](../introduction/palette-modes.md) to learn more about each mode. +::: + +## Get Started + +To get started with App Mode, give the tutorial [Deploy an Application using Palette Dev Engine](apps/deploy-app.md) a try so that you can learn how to use App Mode with Palette Dev Engine. + + +## Supported Platforms + +App Mode is available for the following Palette platforms. + +| Platform | Supported | Palette Version | +|---|----|---| +| SaaS | ✅| `v3.0.0` or greater. | +| Self-hosted | ✅ | `3.4.0` or greater. | +| Airgap Self-hosted | ❌| N/A. + + +## Manage Resources + +The PDE dashboard provides a snapshot of resource utilization in your PDE environment. You can keep track of the resource utilization in your PDE environment without having to navigate to different views. The dashboard displays the following information. +
+ +* The number of apps deployed. + + +* The number of virtual clusters and their respective status. + + +* The aggregate resource utilization at both the tenant and system levels for these resources. + * Virtual clusters + * CPU + * Memory + * Storage + + +* The number of app profiles available. + + + +![A view of the PDE dashboard with resources in use.](/docs_devx_pde-dashboard-utilization.png) + + +## Automation Support + +You can manage PDE resources through the [Palette API](/api/introduction), [Spectro Cloud Terraform provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs), and the Palette CLI. Download the Palette CLI from the [Downloads](/spectro-downloads#palettecli) page to start programmatically using PDE. + +![A view of the Palette CLI menu from a terminal](/devx_devx_cli-display.png) + +:::tip + +Check out the Palette CLI [install guide](../palette-cli/install-palette-cli.md) for more information on how to install and configure the CLI. +::: + +
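+
+As a minimal sketch of the API route, the request below shows the general shape of an authenticated call to the Palette API using the `ApiKey` header. The endpoint path and the environment variable holding the key are placeholders; consult the [Palette API](/api/introduction) documentation for the specific PDE endpoints you want to manage.
+
+```shell
+# Hypothetical example. Replace the path with the endpoint you need from the Palette API docs.
+curl --silent --header "ApiKey: $PALETTE_API_KEY" \
+  --header "Content-Type: application/json" \
+  "https://api.spectrocloud.com/v1/<endpoint-path>"
+```
+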
+ +## PDE Visual Studio Code Extension + +You can create and manage lightweight Kubernetes clusters from within Visual Studio (VS) Code by using the PDE VS Code Extension. The plugin accelerates developing and testing your containerized applications and integrates with the Kubernetes Extension for VS Code. To learn about features of PDE VS Code Extension and how to install and activate it, check out [PDE Extension for Visual Studio Code](https://marketplace.visualstudio.com/items?itemName=SpectroCloud.extension-palette). + + +## Resources + +- [Use Cases](enterprise-user.md) + + +- [App Profiles](app-profile/app-profile.md) + + +- [Apps](./apps/apps.md) + + +- [Palette Virtual Clusters](palette-virtual-clusters/palette-virtual-clusters.md) + + +- [Manage Dev Engine](manage-dev-engine/manage-dev-engine.md) diff --git a/docs/docs-content/devx/enterprise-user.md b/docs/docs-content/devx/enterprise-user.md new file mode 100644 index 0000000000..b8382f3bec --- /dev/null +++ b/docs/docs-content/devx/enterprise-user.md @@ -0,0 +1,63 @@ +--- +sidebar_label: "Use Cases" +title: "Use Cases" +description: "Explore Palette Dev Engine use cases." +hide_table_of_contents: false +sidebar_position: 0 +tags: ["devx", "app mode", "pde"] +--- + + +You can use the Palette Developer Experience (PDE) to serve two common use cases: enterprise developers and individual application authors who want to deploy a containerized application without worrying about infrastructure overhead. + +Refer to the [Enterprise Users](#enterprise-users) section to learn more about enabling PDE for a large set of downstream users. If you are an individual application author, check out the [Individual Application Author](#application-authors) section to get started. + +## Enterprise Users + +To enable PDE for downstream users, start by reviewing the following resources. + + +1. Understand the difference between [Cluster Mode and App Mode](../introduction/palette-modes.md). + + +2. Create and Manage the [Cluster Group](../clusters/cluster-groups/cluster-groups.md). + + +3. Allocate [User Quota](manage-dev-engine/resource-quota.md). + + +The next set of references are not required but good for Palette administrators to be aware of. + +- [Enable Disk Backup on Virtual Clusters](../clusters/cluster-groups/cluster-group-backups.md). + +- [Set Up Ingress for a Cluster Group](../clusters/cluster-groups/ingress-cluster-group.md). + +- [Pause and Resume Palette Virtual Clusters](palette-virtual-clusters/pause-restore-virtual-clusters.md). + +- [Resize Palette Virtual Clusters](palette-virtual-clusters/resize-virtual-clusters.md). + + +## Application Authors + +Use PDE to deploy your containerized applications to Palette. Leverage Palette's free-tier offering of PDE to get started. Create your application profiles and deploy your applications to Palette in no time. + + +Use the following resource to get started with PDE today. + +* Learn about [Palette's Free Tier Offering](../getting-started/palette-freemium.md). + +* [Tutorial](./apps/deploy-app.md). + +* Learn about [App Mode versus Cluster Mode](../introduction/palette-modes.md). + +* Familiarize yourself with [App Profiles](app-profile/app-profile.md). + +* Review the supported [out-of-the-box-services](app-profile/services/service-listings/service-listings.mdx). 
+ + + + + + + + diff --git a/docs/docs-content/devx/manage-dev-engine/_category_.json b/docs/docs-content/devx/manage-dev-engine/_category_.json new file mode 100644 index 0000000000..e7e7c54966 --- /dev/null +++ b/docs/docs-content/devx/manage-dev-engine/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 40 +} diff --git a/docs/docs-content/devx/manage-dev-engine/manage-dev-engine.md b/docs/docs-content/devx/manage-dev-engine/manage-dev-engine.md new file mode 100644 index 0000000000..9acfe6ef5f --- /dev/null +++ b/docs/docs-content/devx/manage-dev-engine/manage-dev-engine.md @@ -0,0 +1,28 @@ +--- +sidebar_label: "Manage Dev Engine" +title: "Manage Dev Engine" +description: "Palette Dev Engine Management" +hide_table_of_contents: false +tags: ["devx", "app mode", "pde"] +--- + +# Manage Dev Engine + +You manage Palette Dev Engine (PDE) primarily through the [Cluster Group's Virtual Cluster](../../clusters/cluster-groups/cluster-groups.md) settings. You also can manage PDE through the resources shared below to help you customize the PDE environment to better fit your needs. + + + +## Resources + +- [Resource Quotas](resource-quota.md) + + +- [Dev Engine Registries](registries.md) + + +- [Manage Single Sign-On (SSO)](sso.md) + + + + + diff --git a/docs/docs-content/devx/manage-dev-engine/registries.md b/docs/docs-content/devx/manage-dev-engine/registries.md new file mode 100644 index 0000000000..9c38646ca7 --- /dev/null +++ b/docs/docs-content/devx/manage-dev-engine/registries.md @@ -0,0 +1,24 @@ +--- +sidebar_label: "Dev Engine Registries" +title: "Dev Engine Registries" +description: "Palette Dev Engine Registries" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["devx", "app mode", "pde"] +--- + + +The Pack registry is a server-side application that stores and serves packs to its clients. Packs from a pack registry are retrieved and presented as options during the creation of a cluster profile. Palette supports the configuration of multiple registries. + +## Default Registry +The default pack registry is the public pack registry. It consists of several packs that reduce the friction for a user to quickly create a cluster profile and launch a Kubernetes cluster with their choice of integrations. We maintain all the packs in the default pack registry, this includes taking care of upgrades in the pack registry whenever required. + +## Custom Pack Registry +Users can set up a custom pack registry using a Docker image provided by Spectro Cloud to upload and maintain custom packs. You can use the [Packs Registry CLI](../../registries-and-packs/spectro-cli-reference.md) tool to interact with and manage pack content in the pack registry. Custom registries offer a mechanism for extending the capabilities of a platform by defining additional integrations. + +Palette Dev Engine supports the following types of custom registries: + +* [Helm Registry](../../registries-and-packs/helm-charts.md) + +* [OCI Registry](../../registries-and-packs/oci-registry.md) + diff --git a/docs/docs-content/devx/manage-dev-engine/resource-quota.md b/docs/docs-content/devx/manage-dev-engine/resource-quota.md new file mode 100644 index 0000000000..a2cd0cfbdf --- /dev/null +++ b/docs/docs-content/devx/manage-dev-engine/resource-quota.md @@ -0,0 +1,172 @@ +--- +sidebar_label: "Resource Quotas" +title: "Resource Quotas" +description: "Learn about Palette Dev Engine resource quotas." 
+hide_table_of_contents: false
+sidebar_position: 0
+tags: ["devx", "app mode", "pde"]
+---
+
+This section covers the available deployment environments for Palette Virtual Clusters and the resource quotas that apply to users and virtual clusters.
+
+## Available Environments
+
+Palette Dev Engine users have access to a Palette-managed cluster group named *beehive*. The beehive cluster group is also a *system-level cluster group*, meaning that Spectro Cloud manages it. The beehive cluster group falls under the free tier of Palette and comes with its own set of resource limits. All users are subject to the following resource quotas when using the beehive cluster group.
+
+| Type | Max Limit | Description |
+|-----------------|-----------|-------------------------------------------------------------------------------------------------------|
+| Virtual Cluster | 2 | Each user is allowed to deploy a total of two virtual clusters. |
+| CPU | 12 | Each user is allowed to consume a total of 12 CPU. This limit spans both virtual clusters. |
+| Memory | 12 GiB | Each user is allowed to consume a total of 12 GiB of Memory. This limit spans both virtual clusters. |
+| Storage | 20 GiB | Each user is allowed to consume a total of 20 GiB of storage. This limit spans both virtual clusters. |
+
+
+Palette administrators can remove the beehive cluster and other system-level cluster groups for all downstream users by setting the tenant developer setting **Hide system-level cluster groups from tenant users** to **true**. When this setting value is **true**, the beehive cluster is not displayed in the cluster group drop-down menu when deploying Palette virtual clusters.
+
+![The deployment path for a user](/045-devx_resource-quota_is-beehive-enabled.png)
+
+You can change tenant developer settings by switching to the tenant scope and navigating from the left **Main Menu** to **Tenant Settings > Developers Settings**. Toggle the **Hide system-level cluster groups from tenant users** button.
+
+
+## Virtual Cluster Resource Quota
+
+Virtual clusters inherit resource quotas from the parent cluster group. The cluster group's virtual cluster settings determine the maximum resources per virtual cluster and can be used to limit the amount of resources a virtual cluster can claim from the group. By default, each virtual cluster requires at least 4 CPU, 4 GiB of memory, and 2 GiB of storage. Keep the required minimum values in mind when deploying virtual clusters or when defining the cluster group's virtual cluster settings.
+
+|**Virtual Cluster** | **Minimum Limit**|
+|------------------------------|-----------------|
+|CPU (per request) | 4 |
+|Memory (per request) | 4 GiB |
+|Storage (per request) | 2 GiB |
+
+
+:::caution
+
+A virtual cluster requires a minimum of 4 CPU, 4 GiB of memory, and 2 GiB of storage to launch successfully. The default settings in the cluster group virtual cluster configuration YAML file have the following values:
+
+```yaml
+vcluster:
+  resources:
+    limits:
+      cpu: 1000m
+      memory: 1Gi
+      ephemeral-storage: 1Gi
+    requests:
+      cpu: 200m
+      memory: 256Mi
+      ephemeral-storage: 128Mi
+```
+
+Increasing the limit and request values could result in a virtual cluster requiring more resources than the default values of 4 CPU, 4 GiB of memory, and 2 GiB of storage.
+
+:::
+
+If a user attempts to create a virtual cluster that needs more resources than the cluster group allows, the request will be denied because it exceeds the cluster group's defined limits.
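+
+For illustration only, the following snippet shows the same configuration keys from the caution above with raised values, for example to give a virtual cluster more headroom. The numbers are placeholders rather than recommendations, and any increase still counts against the cluster group and user quotas described on this page.
+
+```yaml
+vcluster:
+  resources:
+    limits:
+      cpu: 2000m                 # raised from the 1000m default
+      memory: 2Gi                # raised from the 1Gi default
+      ephemeral-storage: 2Gi     # raised from the 1Gi default
+    requests:
+      cpu: 500m                  # raised from the 200m default
+      memory: 512Mi              # raised from the 256Mi default
+      ephemeral-storage: 256Mi   # raised from the 128Mi default
+```
+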
+
+
+Refer to the [Create and Manage Cluster Groups](../../clusters/cluster-groups/create-cluster-group.md) guide to learn more about adjusting a cluster group's virtual cluster settings.
+
+
+## User Resource Quotas
+
+All Palette users are subject to resource quotas. The two entities that impact a user's resource quotas when interacting with virtual clusters are the tenant developer user quotas and the cluster group virtual cluster settings.
+
+## Tenant Developer User Quotas
+
+The global user quotas that a Palette administrator has defined in the tenant developer settings are always evaluated first. The tenant user quotas define the maximum set of resources a user can claim:
+
+* Virtual clusters
+
+* CPU
+
+* Memory
+
+* Storage
+
+For example, assume the tenant developer user quotas are defined as four virtual clusters, 20 CPU, 32 GiB of memory, and 60 GiB of storage. With these settings, each user could deploy four virtual clusters, each with the maximum size allowed by the cluster group limits.
+
+A user could also deploy a single virtual cluster that consumes 20 CPU, 32 GiB of memory, and 60 GiB of storage. In the latter case, the user cannot deploy additional clusters because the CPU, memory, and storage quotas are exhausted.
+
+
+
+:::info
+
+To change tenant user quotas, switch the scope to **Tenant Admin** and navigate from the left **Main Menu** to **Tenant Settings** > **Developer Settings**. In the **User Quota** section, you can adjust the maximum amount of resources available to users.
+
+
+:::
+
+## Quota Evaluation
+
+Palette evaluates each virtual cluster creation request to verify that the requesting user has enough resource quota remaining, based on the defined tenant user quota, and that the virtual cluster request falls within the allowed limits of the parent cluster group.
+
+The following diagram displays the evaluation process Palette uses to determine the status of a virtual cluster creation request.
+
+![Order of flow when it comes to evaluating cluster requests](/045-devx_resource-quota_evaluation-process.png)
+
+To better understand this concept, consider the following examples.
+
+* Tenant Developer User Quotas:
+  * Number of virtual clusters: 4
+  * CPU: 20
+  * Memory: 32 GiB
+  * Storage: 60 GiB
+* Hide system-level cluster groups from tenant users: false
+
+
+* Cluster Group *dev-special* Virtual Cluster Settings:
+  * CPU (per request): 8
+  * Memory (per request): 12 GiB
+  * Storage (per request): 12 GiB
+
+
+* User A's Current Resource Utilization:
+  * 1 virtual cluster in dev-special
+  * 8 CPU
+  * 12 GiB Memory
+  * 20 GiB of Storage
+
+
+* User B's Current Resource Utilization:
+  * 4 virtual clusters in dev-special
+  * 16 CPU
+  * 32 GiB Memory
+  * 60 GiB of Storage
+
+
+### Scenario 1
+
+User A requests to deploy a virtual cluster to the dev-special cluster group. The virtual cluster requests the following resources:
+* 8 CPU
+* 12 GiB Memory
+* 20 GiB Storage
+
+**Request**: ✅
+
+**Explanation**: Based on the tenant user quota, user A still has two virtual clusters, 12 CPU, 20 GiB of memory, and 40 GiB of storage remaining. Based on the cluster group quota, user A is within the resource limits of the dev-special cluster group.
+
+
+
+### Scenario 2
+
+User B requests to deploy a virtual cluster to the dev-special cluster group. The virtual cluster requests the following resources:
+* 4 CPU
+* 8 GiB Memory
+* 4 GiB Storage
+
+**Request**: ❌
+
+**Explanation**: User B has already reached the tenant user quota of four virtual clusters, so the request is denied even though it falls within the cluster group's approved limits.
+
+
+ +### Scenario 3 + +User B is creating a request to deploy a virtual cluster to the beehive cluster group. The virtual cluster is requesting the following resources: +* 4 CPU +* 8 GiB Memory +* 4 GiB Storage + +**Request**: ✅ + +**Explanation**: The request is accepted because it targets a system-level cluster group, the beehive cluster group and not a cluster group managed by the tenant. Based on the cluster group quota, the number of requested resources falls within the within the approved limits of the system-level quota. + diff --git a/docs/docs-content/devx/manage-dev-engine/sso.md b/docs/docs-content/devx/manage-dev-engine/sso.md new file mode 100644 index 0000000000..38407a216b --- /dev/null +++ b/docs/docs-content/devx/manage-dev-engine/sso.md @@ -0,0 +1,88 @@ +--- +sidebar_label: "Manage Single Sign-On (SSO)" +title: "Manage Single Sign-On (SSO)" +description: "Learn how to configure SSO for Palette Dev Engine." +icon: "" +hide_table_of_contents: false +sidebar_position: 20 +tags: ["devx", "app mode", "pde"] +--- + + + +Palette supports the ability to use Single Sign-On (SSO) and third-party Social Sign-In Providers, such as Google and GitHub. Use the following steps to either enable or disable the feature. + +## Enable SSO + +To enable SSO with third-party Social Sign-In Providers use the following steps. + +:::info + +To learn more about the Sign-In Flow, refer to the [User Authentication](../../user-management/user-authentication.md#sign-in-flow) documentation. + +::: + + +### Prerequisites + +* Palette Tenant Administrator access. + + +### Enable SSO + +1. Log in to [Palette](https://console.spectrocloud.com) as a Tenant Admin. + + +2. Navigate to the left **Main Menu**, select **Tenant Settings**, and select **SSO**. + + +3. Next, click the **Auth Providers** tab and toggle the **Enable Provider Login** button on. + + + ![The Auth providers tenant settings page with an arrow toward the toggle button.](/devx_manage-dev-engine_sso_display-oidc-page.png) + + +4. Select one of the supported Social Sign-In providers, and confirm your change. + + + + +### Validate + +You can validate SSO is enabled by attempting to log into your Palette tenant through SSO. Select the third-party provider you enabled for SSO. + + +![Palette's login view with the SSO providers highlighted.](/devx_manage-dev-engine_sso_palette-login-view.png) + + +## Disable SSO + +Palette provides the flexibility to disable SSO to restrict this capability. Use the following steps to disable SSO for Palette. + + +### Prerequisites + +* Palette Tenant Administrator access. + + + +### Disable Steps + +1. Log in to [Palette](https://console.spectrocloud.com) as a Tenant Admin. + + +2. Navigate to the left **Main Menu**, select **Tenant Settings**, and select **SSO**. + + +3. Next, click the **Auth Providers** tab and toggle the **Enable Provider Login** button off. + + +4. Log out of Palette. + + +### Validate + +You can validate SSO is disabled by attempting to log into your Palette tenant through SSO. Any SSO attempts will fail due to SSO being disabled at the tenant level. 
+
+
+
diff --git a/docs/docs-content/devx/palette-virtual-clusters/_category_.json b/docs/docs-content/devx/palette-virtual-clusters/_category_.json
new file mode 100644
index 0000000000..c3460c6dbd
--- /dev/null
+++ b/docs/docs-content/devx/palette-virtual-clusters/_category_.json
@@ -0,0 +1,3 @@
+{
+  "position": 30
+}
diff --git a/docs/docs-content/devx/palette-virtual-clusters/palette-virtual-clusters.md b/docs/docs-content/devx/palette-virtual-clusters/palette-virtual-clusters.md
new file mode 100644
index 0000000000..061961a3c6
--- /dev/null
+++ b/docs/docs-content/devx/palette-virtual-clusters/palette-virtual-clusters.md
@@ -0,0 +1,91 @@
+---
+sidebar_label: "Palette Virtual Clusters"
+title: "Palette Virtual Clusters"
+description: "Explore Palette Dev Engine as Free Developers"
+hide_table_of_contents: false
+tags: ["devx", "app mode", "pde"]
+---
+
+Palette Virtual Clusters are Kubernetes clusters that run as nested clusters within an existing cluster (also known as a Host Cluster) or Host Cluster Group and share the host cluster resources, such as CPU, memory, and storage. By default, virtual clusters use k3s, a highly available, certified Kubernetes distribution designed for production workloads. Palette Virtual Clusters are powered by [vCluster](https://www.vcluster.com/).
+
+The Palette platform provisions and orchestrates all Palette Virtual Clusters, making it simple to use the lightweight Kubernetes technology stack and tools ecosystem. Deploy virtual clusters to a Host Cluster Group by following the deployment wizard and attaching add-on profiles.
+
+### Create a Virtual Cluster
+
+To create a new Palette Virtual Cluster, complete the following actions.
+
+
+1. Log in to [Palette](https://console.spectrocloud.com).
+
+2. Navigate to the top-right **User Dropdown Menu** and select **App Mode**.
+
+3. Select **Palette Virtual Clusters** from the left **Main Menu**, click **+ Palette Virtual Clusters**, and provide the following information in the cluster creation wizard.
+    * Cluster Group: From the available host cluster groups, select the cluster group that will host the new virtual cluster.
+    * Palette virtual cluster name: Provide a custom virtual cluster name or use the default name.
+    * Resource limits: Provide the resource limits in terms of CPU, memory, and storage.
+
+
+   | **Palette Virtual Cluster Resource** | **Default** | **Minimum Limit** |
+   |--------------------------------------|-------------|-------------------|
+   | CPU (per request)                    | 4           | 3                 |
+   | Memory (per request)                 | 4 GiB       | 3 GiB             |
+   | Storage (per request)                | 2 GiB       | 0 GiB             |
+
+
+
+4. Review the information and deploy the Palette virtual cluster. It will be provisioned within the next few minutes.
+
+
+### Resource Tracking for Palette Virtual Clusters
+
+Palette users can track the available resources within a Cluster Group while launching a virtual cluster. The UI color codes provide a rough estimate of the available CPU, memory, and storage within the selected Cluster Group. Interpret the colors as follows:
+
+
+* **Grey**: Resources are already in use.
+
+
+* **Green**: The resources allocated to the virtual cluster that is being deployed.
+
+
+* **White**: The resources that remain available within the cluster group after the new virtual cluster is deployed.
+
+
+### Example Scenario
+
+The example screenshot below illustrates the following scenario. The Cluster Group selected in the example has a virtual cluster already running on it. The info box displays the recommended minimum CPU and memory allocated to the new virtual cluster. The color-coded bar summarizes the used, allocated, and available CPU, storage, and memory within the Cluster Group. Users can use this information to plan resource utilization based on the available resources.
+
+![Resource tracking color codes displayed when deploying a virtual cluster](/color-tracking.png)
+
+
+## Palette Virtual Cluster Pause and Resume
+
+Palette allows you to pause and resume Palette Virtual Clusters that are not in use. Pausing idle virtual clusters optimizes resource utilization and adds significant flexibility in managing operating costs and resources for Palette Virtual Clusters.
+
+
+### System and Resource Impact
+
+* The quota allocation is independent of a virtual cluster's pause or resume status.
+
+
+* The CPU and memory are freed and returned to the cluster group when you pause a virtual cluster.
+
+
+* Resources such as storage and load balancers remain allocated to a virtual cluster regardless of its state.
+
+
+* The Apps deployed on a virtual cluster enter a paused state when the cluster is paused.
+
+
+* New Apps cannot be deployed on a virtual cluster in the paused state.
+
+
+* Virtual clusters in a paused state will continue to appear as an entry in the Palette Dev Engine Console.
+
+
+Refer to [Pause and Resume Virtual Clusters](pause-restore-virtual-clusters.md) to learn how to pause and resume your Palette Virtual Clusters.
+
diff --git a/docs/docs-content/devx/palette-virtual-clusters/pause-restore-virtual-clusters.md b/docs/docs-content/devx/palette-virtual-clusters/pause-restore-virtual-clusters.md
new file mode 100644
index 0000000000..b11114cda6
--- /dev/null
+++ b/docs/docs-content/devx/palette-virtual-clusters/pause-restore-virtual-clusters.md
@@ -0,0 +1,49 @@
+---
+sidebar_label: "Pause and Resume Virtual Clusters"
+title: "Pause and Resume Virtual Clusters"
+description: "Learn how to pause and resume Palette Virtual Clusters."
+hide_table_of_contents: false
+sidebar_position: 0
+tags: ["devx", "app mode", "pde"]
+---
+
+
+To optimize resource utilization, Palette allows you to pause and resume virtual clusters that are not in use. This adds significant flexibility in managing operating costs and resources for virtual clusters.
+
+## Prerequisite
+
+* An active [Palette Virtual Cluster](palette-virtual-clusters.md).
+
+## Pause and Resume a Palette Virtual Cluster
+
+Invoke the pause and resume operations from the Palette Console.
+
+1. Log in to the **Palette Dev Engine** console.
+
+
+2. Navigate to the **Main Menu** and select the **Palette Virtual Cluster** you want to pause.
+
+
+3. Go to the cluster details page by clicking the name of the virtual cluster to be paused.
+
+
+4. Click **Settings** and select the **Pause** option. To resume a paused virtual cluster, select the **Resume** option.
+
+## Validate Pause/Resume
+
+You can verify the state of a cluster by reviewing the cluster details. To review the state of a cluster and its details, use the following steps.
+
+1. Navigate to the left **Main Menu** on the Palette Dev Engine console and click **Virtual Clusters**.
+
+
+2. Click the cluster whose status you want to check. This takes you to the cluster details page. On this page, look for the **Status** section, which displays the current state of the cluster.
+
+
+3. The Palette Virtual Cluster shows one of the following **Status** values:
+
+* **Paused**: For a paused virtual cluster
+* **Running**: For a resumed or running virtual cluster
+
+:::tip
+The status of a virtual cluster can also be viewed on the cluster listing page. The health column displays the status of the virtual cluster.
+::: diff --git a/docs/docs-content/devx/palette-virtual-clusters/resize-virtual-clusters.md b/docs/docs-content/devx/palette-virtual-clusters/resize-virtual-clusters.md new file mode 100644 index 0000000000..703b34a5c5 --- /dev/null +++ b/docs/docs-content/devx/palette-virtual-clusters/resize-virtual-clusters.md @@ -0,0 +1,43 @@ +--- +sidebar_label: "Resize Virtual Clusters" +title: "Resize Virtual Clusters" +description: "Learn how to resize Palette Virtual Clusters" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["devx", "app mode", "pde"] +--- + +You can resize virtual clusters from the default size of 4 CPU, 4 GiB Memory, 2 GiB Storage to a size that does not exceed the system-level quota for a cluster group like Beehive or the user quota for tenant-level cluster groups. + +## Prerequisite + +* An active and healthy [virtual cluster](palette-virtual-clusters.md). + +## Resize Virtual Clusters + + +Use the following steps to resize a virtual cluster. + + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. In App Mode, click **Virtual Clusters** in the **Main Menu**. + + +3. Select the virtual cluster you want to resize, and click **Settings > Cluster Settings**. + + +4. Click **Cluster Size** and specify new resource allocations for your virtual cluster. The size you specify cannot be greater than the system-level quota for a cluster group like Beehive or the user quota for tenant-level cluster groups. To learn more about resource quotas, refer to the [resource quota](../manage-dev-engine/resource-quota.md) documentation. + + +5. Save your changes. + + +## Validate + +To verify your changes, click **Virtual Clusters** in the left **Main Menu** and select the resized cluster. The virtual cluster Overview page displays the new **Allocated Quota** for the cluster. + + + + diff --git a/docs/docs-content/enterprise-version/_category_.json b/docs/docs-content/enterprise-version/_category_.json new file mode 100644 index 0000000000..dd5ae35c33 --- /dev/null +++ b/docs/docs-content/enterprise-version/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 160 +} diff --git a/docs/docs-content/enterprise-version/air-gap-repo.md b/docs/docs-content/enterprise-version/air-gap-repo.md new file mode 100644 index 0000000000..82c0d88ea7 --- /dev/null +++ b/docs/docs-content/enterprise-version/air-gap-repo.md @@ -0,0 +1,716 @@ +--- +sidebar_label: "Install in an Air Gap Environment" +title: "Install in an Air Gap Environment" +description: "Learn how to install Palette into an air gap environment." +icon: "" +hide_table_of_contents: false +sidebar_position: 70 +tags: ["self-hosted", "enterprise", "air-gap"] +--- + +You can install a self-hosted version of Palette into a VMware environment without direct internet access. This type of installation is referred to as an *air gap* installation. + +In a standard Palette installation, the following artifacts are downloaded by default from the public Palette repository. + +* Palette platform manifests and required platform packages. + + +* Container images for core platform components and 3rd party dependencies. + + +* Palette Packs. + + +The installation process changes a bit in an air gap environment due to the lack of internet access. Before the primary Palette installation step, you must download the three required Palette artifacts mentioned above. The other significant change is that Palette's default public repository is not used. 
Instead, a private repository supports all Palette operations pertaining to storing images and packages. + +The following diagram is a high-level overview of the order of operations required to deploy a self-hosted instance of Palette in an airgap environment. + + +![An architecture diagram outlining the five different install phases](/enterprise-version_air-gap-repo_overview-order-diagram.png) + + +The airgap installation can be simplified into five major phases. + + +1. Download the Open Virtual Appliance (OVA) image and deploy the instance hosting the private repository that supports the airgap environment. + + +2. The private Spectro Cloud repository is initialized, and all the Palette-required artifacts are downloaded and available. + + +3. The Palette Install OVA is deployed, configured, and initialized. + + +4. The scale-up process to a highly available three-node installation begins. + + +5. Palette is ready for usage. + + +This guide focuses on the first two installation phases, as the remaining ones are covered in the [Migrate Cluster to Enterprise](deploying-an-enterprise-cluster.md) guide and the [Install Using Quick-Start Mode](deploying-the-platform-installer.md) guide. + + +## Prerequisites + +* The following minimum resources are required to deploy Palette. + * 2 vCPU + * 4 GB of Memory + * 100 GB of Storage. Storage sizing depends on your intended update frequency and data retention model.

+ +* Ensure the following ports allow inbound network traffic. + * 80 + * 443 + * 5000 + * 8000 + + +* Request the Palette self-hosted installer image and the Palette air gap installer image. To request the installer images, please contact our support team by sending an email to support@spectrocloud.com. Kindly provide the following information in your email: + + - Your full name + - Organization name (if applicable) + - Email address + - Phone number (optional) + - A brief description of your intended use for the Palette Self-host installer image. + +Our dedicated support team will promptly get in touch with you to provide the necessary assistance and share the installer image. + +If you have any questions or concerns, please feel free to contact support@spectrocloud.com. + + +## Deploy Air Gapped Appliance + + +1. Log in to vCenter Server by using the vSphere Client. + + +2. Navigate to the Datacenter and select the cluster you want to use for the installation. Right-click on the cluster and select **Deploy OVF Template**. + + +3. Select the airgap OVA installer image you downloaded after receiving guidance from our support team. + + +4. Select the folder where you want to install the Virtual Machine (VM) and assign a name to the VM. + + +5. Next, select the compute resource. + + +6. Review the details page. You may get a warning message stating the certificate is not trusted. You can ignore the message and click **Next**. + + +7. Select your storage device and storage policy. Click on **Next** to proceed. + + +8. Choose a network for your appliance and select **Next**. + + +9. Fill out the remaining template customization options. You can modify the following input fields.

+ + | Parameter | Description | Default Value | + | --- | --- | -- | + | **Encoded user-data** | In order to fit into an XML attribute, this value is base64 encoded. This value will be decoded, and then processed normally as user-data. | - | + | **ssh public keys** | This field is optional but indicates that the instance should populate the default user's `authorized_keys` with the provided public key. | -| + | **Default User's password** | Setting this value allows password-based login. The password will be good for only a single login. If set to the string `RANDOM` then a random password will be generated, and written to the console. | - | + | **A Unique Instance ID for this instance** | Specifies the instance id. This is required and used to determine if the machine should take "first boot" actions| `id-ovf`| + | **Hostname** | Specifies the hostname for the appliance. | `ubuntuguest` | + | **URL to seed instance data from** | This field is optional but indicates that the instance should 'seed' user-data and meta-data from the given URL.| -| + +10. Click on **Next** to complete the deployment wizard. Upon completion, the cloning process will begin. The cloning process takes a few minutes to complete. + + +11. Power on the VM and click on the **Launch Web Console** button to access the instance's terminal. + + +12. Configure a static IP address on the node by editing **/etc/netplan/50-cloud-init.yaml**. + + ```shell + sudo vi /etc/netplan/50-cloud-init.yaml + ``` + + Use the following sample configuration as a starting point but feel free to change the configuration file as required for your environment. To learn more about Netplan, check out the [Netplan configuration examples](https://netplan.io/examples) from Canonical. + +
+ + ```yaml + network: + version: 2 + renderer: networkd + ethernets: + ens192: + dhcp4: false + addresses: + - 10.10.244.9/18 # your static IP and subnet mask + gateway4: 10.10.192.1 # your gateway IP + nameservers: + addresses: [10.10.128.8] # your DNS nameserver IP address. + ``` + + To exit Vi, press the **ESC** key and type `:wq` followed by the **Enter** key.
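+
+   Optionally, before applying the configuration in the next step, you can test it with `netplan try`. The command applies the change and automatically rolls it back after a timeout unless you confirm it, which is a useful safeguard when you are changing the address of the interface you are connected through.
+
+   ```shell
+   sudo netplan try
+   ```
+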

+ +13. Issue the `netplan` command to update the network settings. + +
+ + ```shell + sudo netplan apply + ``` + +14. Give the instance one to two minutes before issuing the following command. The next step is to start the airgap setup script that stands up the Spectro Repository. Issue the command below and replace `X.X.X.X` with the static IP you provided to the Netplan configuration file. + +
+ + ```shell + sudo /opt/spectro/airgap-setup.sh X.X.X.X + ``` + + Record the output of the setup command as you will use it when deploying the Quick Start appliance later on in the installation process. + + Example Output: + ```shell hideClipboard + Setting up Manifests + Setting up Manifests + Setting up SSL Certs + Setup Completed + + Details: + ------- + Spectro Cloud Repository + UserName: XXXXXXXXX + Password: XXXXXXXXXX + Location: https://10.10.249.12 + Artifact Repo Certificate: + LS0tLS1CRUdJ............. + + Pack Registry + URL: https://10.10.249.12:5000 + Username: XXXXXXXXX + Password: XXXXXXXXX + ``` + +15. If you need to configure the instance with proxy settings, go ahead and do so now. You can configure proxy settings by using environment variables. Replace the values with your environment's respective values. + +
+ + ```shell + export http_proxy=http://10.1.1.1:8888 + export https_proxy=https://10.1.1.1:8888 + export no_proxy=.example.dev,10.0.0.0/8 + ``` + +16. The next set of steps will download the required binaries to support a Palette installation, such as the Palette Installer, required Kubernetes packages, and kubeadm packages. You can download these artifacts from the instance, or externally and transfer them to the instance. Click on each tab for further guidance. + +
+ + :::caution + + You must download the following three resources. Our support team will provide you with the credentials and download URL. + Click on each tab to learn more about each resource and steps for downloading. + + ::: + +
+ + + + + + Download the binary by using the URL provided by the Palette support team. Change the version number as needed. + +
+ + ```shell + curl --user XXXX:YYYYY https:///airgap/packs/airgap-v3.3.15.bin \ + --output airgap-k8s-v3.3.15.bin + ``` + +:::tip + + If you receive a certificate error, use the `-k` or `--insecure` flag. + +::: + + Assign the proper permissions and start the download script. + +
+ + ```shell + sudo chmod 755 ./airgap-k8s-v3.3.15.bin && sudo ./airgap-k8s-v3.3.15.bin + ``` + + Example Output: + ```shell + sudo ./airgap-k8s-v3.3.15.bin + Verifying archive integrity... 100% MD5 checksums are OK. All good. + Uncompressing Airgap K8S Images Setup - Version 3.3.15 100% + Setting up Packs + Setting up Images + - Pushing image k8s.gcr.io/kube-controller-manager:v1.22.10 + - Pushing image k8s.gcr.io/kube-proxy:v1.22.10 + - Pushing image k8s.gcr.io/kube-apiserver:v1.22.10 + - Pushing image k8s.gcr.io/kube-scheduler:v1.22.10 + … + Setup Completed + ``` + + + + +
+ + + + Download the binary by using the URL provided by the Palette support team. Change the version number as needed. + +
+ + ```shell + curl --user XXXX:YYYYY https:///airgap/packs/airgap-k8s-v3.3.15.bin \ + --output airgap-k8s-v3.3.15.bin + ``` + + +:::tip + + If you receive a certificate error, use the `-k` or `--insecure` flag. + +::: + + Assign the proper permissions and start the download script. + +
+ + ```shell + sudo chmod 755 ./airgap-k8s-v3.3.15.bin && sudo ./airgap-k8s-v3.3.15.bin + ``` + + Example Output: + ```shell + sudo ./airgap-k8s-v3.3.15.bin + Verifying archive integrity... 100% MD5 checksums are OK. All good. + Uncompressing Airgap K8S Images Setup - Version 3.3.15 100% + Setting up Packs + Setting up Images + - Pushing image k8s.gcr.io/kube-controller-manager:v1.22.10 + - Pushing image k8s.gcr.io/kube-proxy:v1.22.10 + - Pushing image k8s.gcr.io/kube-apiserver:v1.22.10 + - Pushing image k8s.gcr.io/kube-scheduler:v1.22.10 + … + Setup Completed + ``` + + +
+ + + + Download the binary by using the URL provided by the Palette support team. Change the version number as needed. + +
+ + ```shell + curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-kubeadm.bin \ + --output airgap-edge-kubeadm.bin + ``` + +:::tip + + If you receive a certificate error, use the `-k` or `--insecure` flag. + +::: + + Assign the proper permissions and start the download script. + +
+ + ```shell + sudo chmod 755 ./airgap-edge-kubeadm.bin && sudo ./airgap-edge-kubeadm.bin + ``` + + Example Output: + ```shell + sudo ./airgap-edge-kubeadm.bin + Verifying archive integrity... 100% MD5 checksums are OK. All good. + Uncompressing Airgap Edge Packs - Kubeadm Images 100% + Setting up Images + - Skipping image k8s.gcr.io/coredns/coredns:v1.8.6 + - Pushing image k8s.gcr.io/etcd:3.5.1-0 + - Pushing image k8s.gcr.io/kube-apiserver:v1.23.12 + - Pushing image k8s.gcr.io/kube-controller-manager:v1.23.12 + - Pushing image k8s.gcr.io/kube-proxy:v1.23.12 + … + Setup Completed + ``` + +
+ + +
+ +
+
+17. If you will be using Edge deployments, download the packages your Edge deployments will need. If you are not planning to use Edge, skip to the end of this section. You can return to this step later and add the packages if needed. Click on the `...` tab for additional options.
+
+
+ + + + + + Download the binary by using the URL provided by the Palette support team. Change the version number as needed. + +
+ + ```shell + curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-ubuntu22-k3s.bin \ + --output airgap-edge-ubuntu22-k3s.bin + ``` + +:::tip + + If you receive a certificate error, use the `-k` or `--insecure` flag. + +::: + + Assign the proper permissions and start the download script. + +
+ + ```shell + sudo chmod 755 ./airgap-edge-ubuntu22-k3s.bin && sudo ./airgap-edge-ubuntu22-k3s.bin + ``` + + +
+ + + + + Download the binary by using the URL provided by the Palette support team. Change the version number as needed. + +
+ + ```shell + curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-ubuntu22-rke.bin \ + --output airgap-edge-ubuntu22-rke.bin + ``` + +:::tip + + If you receive a certificate error, use the `-k` or `--insecure` flag. + +::: + + Assign the proper permissions and start the download script. + +
+ + ```shell + sudo chmod 755 ./airgap-edge-ubuntu22-rke.bin && sudo ./airgap-edge-ubuntu22-rke.bin + ``` + +
+ + + + Download the binary by using the URL provided by the Palette support team. Change the version number as needed. + +
+ + ```shell + curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-ubuntu22-kubeadm.bin \ + --output airgap-edge-ubuntu22-kubeadm.bin + ``` + +:::tip + +If you receive a certificate error, use the `-k` or `--insecure` flag. + +::: + + Assign the proper permissions and start the download script. + +
+ + ```shell + sudo chmod 755 ./airgap-edge-ubuntu22-kubeadm.bin && sudo ./airgap-edge-ubuntu22-kubeadm.bin + ``` + +
+ + + + Download the binary by using the URL provided by the Palette support team. Change the version number as needed. + +
+ + ```shell + curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-ubuntu20-k3s.bin \ + --output airgap-edge-ubuntu20-k3s.bin + ``` + +:::tip + +If you receive a certificate error, use the `-k` or `--insecure` flag. + +::: + + Assign the proper permissions and start the download script. + +
+ + ```shell + sudo chmod 755 ./airgap-edge-ubuntu20-k3s.bin && sudo ./airgap-edge-ubuntu20-k3s.bin + ``` + +
+ + + + Download the binary by using the URL provided by the Palette support team. Change the version number as needed. + +
+ + ```shell + curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-ubuntu20-rke.bin \ + --output airgap-edge-ubuntu20-rke.bin + ``` + +:::tip + +If you receive a certificate error, use the `-k` or `--insecure` flag. + +::: + + Assign the proper permissions and start the download script. + +
+ + ```shell + sudo chmod 755 ./airgap-edge-ubuntu20-rke.bin && sudo ./airgap-edge-ubuntu20-rke.bin + ``` + +
+ + + + + Download the binary by using the URL provided by the Palette support team. Change the version number as needed. + +
+ + ```shell + curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-ubuntu20-kubeadm.bin \ + --output airgap-edge-ubuntu20-kubeadm.bin + ``` + +:::tip + +If you receive a certificate error, use the `-k` or `--insecure` flag. + +::: + + Assign the proper permissions and start the download script. + +
+ + ```shell + sudo chmod 755 ./airgap-edge-ubuntu20-kubeadm.bin && sudo ./airgap-edge-ubuntu20-kubeadm.bin + ``` + +
+ + + + Download the binary by using the URL provided by the Palette support team. Change the version number as needed. + +
+ + ```shell + curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-opensuse-k3s.bin \ + --output airgap-edge-opensuse-k3s.bin + ``` + +:::tip + +If you receive a certificate error, use the `-k` or `--insecure` flag. + +::: + + Assign the proper permissions and start the download script. + +
+ + ```shell + sudo chmod 755 ./airgap-edge-opensuse-k3s.bin && sudo ./airgap-edge-opensuse-k3s.bin + ``` + +
+ + + + Download the binary by using the URL provided by the Palette support team. Change the version number as needed. + +
+ + ```shell + curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-opensuse-rke.bin \ + --output airgap-edge-opensuse-rke.bin + ``` + +:::tip + +If you receive a certificate error, use the `-k` or `--insecure` flag. + +::: + + Assign the proper permissions and start the download script. + +
+ + ```shell + sudo chmod 755 ./airgap-edge-opensuse-rke.bin && sudo ./airgap-edge-opensuse-rke.bin + ``` + +
+ + + + Download the binary by using the URL provided by the Palette support team. Change the version number as needed. + +
+ + ```shell + curl --user XXXX:YYYYY https:///airgap/packs/3.3/airgap-edge-opensuse-kubeadm.bin \ + --output airgap-edge-opensuse-kubeadm.bin + ``` + +:::tip + +If you receive a certificate error, use the `-k` or `--insecure` flag. + +::: + + Assign the proper permissions and start the download script. + +
+ + ```shell + sudo chmod 755 ./airgap-edge-opensuse-kubeadm.bin && sudo ./airgap-edge-opensuse-kubeadm.bin + ``` + +
+ + +
+ + +---- + + +The next step of the installation process is to begin the deployment of an appliance using the instructions in the [Migrate Cluster to Enterprise Mode](deploying-an-enterprise-cluster.md). If you need to review the Spectro Cloud Repository details, issue the following command for detailed output. + +
+ +```shell +sudo /bin/airgap-setup.sh +``` + +
+ +:::info + +You can review all the logs related to the setup of the private Spectro repository in **/tmp/airgap-setup.log**. + +::: + + +## Validate + +You can validate that the Spectro Repository you deployed is available and ready for the next steps of the installation process. If you provided the appliance with an SSH key then you can skip to step five. + +
+1. Log in to vCenter Server by using the vSphere Client. + + +2. Navigate to your Datacenter and locate your VM. Click on the VM to access its details page. + + +3. Power on the VM. + + +4. Click on **Launch Web Console** to access the terminal. + + +5. Log in with the user `ubuntu` and the user password you specified during the installation. If you are using SSH, use the following command, and ensure you specify the path to your SSH private key and replace the IP address with your appliance's static IP. + +
+
+   ```shell
+   ssh -i ~/path/to/your/file ubuntu@10.1.1.1
+   ```
+
+
+6. Verify the registry server is up and available. Replace the `10.1.1.1` value with your appliance's IP address.
+
+ + ```shell + curl --insecure https://10.1.1.1:5000/health + ``` + + Example Output: + ```shell + {"status":"UP"} + ``` + +7. Ensure you can log into your registry server. Use the credentials provided to you by the `airgap-setup.sh` script. Replace the `10.1.1.1` value with your appliance's IP address. + +
+ + ```shell + curl --insecure --user admin:admin@airgap https://10.1.1.1:5000/v1/_catalog + ``` + + Example Output: + ``` + {"metadata":{"lastUpdatedTime":"2023-04-11T21:12:09.647295105Z"},"repositories":[{"name":"amazon-linux-eks","tags":[]},{"name":"aws-efs","tags":[]},{"name":"centos-aws","tags":[]},{"name":"centos-azure","tags":[]},{"name":"centos-gcp","tags":[]},{"name":"centos-libvirt","tags":[]},{"name":"centos-vsphere","tags":[]},{"name":"cni-aws-vpc-eks","tags":[]},{"name":"cni-aws-vpc-eks-helm","tags":[]},{"name":"cni-azure","tags":[]},{"name":"cni-calico","tags":[]},{"name":"cni-calico-azure","tags":[]},{"name":"cni-cilium-oss","tags":[]},{"name":"cni-custom","tags":[]},{"name":"cni-kubenet","tags":[]},{"name":"cni-tke-global-router","tags":[]},{"name":"csi-aws","tags":[]},{"name":"csi-aws-ebs","tags":[]},{"name":"csi-aws-efs","tags":[]},{"name":"csi-azure","tags":[]},{"name":"csi-gcp","tags":[]},{"name":"csi-gcp-driver","tags":[]},{"name":"csi-longhorn","tags":[]},{"name":"csi-longhorn-addon","tags":[]},{"name":"csi-maas-volume","tags":[]},{"name":"csi-nfs-subdir-external","tags":[]},{"name":"csi-openstack-cinder","tags":[]},{"name":"csi-portworx-aws","tags":[]},{"name":"csi-portworx-gcp","tags":[]},{"name":"csi-portworx-generic","tags":[]},{"name":"csi-portworx-vsphere","tags":[]},{"name":"csi-rook-ceph","tags":[]},{"name":"csi-rook-ceph-addon","tags":[]},{"name":"csi-tke","tags":[]},{"name":"csi-topolvm-addon","tags":[]},{"name":"csi-vsphere-csi","tags":[]},{"name":"csi-vsphere-volume","tags":[]},{"name":"edge-k3s","tags":[]},{"name":"edge-k8s","tags":[]},{"name":"edge-microk8s","tags":[]},{"name":"edge-native-byoi","tags":[]},{"name":"edge-native-opensuse","tags":[]},{"name":"edge-native-ubuntu","tags":[]},{"name":"edge-rke2","tags":[]},{"name":"external-snapshotter","tags":[]},{"name":"generic-byoi","tags":[]},{"name":"kubernetes","tags":[]},{"name":"kubernetes-aks","tags":[]},{"name":"kubernetes-coxedge","tags":[]},{"name":"kubernetes-eks","tags":[]},{"name":"kubernetes-eksd","tags":[]},{"name":"kubernetes-konvoy","tags":[]},{"name":"kubernetes-microk8s","tags":[]},{"name":"kubernetes-rke2","tags":[]},{"name":"kubernetes-tke","tags":[]},{"name":"portworx-add-on","tags":[]},{"name":"spectro-mgmt","tags":[]},{"name":"tke-managed-os","tags":[]},{"name":"ubuntu-aks","tags":[]},{"name":"ubuntu-aws","tags":[]},{"name":"ubuntu-azure","tags":[]},{"name":"ubuntu-coxedge","tags":[]},{"name":"ubuntu-edge","tags":[]},{"name":"ubuntu-gcp","tags":[]},{"name":"ubuntu-libvirt","tags":[]},{"name":"ubuntu-maas","tags":[]},{"name":"ubuntu-openstack","tags":[]},{"name":"ubuntu-vsphere","tags":[]},{"name":"volume-snapshot-controller","tags":[]}],"listMeta":{"continue":""}} + ``` + + +8. Next, validate the Spectro repository is available. Replace the IP with your appliance's IP address. + +
+ + ```shell + curl --insecure --user spectro:admin@airgap https://10.1.1.1 + ``` + + Output: + ```html hideClipboard + + + + Welcome to nginx! + + + +

+   <h1>Welcome to nginx!</h1>
+   <p>If you see this page, the nginx web server is successfully installed and
+   working. Further configuration is required.</p>
+
+   <p>For online documentation and support please refer to
+   <a href="http://nginx.org/">nginx.org</a>.<br/>
+   Commercial support is available at
+   <a href="http://nginx.com/">nginx.com</a>.</p>
+
+   <p><em>Thank you for using nginx.</em></p>
+   </body>
+   </html>
+ + + ``` diff --git a/docs/docs-content/enterprise-version/deploying-an-enterprise-cluster.md b/docs/docs-content/enterprise-version/deploying-an-enterprise-cluster.md new file mode 100644 index 0000000000..81b04f91ed --- /dev/null +++ b/docs/docs-content/enterprise-version/deploying-an-enterprise-cluster.md @@ -0,0 +1,388 @@ +--- +sidebar_label: "Install Enterprise Cluster" +title: "Install Enterprise Cluster" +description: "Learn how to install self-hosted Palette or convert a self-hosted single node cluster to a highly available three node cluster." +icon: "" +hide_table_of_contents: false +sidebar_position: 20 +tags: ["self-hosted", "enterprise"] +--- + +You have two options for installing Palette. You can use the Palette CLI to install a new self-hosted Palette instance or convert an existing single-node cluster (Quick-Start Mode) to a highly available three-node cluster. Select the method below that corresponds to your installation type. + +- [Install With CLI](#install-with-cli) + +- [Install With OVA](#install-with-ova) + + +
+
+:::caution
+
+
+Starting with Palette 4.0.0, the Palette CLI and the Helm Chart are the only supported methods for installing Palette. The Palette OVA installation method is only available for versions 3.4 and earlier. Refer to the CLI tab below or the [Kubernetes Install Helm Chart](deploying-palette-with-helm.md) guide for additional guidance on how to install Palette.
+
+:::
+
+
+
+
+
+## Install With CLI
+
+You install Palette using the Palette Command Line Interface (CLI), which prompts you for the required configuration details, writes them to a configuration file, and then creates a three-node enterprise cluster for high availability (HA). You can invoke the Palette CLI on any Linux x86-64 system with the Docker daemon installed and connectivity to the VMware vSphere environment where Palette will be deployed.
+
+
+### Prerequisites
+
+
+- An AMD64 Linux environment with connectivity to the VMware vSphere environment.
+
+
+- [Docker](https://docs.docker.com/engine/install/) or equivalent container runtime installed and available on the Linux host.
+
+
+- Palette CLI installed and available. Refer to the Palette CLI [Install](../palette-cli/install-palette-cli.md#download-and-setup) page for guidance.
+
+
+- Review required VMware vSphere [permissions](on-prem-system-requirements.md#vmware-privileges).
+
+
+- We recommend the following resources for Palette. Refer to the [Palette size guidelines](on-prem-system-requirements.md#self-hosted-configuration) for additional sizing information.
+
+  - 8 CPUs per VM.
+
+  - 16 GB Memory per VM.
+
+  - 100 GB Disk Space per VM.
+
+
+- The following network ports must be accessible for Palette to operate successfully.
+
+  - TCP/443: Inbound to and outbound from the Palette management cluster.
+
+  - TCP/6443: Outbound traffic from the Palette management cluster to the deployed cluster's Kubernetes API server.
+
+
+- Ensure you have an SSL certificate that matches the domain name you will assign to Palette. You will need this to enable HTTPS encryption for Palette. Reach out to your network administrator or security team to obtain the SSL certificate. You need the following files, and you can verify that the certificate and key match with the example after this list:
+
+  - x509 SSL certificate file in base64 format.
+
+  - x509 SSL certificate key file in base64 format.
+
+  - x509 SSL certificate authority file in base64 format. This file is optional.
+
+
+- Zone tagging is required for dynamic storage allocation across fault domains when provisioning workloads that require persistent storage. Refer to [Zone Tagging](on-prem-system-requirements.md#zone-tagging) for more information.
+
+
+- Assigned IP addresses for application workload services, such as Load Balancer services.
+
+
+- Shared Storage between VMware vSphere hosts.
+
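+
+As a quick, optional check that a PEM-encoded certificate and its private key belong together, you can compare their public key modulus hashes with `openssl`. The file names below are placeholders, and the check as written applies to RSA keys; adjust it for your own files and key type.
+
+```shell
+openssl x509 -noout -modulus -in ssl-certificate.pem | openssl md5
+openssl rsa -noout -modulus -in ssl-certificate-key.pem | openssl md5
+```
+
+If both commands print the same digest, the certificate and key match.
+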
+ +:::info + +Self-hosted Palette installations provide a system Private Cloud Gateway (PCG) out-of-the-box and typically do not require a separate, user-installed PCG. However, you can create additional PCGs as needed to support provisioning into remote data centers that do not have a direct incoming connection from the Palette console. To learn how to install a PCG on VMware, check out the [VMware](../clusters/data-center/vmware.md) guide. + +::: + +
+ +### Deployment + + +The video below provides a demonstration of the installation wizard and the prompts you will encounter. Take a moment to watch the video before you begin the installation process. Make sure to use values that are appropriate for your environment. Use the **three-dots Menu** in the lower right corner of the video to expand the video to full screen and to change the playback speed. + +
+ + + + +Use the following steps to install Palette. + + +1. Open a terminal window and invoke the Palette CLI by using the `ec` command to install the enterprise cluster. The interactive CLI prompts you for configuration details and then initiates the installation. For more information about the `ec` subcommand, refer to [Palette Commands](../palette-cli/commands.md#ec). + + ```bash + palette ec install + ``` + +2. At the **Enterprise Cluster Type** prompt, choose **Palette**. + + +3. Type `y` if you want to use Ubuntu Pro. Otherwise, type `n`. If you choose to use Ubuntu Pro, you will be prompted to enter your Ubuntu Pro token. + + +4. Provide the repository URL you received from our support team. + + +5. Enter the repository credentials. + + +6. Choose `VMware vSphere` as the cloud type. This is the default. + + +7. Type an enterprise cluster name. + + +8. When prompted, enter the information listed in each of the following tables. + +
+ + #### Environment Configuration + + |**Parameter**| **Description**| + |:-------------|----------------| + |**HTTPS Proxy**|Leave this blank unless you are using an HTTPS Proxy. This setting will be propagated to all EC nodes and all of its target cluster nodes. Example: `https://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| + |**HTTP Proxy**|Leave this blank unless you are using an HTTP Proxy. This setting will be propagated to all EC nodes and all of its target cluster nodes. Example: `http://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| + |**No Proxy**|The default is blank. You can add a comma-separated list of local network CIDR addresses, hostnames, and domain names that should be excluded from being a proxy. This setting will be propagated to all the nodes to bypass the proxy server. Example if you have a self-hosted environment: `maas.company.com,10.10.0.0/16`.| + |**Proxy CA Certificate Filepath**|The default is blank. You can provide the filepath of a CA certificate on the installer host. If provided, this CA certificate will be copied to each host in the PCG cluster during deployment. The provided path will be used on the PCG cluster hosts. Example: `/usr/local/share/ca-certificates/ca.crt`.| + |**Pod CIDR**|Enter the CIDR pool IP that will be used to assign IP addresses to pods in the EC cluster. The pod IP addresses should be unique and not overlap with any machine IPs in the environment.| + |**Service IP Range**|Enter the IP address range that will be used to assign IP addresses to services in the EC cluster. The service IP addresses should be unique and not overlap with any machine IPs in the environment.| + +
+ + +9. Select the OCI registry type and provide the configuration values. Review the following table for more information. + +
+ + #### Pack & Image Registry Configuration + + | **Parameter** | **Description** | + |---------------------------|-----------------------------------------| + | **Registry Type** | Specify the type of registry. Allowed values are `OCI` or `OCI ECR`. | + | **Registry Name** | Enter the name of the registry. | + | **Registry Endpoint** | Enter the registry endpoint. | + | **Registry Base Path** | Enter the registry base path. | + |**Allow Insecure Connection** | Bypasses x509 verification. Type `Y` if using a vSphere instance with self-signed Transport Layer Security (TLS) certificates. Otherwise, type `n`.| + | **Registry Username** or **Registry Access Key** | Enter the registry username or the access key if using `OCI ECR`. | + | **Registry Password** or **Registry Secret Key** | Enter the registry password or the secret key if using `OCI ECR`. | + | **Registry Region** | Enter the registry region. This option is only available if you are using `OCI ECR`. | + | **ECR Registry Private** | Type `y` if the registry is private. Otherwise, type `n`. | + | **Use Public Registry for Images** | Type `y` to use a public registry for images. Type `n` to a different registry for images. If you are using another registry for images, you will be prompted to enter the registry URL, base path, username, and password. | + +
+ +10. Next, specify the database storage size to allocate for Palette. The default is 20 GB. Refer to the [size guidelines](on-prem-system-requirements.md#system-requirements) for additional information. + + + +11. The next set of prompts is for the VMware vSphere account information. Enter the information listed in the following table. + +
+ + #### VMware vSphere Account Information + + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + |**vSphere Endpoint** | VMware vSphere endpoint. Must be a fully qualified domain name (FQDN) or IP address without a scheme - that is, without an IP protocol, such as `https://`. Example: `vcenter.mycompany.com`.| + |**vSphere Username** | VMware vSphere account username.| + |**vSphere Password**| VMware vSphere account password.| + |**Allow Insecure Connection** | Bypasses x509 verification. Type `Y` if using a VMware vSphere instance with self-signed Transport Layer Security (TLS) certificates. Otherwise, type `n`.| + +
+ + #### VMware vSphere Cluster Configuration + + This information determines where Palette will be deployed in your VMware vSphere environment. The Palette CLI will use the provided VMware credentials to retrieve information from your VMware vSphere environment and present options for you to select from. + +
+ + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + |**Datacenter**| The installer retrieves the Datacenter automatically. | + |**Folder** | Select the folder that contains the VM instance. | + | **Cluster** | Select the cluster where you want to deploy Palette. | + | **Network** | Select the network where you want to deploy Palette. | + | **Resource Pool** | Select the resource pool where you want to deploy Palette. | + | **Datastore** | Select the datastore where you want to deploy Palette. | + |**Fault Domains** | Configure one or more fault domains by selecting values for these properties: Cluster, Network (with network connectivity), Resource Pool, and Storage Type (Datastore or VM Storage Policy). Note that when configuring the Network, if you are using a distributed switch, choose the network that contains the switch. | + |**NTP Servers** | You can provide a list of Network Time Protocol (NTP) servers. | + |**SSH Public Keys** | Provide any public SSH keys to access your Palette VMs. This option opens up your system's default text editor. Vi is the default text editor for most Linux distributions. To review basic vi commands, check out the [vi Commands](https://www.cs.colostate.edu/helpdocs/vi.html) reference. | + + +12. Specify the IP pool configuration. The placement type can be Static or Dynamic Domain Name Server (DDNS). Choosing static placement creates an IP pool from which VMs are assigned IP addresses. Choosing DDNS assigns IP addresses using DNS. + +
+ + #### Static Placement Configuration + | **Parameter** | **Description** | + |---------------------------|-----------------------------------------| + | **IP Start range** | Enter the first address in the EC IP pool range. | + | **IP End range** | Enter the last address in the EC IP pool range. | + | **Network Prefix** | Enter the network prefix for the IP pool range. Valid values are in [0, 32]. Example: `18`. | + | **Gateway IP Address** | Enter the IP address of the static IP gateway. | + | **Name servers** | Comma-separated list of DNS name server IP addresses. | + | **Name server search suffixes** | An optional comma-separated list of DNS search domains. | + + +
+ + +13. The last set of prompts is for the vSphere machine configuration. Enter the information listed in the following table. + +
+ + #### vSphere Machine Configuration + + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + | **Number of CPUs** | The number of CPUs allocated to each VM node instance.| + | **Memory** | The amount of memory allocated to each VM node instance.| + | **Disk Size** | The size of the disk allocated to each VM node instance.| + + +
+ + + The installation process stands up a [kind](https://kind.sigs.k8s.io/) cluster locally that will orchestrate the remainder of the installation. The installation takes some time. + +
+ + Upon completion, the enterprise cluster configuration file named `ec.yaml` contains the information you provided, and its location is displayed in the terminal. Credentials and tokens are encrypted in the YAML file. + +
+ + ```bash hideClipboard + ==== Enterprise Cluster config saved ==== + Location: :/home/spectro/.palette/ec/ec-20230706150945/ec.yaml + ``` + +
+ + When the installation is complete, Enterprise Cluster Details that include a URL and default credentials are displayed in the terminal. You will use these to access the Palette system console. + +
+ + ```bash hideClipboard + ==================================== + ==== Enterprise Cluster Details ==== + ==================================== + Console URL: https://10.10.189.100/system + Username: ********** + Password: ********** + ``` + + +14. Copy the URL to the browser to access the system console. You will be prompted to reset the password. + +
+ + :::info + + The first time you visit the Palette system console, a warning message about an untrusted SSL certificate may appear. This is expected, as you have not yet uploaded your SSL certificate to Palette. You can ignore this warning message and proceed. + + ::: + +
+ + ![Screenshot of the Palette system console showing Username and Password fields.](/palette_installation_install-on-vmware_palette-system-console.png) + +
+ + +15. Log in to the system console using the credentials provided in the Enterprise Cluster Details output. After login, you will be prompted to create a new password. Enter a new password and save your changes. You will be redirected to the Palette system console. + + +16. After login, a Summary page is displayed. Palette is installed with a self-signed SSL certificate. To assign a different SSL certificate you must upload the SSL certificate, SSL certificate key, and SSL certificate authority files to Palette. You can upload the files using the Palette system console. Refer to the [Configure HTTPS Encryption](/vertex/system-management/ssl-certificate-management) page for instructions on how to upload the SSL certificate files to Palette. + + +17. The last step is to start setting up a tenant. To learn how to create a tenant, check out the [Tenant Management](../vertex/system-management/tenant-management.md) guide. + +
+ + ![Screenshot of the Summary page showing where to click Go to Tenant Management button.](/palette_installation_install-on-vmware_goto-tenant-management.png) + + +### Validate + +You can verify the installation is successful if you can access the system console using the IP address provided in Enterprise Cluster Details and if the Summary page displays the **Go to Tenant Management** button. + +You can also validate that a three-node Kubernetes cluster is launched and Palette is deployed on it. + +
+ +1. Log in to the vCenter Server by using vSphere Client. + + +2. Navigate to the Datacenter and locate your VM instance. + + +3. Select the VM to access its details page, and verify three nodes are listed. + + +4. Open a web browser session, and use the IP address provided in Enterprise Cluster Details at the completion of the installation to connect to the Palette system console. Copy the IP address to the address bar and append `/system`. + + +5. Log in using your credentials. + + +6. A **Summary** page will be displayed that contains a tile with a **Go to Tenant Management** button. After initial installation, the **Summary** page shows there are zero tenants. + + + + +## Install With OVA + +### Enterprise Mode + +The Palette Enterprise Mode is a multi-node, highly-available installation of the Palette platform suitable for production purposes. Installation involves instantiating the on-prem platform installer VM and invoking the "Enterprise Cluster Migration" wizard. Please follow [these](deploying-the-platform-installer.md) steps to deploy the installer VM and observe the [monitoring console](deploying-the-platform-installer.md#monitor-installation) to ensure installation is successful. After a successful installation of the platform installer, proceed to enterprise cluster migration. + +
+
+:::info
+
+Deployment of an enterprise cluster is a migration process from the quick start mode. You may choose to deploy the enterprise cluster on day 1 right after instantiating the platform installer VM, or use the system in the quick start mode initially and at a later point invoke the enterprise cluster migration wizard to deploy the enterprise cluster. All the data from the quick start mode is migrated to the enterprise cluster as part of this migration process.
+
+:::
+
+
+
+1. Open the On-Prem system console from a browser window by navigating to https://<VM IP Address>/system and log in.
+
+
+2. Navigate to the Enterprise Cluster Migration wizard from the menu on the left-hand side.
+
+
+3. Enter the vCenter credentials to be used to launch the enterprise cluster. Provide the vCenter server, username, and password. Check the `Use self-signed certificates` box if applicable. Validate your credentials and click the `Next` button to proceed to IP Pool Configuration.
+
+
+4. Enter the IPs to be used for Enterprise Cluster VMs as a `Range` or a `Subnet`. At least five IP addresses are required in the range for the installation and ongoing management. Provide the details of the `Gateway` and the `Nameserver addresses`. Any search suffixes being used can be entered in the `Nameserver search suffix` box. Click `Next` to proceed to Cloud Settings.
+
+
+5. Select the datacenter and the folder to be used for the enterprise cluster VMs. Select the desired compute cluster, resource pools, datastore, and network. For high availability purposes, you may choose to distribute the three VMs across multiple compute clusters. If this is desired, invoke the "Add Domain" option to enter multiple sets of properties.
+
+
+6. Add an SSH public key and, optionally, NTP servers, and click "Confirm".
+
+
+7. The Enterprise cluster deployment will proceed through the following three steps:
+    * Deployment - A three-node Kubernetes cluster is launched and the Palette platform is deployed on it. This typically takes about 10 minutes.
+    * Data Migration - Data from the installer VM is migrated to the newly created enterprise cluster.
+    * Tenant Migration - If any tenants were created prior to the enterprise cluster migration, which would typically be the case if the system was used in the quick start mode initially, all those tenants, as well as the management of any such tenant clusters previously deployed, will be migrated to the enterprise cluster.
+
+
+8. Once the enterprise cluster is fully deployed, access the On-Prem System Console and Management Console on this new cluster. The platform installer VM can be safely powered off at this point.
+
+
+
+ +## Resources + +- [Palette CLI](../palette-cli/install-palette-cli.md#download-and-setup) + + +- [Airgap Install Instructions](air-gap-repo.md) \ No newline at end of file diff --git a/docs/docs-content/enterprise-version/deploying-palette-with-helm.md b/docs/docs-content/enterprise-version/deploying-palette-with-helm.md new file mode 100644 index 0000000000..f8f210a724 --- /dev/null +++ b/docs/docs-content/enterprise-version/deploying-palette-with-helm.md @@ -0,0 +1,669 @@ +--- +sidebar_label: "Install using Helm Chart" +title: "Install using Helm Chart" +description: "Learn how to deploy self-hosted Palette to a Kubernetes cluster using a Helm Chart." +icon: "" +hide_table_of_contents: false +sidebar_position: 30 +tags: ["self-hosted", "enterprise"] +--- + + +You can use the Palette Helm Chart to install Palette in a multi-node Kubernetes cluster in your production environment. + +This installation method is common in secure environments with restricted network access that prohibits using Palette SaaS. Review our [architecture diagrams](../architecture/networking-ports.md) to ensure your Kubernetes cluster has the necessary network connectivity for Palette to operate successfully. + + +Depending on what version of Palette you are using, the available parameters will be different. Select the tab below that corresponds to the version of Palette you are using. + +
+ + + + +## Prerequisites + +- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) is installed and available. + + +- [Helm](https://helm.sh/docs/intro/install/) is installed and available. + + +- Access to the target Kubernetes cluster's kubeconfig file. You must be able to interact with the cluster using `kubectl` commands and have sufficient permissions to install Palette. We recommend using a role with cluster-admin permissions to install Palette. + + +- The Kubernetes cluster must be set up on a supported version of Kubernetes, which includes versions v1.25 to v1.27. + + + +- Ensure the Kubernetes cluster does not have Cert Manager installed. Palette requires a unique Cert Manager configuration to be installed as part of the installation process. If Cert Manager is already installed, you must uninstall it before installing Palette. + + +- The Kubernetes cluster must have a Container Storage Interface (CSI) installed and configured. Palette requires a CSI to store persistent data. You may install any CSI that is compatible with your Kubernetes cluster. + + + +- We recommended the following resources for Palette. Refer to the [Palette size guidelines](on-prem-system-requirements.md#system-requirements) for additional sizing information. + + - 8 CPUs per node. + + - 16 GB Memory per node. + + - 100 GB Disk Space per node. + + - A Container Storage Interface (CSI) for persistent data. + + - A minimum of three worker nodes or three untainted control plane nodes. + + +- The following network ports must be accessible for Palette to operate successfully. + + - TCP/443: Inbound and outbound to and from the Palette management cluster. + + - TCP/6443: Outbound traffic from the Palette management cluster to the deployed clusters' Kubernetes API server. + + +- Ensure you have an SSL certificate that matches the domain name you will assign to Palette. You will need this to enable HTTPS encryption for Palette. Reach out to your network administrator or security team to obtain the SSL certificate. You need the following files: + + - x509 SSL certificate file in base64 format. + + - x509 SSL certificate key file in base64 format. + + - x509 SSL certificate authority file in base64 format. + + +- Ensure the OS and Kubernetes cluster you are installing Palette onto is FIPS-compliant. Otherwise, Palette and its operations will not be FIPS-compliant. + + +- A custom domain and the ability to update Domain Name System (DNS) records. You will need this to enable HTTPS encryption for Palette. + + +- Access to the Palette Helm Charts. Refer to the [Access Palette](enterprise-version.md#download-palette-installer) for instructions on how to request access to the Helm Chart + + + +
+ +:::caution + +Do not use a Palette-managed Kubernetes cluster when installing Palette. Palette-managed clusters contain the Palette agent and Palette-created Kubernetes resources that will interfere with the installation of Palette. + +::: + + +## Install Palette + +Use the following steps to install Palette on Kubernetes. + + +:::info + +The following instructions are written agnostic to the Kubernetes distribution you are using. Depending on the underlying infrastructure provider and your Kubernetes distribution, you may need to modify the instructions to match your environment. Reach out to our support team if you need assistance. + +::: + + +1. Open a terminal session and navigate to the directory where you downloaded the Palette Helm Charts provided by our support. We recommend you place all the downloaded files into the same directory. You should have the following Helm Charts: + + - Spectro Management Plane Helm Chart. + + - Cert Manager Helm Chart. + + +2. Extract each Helm Chart into its directory. Use the commands below as a reference. Do this for all the provided Helm Charts. + +
+ + ```shell + tar xzvf spectro-mgmt-plane-*.tgz + ``` + +
+
+   ```shell
+   tar xzvf cert-manager-*.tgz
+   ```
+
+
+3. Install Cert Manager using the following command. Replace the actual file name of the Cert Manager Helm Chart with the one you downloaded, as the version number may be different.
+
+ + ```shell + helm upgrade --values cert-manager/values.yaml cert-manager cert-manager-1.11.0.tgz --install + ``` + +
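+
+   Before continuing, you can optionally confirm that the Cert Manager pods are up. The namespace below assumes the chart's default `cert-manager` namespace; adjust it if your copy of the chart is configured differently.
+
+   ```shell
+   kubectl get pods --namespace cert-manager
+   ```
+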
+ + :::info + + The Cert Manager Helm Chart provided by our support team is configured for Palette. Do not modify the **values.yaml** file unless instructed to do so by our support team. + + ::: + + +4. Open the **values.yaml** in the **spectro-mgmt-plane** folder with a text editor of your choice. The **values.yaml** contains the default values for the Palette installation parameters, however, you must populate the following parameters before installing Palette. + +
+ + | **Parameter** | **Description** | **Type** | + | --- | --- | --- | + | `env.rootDomain` | The URL name or IP address you will use for the Palette installation. | string | + | `ociPackRegistry` or `ociPackEcrRegistry` | The OCI registry credentials for Palette FIPS packs.| object | + | `scar` | The Spectro Cloud Artifact Repository (SCAR) credentials for Palette FIPS images. These credentials are provided by our support team. | object | + + + Save the **values.yaml** file after you have populated the required parameters mentioned in the table. + +
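+
+   The following is a trimmed sketch of what the populated section of **values.yaml** might look like. Every value shown is a placeholder, and the field names mirror the `config` section described in the Helm Chart install reference; keep all other defaults from the file unchanged.
+
+   ```yaml
+   config:
+     env:
+       rootDomain: "palette.example.com"
+     ociPackEcrRegistry:
+       endpoint: "<oci-registry-endpoint>"
+       name: "<oci-registry-name>"
+       accessKey: "<base64-access-key>"
+       secretKey: "<base64-secret-key>"
+     scar:
+       endpoint: "<scar-endpoint>"
+       username: "<scar-username>"
+       password: "<base64-password>"
+   ```
+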
+
+   :::info
+
+   You can learn more about the parameters in the **values.yaml** file on the [Helm Configuration Reference](helm-chart-install-reference.md) page.
+
+   :::
+
+
+
+5. Install the Palette Helm Chart using the following command.
+
+ + ```shell + helm upgrade --values spectro-mgmt-plane/values.yaml hubble spectro-mgmt-plane-0.0.0.tgz --install + ``` + + +6. Track the installation process using the command below. Palette is ready when the deployments in the namespaces `cp-system`, `hubble-system`, `ingress-nginx`, `jet-system` , and `ui-system` reach the *Ready* state. The installation takes between two to three minutes to complete. + +
+ + ```shell + kubectl get pods --all-namespaces --watch + ``` + + +7. Create a DNS CNAME record that is mapped to the Palette `ingress-nginx-controller` load balancer. You can use the following command to retrieve the load balancer IP address. You may require the assistance of your network administrator to create the DNS record. + +
+ + ```shell + kubectl get service ingress-nginx-controller --namespace ingress-nginx --output jsonpath='{.status.loadBalancer.ingress[0].hostname}' + ``` + +
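+
+   Depending on your infrastructure provider, the load balancer may expose an IP address instead of a hostname. In that case, query the `ip` field of the same service instead.
+
+   ```shell
+   kubectl get service ingress-nginx-controller --namespace ingress-nginx --output jsonpath='{.status.loadBalancer.ingress[0].ip}'
+   ```
+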
+ + :::info + + As you create tenants in Palette, the tenant name is prefixed to the domain name you assigned to Palette. For example, if you create a tenant named `tenant1` and the domain name you assigned to Palette is `palette.example.com`, the tenant URL will be `tenant1.palette.example.com`. You can create an additional wildcard DNS record to map all tenant URLs to the Palette load balancer. + + ::: + + +8. Use the custom domain name or the IP address of the load balancer to visit the Palette system console. To access the system console, open a web browser and paste the custom domain URL in the address bar and append the value `/system`. Replace the domain name in the URL with your custom domain name or the IP address of the load balancer. Alternatively, you can use the load balancer IP address with the appended value `/system` to access the system console. + +
+ + :::info + + The first time you visit the Palette system console, a warning message about an untrusted SSL certificate may appear. This is expected, as you have not yet uploaded your SSL certificate to Palette. You can ignore this warning message and proceed. + + ::: + +
+ + ![Screenshot of the Palette system console showing Username and Password fields.](/palette_installation_install-on-vmware_palette-system-console.png) + + +9. Log in to the system console using the following default credentials. + +
+ + | **Parameter** | **Value** | + | --- | --- | + | Username | `admin` | + | Password | `admin` | + +
+ + After login, you will be prompted to create a new password. Enter a new password and save your changes. You will be redirected to the Palette system console. + +
+ +10. After login, a summary page is displayed. Palette is installed with a self-signed SSL certificate. To assign a different SSL certificate you must upload the SSL certificate, SSL certificate key, and SSL certificate authority files to Palette. You can upload the files using the Palette system console. Refer to the [Configure HTTPS Encryption](../vertex/system-management/ssl-certificate-management.md) page for instructions on how to upload the SSL certificate files to Palette. + + +
+ +:::caution + +If you plan to deploy host clusters into different networks, you may require a reverse proxy. Check out the [Configure Reverse Proxy](reverse-proxy.md) guide for instructions on how to configure a reverse proxy for Palette VerteX. + +::: + + +You now have a self-hosted instance of Palette installed in a Kubernetes cluster. Make sure you retain the **values.yaml** file as you may need it for future upgrades. + + +## Validate + +Use the following steps to validate the Palette installation. + +
+ + +1. Open up a web browser and navigate to the Palette system console. To access the system console, open a web browser and paste the following URL in the address bar and append the value `/system`. Replace the domain name in the URL with your custom domain name or the IP address of the load balancer. + + + +2. Log in using the credentials you received from our support team. After login, you will be prompted to create a new password. Enter a new password and save your changes. You will be redirected to the Palette system console. + + +3. Open a terminal session and issue the following command to verify the Palette installation. The command should return a list of deployments in the `cp-system`, `hubble-system`, `ingress-nginx`, `jet-system` , and `ui-system` namespaces. + +
+ + ```shell + kubectl get pods --all-namespaces --output custom-columns="NAMESPACE:metadata.namespace,NAME:metadata.name,STATUS:status.phase" \ + | grep -E '^(cp-system|hubble-system|ingress-nginx|jet-system|ui-system)\s' + ``` + + Your output should look similar to the following. + + ```shell hideClipboard + cp-system spectro-cp-ui-689984f88d-54wsw Running + hubble-system auth-85b748cbf4-6drkn Running + hubble-system auth-85b748cbf4-dwhw2 Running + hubble-system cloud-fb74b8558-lqjq5 Running + hubble-system cloud-fb74b8558-zkfp5 Running + hubble-system configserver-685fcc5b6d-t8f8h Running + hubble-system event-68568f54c7-jzx5t Running + hubble-system event-68568f54c7-w9rnh Running + hubble-system foreq-6b689f54fb-vxjts Running + hubble-system hashboard-897bc9884-pxpvn Running + hubble-system hashboard-897bc9884-rmn69 Running + hubble-system hutil-6d7c478c96-td8q4 Running + hubble-system hutil-6d7c478c96-zjhk4 Running + hubble-system mgmt-85dbf6bf9c-jbggc Running + hubble-system mongo-0 Running + hubble-system mongo-1 Running + hubble-system mongo-2 Running + hubble-system msgbroker-6c9b9fbf8b-mcsn5 Running + hubble-system oci-proxy-7789cf9bd8-qcjkl Running + hubble-system packsync-28205220-bmzcg Succeeded + hubble-system spectrocluster-6c57f5775d-dcm2q Running + hubble-system spectrocluster-6c57f5775d-gmdt2 Running + hubble-system spectrocluster-6c57f5775d-sxks5 Running + hubble-system system-686d77b947-8949z Running + hubble-system system-686d77b947-cgzx6 Running + hubble-system timeseries-7865bc9c56-5q87l Running + hubble-system timeseries-7865bc9c56-scncb Running + hubble-system timeseries-7865bc9c56-sxmgb Running + hubble-system user-5c9f6c6f4b-9dgqz Running + hubble-system user-5c9f6c6f4b-hxkj6 Running + ingress-nginx ingress-nginx-controller-2txsv Running + ingress-nginx ingress-nginx-controller-55pk2 Running + ingress-nginx ingress-nginx-controller-gmps9 Running + jet-system jet-6599b9856d-t9mr4 Running + ui-system spectro-ui-76ffdf67fb-rkgx8 Running + ``` + + +## Next Steps + +You have successfully installed Palette in a Kubernetes cluster. Your next steps are to configure Palette for your organization. Start by creating the first tenant to host your users. Use the [Create a Tenant](../vertex/system-management/tenant-management.md#create-a-tenant) page for instructions on how to create a tenant. + + + + +
+ + +## Prerequisites + +- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) is installed. + + +- Configure a Container Storage Interface (CSI) for persistent data. + + +- Have at least three worker nodes or three untainted control plane nodes. + + +- [Cert Manager](https://cert-manager.io/docs) v1.11.0 or greater installed in the Kubernetes cluster. Use the official Cert Manager [installation guide](https://cert-manager.io/docs/installation/) for additional guidance. + + + +- Allocate a minimum of 4 CPUs and 12 GB of Memory per node. + + +- A custom domain and the ability to update Domain Name System (DNS) records. + + + +- Access to the Palette Helm Chart. Contact support@spectrocloud.com to gain access to the Helm Chart. + + +- For AWS EKS, ensure you have the [AWS CLI](https://aws.amazon.com/cli/) and the [kubectl CLI](https://github.com/weaveworks/eksctl#installation) installed. + +
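+
+Before you begin, you can optionally verify that the prerequisite tooling listed above is available on your workstation. Only check the tools that apply to your target environment; the commands below assume the binaries are already in your `PATH`.
+
+```shell
+kubectl version --client
+helm version
+# Required only when deploying to AWS EKS.
+aws --version
+eksctl version
+```
+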
+ +:::caution + +Palette cannot manage the cluster that it is installed onto due to component conflicts. Consider using a managed Kubernetes service to minimize management overhead. The Palette Helm Chart is not tied to any particular managed Kubernetes service. + + +::: + + +## Install Palette + +Choose the installation steps for your target environment. The steps in the generic tab apply to all Kubernetes clusters. Steps in other tabs have instructions explicitly tailored to the target environment. + +
+ + + + + +1. Download the kubeconfig file for the Kubernetes cluster where you will deploy Palette. Ensure you can interact with the target cluster. You can validate by issuing a `kubectl` command. + +
+ + ```shell + kubectl get pods -A + ``` + + +2. Extract the **values.yaml** from the Helm Chart with the following command: + +
+ + ```shell + tar xzvf /path/to/chart.tgz spectro-mgmt-plane/values.yaml + ``` + + +3. Review the **values.yaml** . You must populate the `env.rootDomain` parameter to the domain you will use for the installation. All other parameter values are optional, and you can reset or change them with a Helm upgrade operation. + +
+ + :::caution + + Do not use a wildcard in the root domain value for the `env.rootDomain` parameter. Use a complete domain name when assigning a root domain name value. + + ::: + + +4. Install the Helm Chart using the following command. Replace the path in the command to match your local path of the Palette Helm Chart. + +
+ + ```shell + helm install palette /path/to/chart.tgz -f /path/to/values.yaml + ``` + + +5. Monitor the deployment using the command below. Palette is ready when the deployments in namespaces `cp-system`, `hubble-system`, `jet-system` , and `ui-system` reach the *Ready* state. + +
+ + ```shell + kubectl get pods --all-namespaces --watch + ``` + +6. Create a DNS record that is mapped to the Palette `ingress-nginx-controller` load balancer. You can use the following command to retrieve the load balancer IP address. + +
+ + ```shell + kubectl get service ingress-nginx-controller --namespace nginx --output jsonpath='{.status.loadBalancer.ingress[0].hostname}' + ``` + +You now have a self-hosted instance of Palette installed in a Kubernetes cluster. Make sure you retain the **values.yaml** file as you will need it for future upgrades. + +
+ +
+ + + +1. Ensure the AWS CLI is configured with your credentials. You can use the following command to configure your credentials. Refer to the [Configuring the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) guide for additional help. + +
+ + ```shell + aws configure + ``` + +2. Next, create an EKS cluster. + +
+
+   ```shell
+   eksctl create cluster \
+    --name palette-selfhost \
+    --node-type m5.xlarge \
+    --nodes 3 \
+    --nodes-min 3 \
+    --nodes-max 4 \
+    --region eu-west-2 \
+    --kubeconfig ~/Downloads/palette-selfhost.kubeconfig
+   ```
+
+   Change `--region` and `--nodes` as required. You can also change the instance size.
+
+   Note that the [minimum instance requirement](https://aws.amazon.com/ec2/instance-types/) is three nodes with at least 4 CPUs and 12 GB of Memory per node.
+
+
+3. When the cluster is available, go ahead and configure OpenID Connect (OIDC) for the cluster to use Palette as the Identity Provider (IDP).
+
+ + ```shell + eksctl utils associate-iam-oidc-provider --cluster=palette-selfhost --approve + ``` + +4. Next, add the EBS Container Storage Interface (CSI) driver IAM role. Replace the `` with your AWS account ID. + +
+ + ```shell + eksctl create addon --name aws-ebs-csi-driver \ + --cluster palette-selfhost \ + --service-account-role-arn arn:aws:iam:::role/AmazonEKS_EBS_CSI_DriverRole \ + --force + ``` + +5. Log in to the [AWS console](https://console.aws.amazon.com) and navigate to the EKS Dashboard. + + + +6. Select the **palette-selfhost** cluster to access its details page. + + + +7. From the cluster details page, click on **Compute** > **Node Group**. Next, click on **Node IAM role ARN link**. + + ![A view of the cluster details page with the Node IAM role ARN highlighted](/enterprise-version_deploying-palette-with-helm_aws-iam-role.png) + + +8. From the **Permissions** tab, click on the **Add Permissions** button, and select **Attach Policies**. + + +9. Search for the **AmazonEBSCSIDriverPolicy** policy and add it to the role. + +
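+
+   If you prefer the AWS CLI over the console for steps 7 through 9, you can attach the managed policy directly. The role name below is a placeholder; use the node IAM role you identified on the cluster details page.
+
+   ```shell
+   aws iam attach-role-policy \
+    --role-name <node-group-iam-role-name> \
+    --policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy
+   ```
+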
+ + :::info + + You can find additional guidance about Amazon EBS CSI drivers and requirements by reviewing the [EBS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html) and the [Manage EBS with EKS](https://github.com/awsdocs/amazon-eks-user-guide/blob/master/doc_source/managing-ebs-csi.md) guide. + + ::: + +10. Extract the Helm Chart files from the compressed asset we provided to you. Replace the file path and version placeholder as needed. + +
+ + ```shell + tar xzvf path/to-file/spectro-mgmt-helm-charts-X.X.tar.gz + ``` + +11. Navigate to the **spectro-mgmt-helm-charts-X.X** folder. + +
+
+   ```shell
+   cd spectro-mgmt-helm-charts-X.X
+   ```
+
+12. Review the **values.yaml** file. You must populate the `env.rootDomain` parameter with the domain you will use for the installation. In addition, add the same root domain, with port `:4222` appended, to the `natsUrl` parameter in the `nats` section of the YAML. For example, if your root domain is `my-domain.com`, set `natsUrl` to `my-domain.com:4222`, as shown in the sketch below. All other parameter values are optional, and you can reset or change them with the Palette API.
+
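+
+   The following is a minimal sketch of the two related settings, using a placeholder domain. The nesting mirrors the `config` section used elsewhere in this guide, and the `nats` block is shown at the top level for illustration only; confirm both keys against the **values.yaml** in your extracted chart.
+
+   ```yaml
+   config:
+     env:
+       rootDomain: "my-domain.com"
+
+   nats:
+     natsUrl: "my-domain.com:4222"
+   ```
+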
+ + :::caution + + Do not use a wildcard in the root domain value for the `env.rootDomain` parameter. Use a complete domain name when assigning a root domain name value. + + ::: + + 13. If you wish to use [AWS ACM for SSL Certs](https://docs.aws.amazon.com/acm/latest/userguide/acm-overview.html), instead of the default self-signed certificate that the Nginx *ingress controller* generates, you can add it to the `annotations` under `ingress`. + +
+ + ```yaml + ingress: + ingress: + # Whether to front NGINX Ingress Controller with a cloud + # load balancer (internal == false) or use host network + internal: false + + # Default SSL certificate and key for NGINX Ingress Controller (Optional) + # A wildcard cert for config.env.rootDomain, e.g., *.myfirstpalette.spectrocloud.com + # If left blank, the NGINX ingress controller will generate a self-signed cert (when terminating TLS upstream of ingress-nginx-controller) + # certificate: "" + # key: "" + + annotations: + # AWS example + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + ingressStaticIP: "" + # Used to terminate HTTPS traffic at the load balancer versus passing through the load balancer. This parameter is available in Palette 3.3 or greater. + terminateHTTPSAtLoadBalancer: true + ``` + + 14. Download the kubeconfig file for the EKS cluster. Ensure you can interact with the target cluster. You can validate by issuing a `kubectl` command. For additional guidance, refer to the [kubeconfig file for an Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html) guide. + + + +15. Install the Helm Chart using the following command. Replace the path in the command to match your local path of the Palette Helm Chart. + +
+ + ```shell + helm install palette /path/to/chart.tgz -f /path/to/values.yaml + ``` + +16. Monitor the deployment using the command below. Palette is ready when the deployments in namespaces `cp-system`, `hubble-system`, `jet-system` , and `ui-system` reach the *Ready* state. + +
+ + ```shell + kubectl get pods --all-namespaces --watch + ``` + +17. Create a DNS record mapped to the load balancer created by the Palette service `ingress-nginx-controller` . You can use the following command to retrieve the load balancer IP address. + +
+ + ```shell + kubectl get service ingress-nginx-controller --namespace nginx --output jsonpath='{.status.loadBalancer.ingress[0].hostname}' + ``` + +You now have a self-hosted instance of Palette installed in a Kubernetes cluster. Make sure you retain the **values.yaml** file as you will need it for future upgrades. + +
+ +
+ + +
+ +# Validate + +You can validate that the installation of Palette is successful by visiting the custom domain you assigned to the +`env.rootDomain` parameter in the **values.yaml**. + +
+ + +:::caution + +If you notice that the pods in the `hubble-system` namespace are not initializing as expected, it might be due to a delay in adding the DNS records for the rootDomain. The workaround is to terminate all pods except the pods related to `mongo-db` in the `hubble-system` namespace to trigger a redeployment of the pods. + +
+ + ```shell + kubectl delete pods --namespace hubble-system --selector=role!=mongo + ``` + +::: + + +## Upgrade Palette + + + +To upgrade Palette with a new Helm release, use the following steps.

+ +1. Download the new version of the Helm Chart. + + + +2. Extract the new **values.yaml** file from the Helm Chart with the following command: + +
+ + ```shell + tar xzvf /path/to/chart.tgz spectro-mgmt-plane/values.yaml + ``` + + +3. Compare the new **values.yaml** against the original **values.yaml** you used for the initial Palette installation. Address any new parameters added to the values file. + + + + +4. Issue the following command to upgrade Palette. Use the same **values.yaml** file you used for the Palette installation. + +
+
+   ```shell
+   helm upgrade palette /path/to/chart.tgz --values /path/to/original_values.yaml
+   ```
+
+
+### Post-Install Configuration Values
+
+The values you specified in the **values.yaml** file all fall under the parameter section `values.config` and are stored in the `configserver-cm` ConfigMap.
+
+After the installation, if you need to change any configuration values under `values.config` in the **values.yaml** file, you must use the Palette API.
+When you use the `helm upgrade` command, internal system configurations stored in the Kubernetes ConfigMap `configserver-cm` will display as updated, but Palette will not apply the new values. Palette only accepts changes to these configuration values if they are submitted via API.
+
+If you find yourself in this scenario, contact our support team by emailing us at support@spectrocloud.com for additional guidance.
+
+
+
+## Next Steps
+
+Start exploring the Palette system dashboard so that you become familiar with the available actions you can take as an administrator. Check out the [System Console Dashboard](system-console-dashboard.md) resource to learn more.
+
+
+ + +
+
\ No newline at end of file
diff --git a/docs/docs-content/enterprise-version/deploying-the-platform-installer.md b/docs/docs-content/enterprise-version/deploying-the-platform-installer.md
new file mode 100644
index 0000000000..6b9fe8faa0
--- /dev/null
+++ b/docs/docs-content/enterprise-version/deploying-the-platform-installer.md
@@ -0,0 +1,90 @@
+---
+sidebar_label: "Install Using Quick-Start Mode"
+title: "VMware Quick Start Installation"
+description: "Learn how to install self-hosted Palette by deploying a single node instance."
+icon: ""
+hide_table_of_contents: false
+sidebar_position: 10
+tags: ["self-hosted", "enterprise"]
+---
+
+
+The Palette On-Prem Quick Start Mode is a single-node installation of the Palette platform, used for PoC environments to quickly understand the capabilities of the Palette platform. We do not recommend it for production deployments, as it does not provide high availability or scalability.
+
+As a prerequisite, contact us to receive download instructions for the platform installer OVA, then download the OVA and upload it into vCenter.
+
+
+:::caution
+
+
+Starting with Palette 4.0.0, the Palette CLI and the Helm Chart are the only supported methods for installing Palette. The Palette OVA installation method is only available for versions 3.4 and earlier. Refer to the [Install Enterprise Cluster](deploying-an-enterprise-cluster.md), or the [Kubernetes Install Helm Chart](deploying-palette-with-helm.md) guides for additional guidance on how to install Palette.
+
+:::
+
+## Deploy Platform Installer
+
+1. Log in to the vSphere console and navigate to VMs and Templates.
+2. Navigate to the Datacenter and folder you would like to use for the installation.
+3. Right-click on the folder and invoke the VM creation wizard by selecting the option to Deploy OVF Template.
+4. Complete all the steps of the OVF deployment wizard. Provide values for various fields as follows:
+    * URL: <Location of the platform installer>
+    * Virtual Machine Name: <vm name>
+    * Folder: <Select desired folder>
+    * Select the desired Datacenter, Storage, and Network for the platform installer VM as you proceed through the next steps. The Platform installer VM requires an outgoing internet connection. Select a network that provides this access directly, or via a proxy.
+    * Customize the template as follows:
+      * Name: <The name to identify the platform installer>
+      * SSH Public Keys: Create a new SSH key pair (or pick an existing one). Enter the public key in this field. The public key will be installed in the installer VM to provide SSH access, as the user `ubuntu`. This is useful for troubleshooting purposes.
+      * Monitoring Console Password: A monitoring console is deployed in the platform installer VM to provide detailed information about the installation progress as well as to provide access to various logs. This console can be accessed after the VM is powered on at https://<VM IP Address>:5080. The default monitoring console credentials are:
+
+        * User Name: admin
+        * Password: admin
+
+        Provide a different password for the monitoring console if desired. Leave the field blank to accept the default password.
+      * Pod CIDR: Optional - provide an IP range exclusive to pods. This range must be different from your network CIDR to prevent an overlap. (e.g: 192.168.0.0/16)
+      * Service cluster IP range: Optional - assign an IP range in the CIDR format exclusive to the service clusters. This range also must not overlap with either the pod CIDR range or your network CIDR.
(e.g: 10.96.0.0/12) + * Static IP Address: <VM IP Address> Optional IP address (e.g: 192.168.10.15) to be specified only if static IP allocation is desired. DHCP is used by default. + * Static IP subnet prefix: <Network Prefix> Static IP subnet prefix (e.g: 18), required only for static IP allocation. + * Static IP gateway: <Gateway IP Address> (e.g: 192.168.0.1) required only for static IP allocation. + * Static IP DNS: <Name servers> Comma separated DNS addresses (e.g: 8.8.8.8, 192.168.0.8), required only for static IP allocation. + * HTTP Proxy: <endpoint for the http proxy server>, e.g: _http://USERNAME:PASSWORD@PROXYIP:PROXYPORT_. An optional setting, required only if a proxy is used for outbound connections. + * HTTPS Proxy: <endpoint for the https proxy server>, e.g: _http://USERNAME:PASSWORD@PROXYIP:PROXYPORT_. An optional setting, required only if a proxy is used for outbound connections. + * NO Proxy: <comma-separated list of vCenter server, local network CIDR, hostnames, domain names that should be excluded from proxying>, e.g: _vcenter.company.com_,10.10.0.0/16. + * Spectro Cloud Repository settings: The platform installer downloads various platform artifacts from a repository. Currently, this repository is hosted by Palette and the installer VM needs to have an outgoing internet connection to the repository. Upcoming releases will enable the option to privately host a dedicated repository to avoid having to connect outside. This option is currently unavailable. Leave all the fields under Palette Repository settings blank + * Finish the OVF deployment wizard and wait for the template to be created. This may take a few minutes as the template is initially downloaded. +5. Power on the VM. + +## Monitor Installation + +The platform installer contains a web application called the Supervisor, to provide detailed progress of the installation. After the VM is powered on, perform the following steps to ensure installation is completed successfully. + +1. Open the Supervisor application in a browser window by navigating to https://<VM IP Address>:5080. +2. Observe the installation status in the Status tab. The page auto-refreshes to provide updated installation progress. +3. Once the final installation step is complete, you will see URLs to navigate to the On-Prem System Console as well as the Management Console. + * On-Prem System Console: Initial login:admin/admin + * Management Console: Tenant credentials to be created and used [Configure System for First Time](#initial-configuration). +4. Navigate to the On-Prem System Console to perform the initial configuration. Additional administration tasks like SMTP setup, certificate management, etc. can also be performed from the On-Prem System Console. + +:::info +Typically, the installation takes around 10 mins after powering on the virtual machine. If the installation fails or takes an unusually long time, please look for failure messages in the install status page, or access system logs from the "Logs" tab to get detailed information about the failure. +::: + +## Initial Configuration + +The On-Prem System Console provides options for performing various administrative setup tasks. Most of these are optional and can be performed at any later time. To quickly start using the platform's functionality, all that is needed is to create the first tenant and activate it. + +1. Open the system console. 
You can access the system console by opening a browser window and typing in the IP address of the platform installer VM or the custom domain name if configured. Append `/system` to the URL to access the system console. Example `https://10.10.10.100/system`. + +2. Log in using username: 'admin' and password: 'admin'. + +3. Reset the default password. + +4. Choose "Quick Start" when prompted for a choice for the startup mode. + +5. Navigate to the Tenant Management section and create your first tenant. + +6. Copy the tenant activation link and invoke it in a browser window to activate the newly created tenant. + +7. Enter the desired password and proceed and login as a tenant into the Management Console. + + +Next, continue to perform various tasks as desired from the management console like creating gateways, cloud accounts, cluster profiles, and launching of clusters. diff --git a/docs/docs-content/enterprise-version/enterprise-cluster-management.md b/docs/docs-content/enterprise-version/enterprise-cluster-management.md new file mode 100644 index 0000000000..fe243653ce --- /dev/null +++ b/docs/docs-content/enterprise-version/enterprise-cluster-management.md @@ -0,0 +1,237 @@ +--- +sidebar_label: "Enterprise Management" +title: "Enterprise Management" +description: "Learn how to manage your enterprise clusters." +icon: "" +hide_table_of_contents: false +sidebar_position: 60 +tags: ["self-hosted", "enterprise"] +--- + + + +Palette supports several Day-2 operations to manage the end-to-end lifecycle of the Kubernetes clusters launched through Palette On-Premises Enterprise Mode. It provides several capabilities across clusters to keep your clusters secure, compliant, up-to-date, and perform ongoing management operations like backup/restore and cluster migration across Private Cloud Gateway (PCGs). + + + + + + + +## Palette PCG Migration + +Palette enables PCG migration to route the traffic between PCGs to ensure uninterrupted PCG service availability. If a PCG goes unhealthy, it can be deleted after migrating the clusters launched through that PCG to another healthy PCG. This ensures that cluster operations such as deletion are carried out without interruption. + +## When Will You Migrate + +The possible conditions of PCG migration are: + +* Unhealthy PCG to healthy PCG + + +* Healthy PCG to healthy PCG + + +## How to Migrate a PCG Traffic + +To migrate the traffic from a PCG: +
+
+1. Log in as **Tenant Admin** to the Palette Console.
+
+
+2. From the **Tenant Settings**, go to the **Private Cloud Gateways** tab to list all PCGs.
+
+
+3. Click the kebab menu (three-dot ellipsis) next to the PCG you want to migrate to display the **Migrate** drop-down option.
+
+
+4. Click the **Migrate** option to open the wizard and select your destination PCG.
+
+
+5. The wizard displays a drop-down list of all healthy PCGs to which traffic can be migrated. Select the PCG of your choice from the drop-down.
+
+
+6. Confirm the migration operation. The UI displays a confirmation when the migration is successful.
+
+
+7. Once the migration is complete, you can delete the unhealthy source PCG. Clear any residual resources manually to complete the deletion process.
+
+
+8. The **Audit Logs** page displays the migration updates.
+
+
+ + + +## Backup and Restore for Enterprise Clusters + +Palette provides convenient backup options to backup the Enterprise Kubernetes cluster state into object storage. It restores it at a later point in time if required to the same or a different cluster. Besides backing up Kubernetes native objects like Pods, DaemonSets, Services, etc., a snapshot of the persistent volume is taken and maintained as part of the backup. The two options of backup creation are: + +* FTP + + +* S3 + +FTP mode backup is sending the backup data of your enterprise cluster to a dedicated FTP server using the File Transfer Protocol (FTP). + +S3 buckets for backup make it trivial for everyone to use Amazon’s infrastructure for remote backups and secure cluster objects online. In addition, this feature provides the advantages of scheduling, strong encryption, compression, easy access to your backup files. + +### Instructions + +1. Log in to enterprise mode as administrator: + + * https://system_IP/system + * Username: admin + * Password: custom password + + +2. Select **Administration** from left panel. + + +3. On the **Administration** page, select **Backup/Restore** from the top ribbon. + + +4. Complete the backup configuration wizard to complete the mode of backup creation. + + +5. Select the mode of backup from the two available options: + * FTP + * S3 + + +### FTP + +The following information is filled to create a backup location in FTP mode: + +1. Provide the ftp:// server details. + + +2. The directory name for the backup storage. + + +3. Username and Password to log in to the server. + + +4. Scheduling details of the backup. + * **Interval** specifies the number of days between two consecutive backups. + * **Retention period** for backup in days. + * **Hours of the day** (UTC 0 to 23 hours) specifies the time of the specified day to take the backup. + + +5. This configuration is saved and used for creating an FTP backup by clicking the **+Create FTP backup** button on the top-right corner of the page. + + +6. The configuration can be edited as per the requirements. + + +7. Delete/Restore a specific backup from the actions panel. + +:::info +The saved configuration details can be used to create multiple backup locations. +Any changes can be made to the existing configuration and saved for reuse. +::: + +### S3 Backup Location + +:::caution + +An AWS S3 bucket created is a prerequisite. + +The following permissions need to be enabled. + +::: + +#### Permission Sets +Ensure that the IAM user or the `root` user role created should have the following two IAM policies included: + +**EC2-Policy** + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:PutObject", + "s3:AbortMultipartUpload", + "s3:ListMultipartUploadParts" + ], + "Resource": [ + ";" + ] + } + ] +} +``` + + +**S3-Policy** + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeVolumes", + "ec2:DescribeSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:CreateSnapshot", + "ec2:DeleteSnapshot" + ], + "Resource": "" + } + ] +} +``` + +The following information is needed: + + +* AWS Account Access key + + +* AWS Account Secret Key + + +* AWS Region + + +* AWS Bucket name + + +* Folder name to which the backup is stored in the S3 bucket + + +* Scheduling details of the backup, + * **Interval** specifies the number of days between two consecutive backups. + * **Retention period** of backup in days. 
* **Hours of the day** (UTC 0 to 23 hours) specifies the time of the specified day to take the backup.
+
+
+* Validate the information and save the configurations.
+
+
+* Use the saved configuration to create an S3 backup by clicking the **+Create S3 backup** button on the top-right corner of the page.
+
+
+* Once the backup is created, details such as the Backup uid, Mode, Status, Finish Time, and Actions can be viewed in the console for each individual backup.
+
+
+* Delete/Restore a specific backup from the actions panel.
+
+
+:::info
+The saved configuration details can be used to create multiple backup locations. Any changes can be made to the existing configuration and saved for reuse.
+:::
+
+
+
+
diff --git a/docs/docs-content/enterprise-version/enterprise-version.md b/docs/docs-content/enterprise-version/enterprise-version.md new file mode 100644 index 0000000000..c8af81d8ab --- /dev/null +++ b/docs/docs-content/enterprise-version/enterprise-version.md @@ -0,0 +1,108 @@ +--- +sidebar_label: "Self-Hosted Installation" +title: "Self-Hosted Installation" +description: "Understanding, installing and operating Spectro Cloud's Enterprise Self-Hosted variant." +hide_table_of_contents: false +sidebar_custom_props: + icon: "warehouse" +tags: ["self-hosted", "enterprise"] +--- + + +Palette is available as a self-hosted platform offering. You can install the self-hosted version of Palette in your data centers or public cloud providers to manage Kubernetes clusters. You can install Palette by using the following four methods: + + +:::caution + + +Starting with Palette 4.0.0, the Palette CLI, and the Helm Chart, are the only supported methods for installing Palette. The Palette OVA installation method is only available for versions 3.4 and earlier. Refer to the [Install Enterprise Cluster](deploying-an-enterprise-cluster.md), or the [Kubernetes Install Helm Chart](deploying-palette-with-helm.md) guides for additional guidance on how to install Palette. + +::: + +- [VMware Quick Start](deploying-the-platform-installer.md) + + +- [VMware Enterprise](deploying-an-enterprise-cluster.md) + + +- [Kubernetes Install Helm Chart](deploying-palette-with-helm.md) + + +- [AirGap Install](air-gap-repo.md) + +## VMware Quick Start + +A single-node Palette installation that is ideal for Proof of Concept (PoC) environments. Refer to the [Quick Start Installation](deploying-the-platform-installer.md) guide for more details. + +## VMware Enterprise + +A highly available multi-node Palette installation that is typically used for production purposes. Check out the [Enterprise Mode](deploying-an-enterprise-cluster.md) guide to get started. + +## Kubernetes Install Helm Chart + +Install Palette onto a Kubernetes cluster using a Helm Chart. Review the [Helm Chart Mode](deploying-palette-with-helm.md) guide to learn more. + + +## Airgap Install + +Palette can be installed in a VMware environment without internet access, known as an air gap installation, requiring pre-download of platform manifests, required platform packages, container images for core components, third-party dependencies, and Palette Packs, all sourced from a private rather than the default public Palette repository. + +## Download Palette Installer + +To request the Palette Self-hosted installer image, please contact our support team by sending an email to support@spectrocloud.com. Kindly provide the following information in your email: + +- Your full name +- Organization name (if applicable) +- Email address +- Phone number (optional) +- A brief description of your intended use for the Palette Self-host installer image. + +Our dedicated support team will promptly get in touch with you to provide the necessary assistance and share the installer image. + +If you have any questions or concerns, please feel free to contact support@spectrocloud.com. + + +## Upgrade Notes + +Review the [Upgrade Notes](upgrade.md) before attempting to upgrade Palette. + + +
+ +## Resources + + +* [System Requirements](on-prem-system-requirements.md) + + +* [Quick Start Mode](deploying-the-platform-installer.md) + + +* [Enterprise Mode](deploying-an-enterprise-cluster.md) + + +* [Helm Chart Mode](deploying-palette-with-helm.md) + + +* [System Console Dashboard](system-console-dashboard.md) + + +* [Creating a VMware Cloud Gateway](../clusters/data-center/vmware.md#install-pcg) + + +* [Create VMware Cloud Account](../clusters/data-center/vmware.md#create-vmware-cloud-gateway) + + +* [Deploy a VMware Cluster](../clusters/data-center/vmware#deploy-a-vmware-cluster) + + +* [PCG Troubleshooting](../troubleshooting/pcg.md) + + +* [Upgrade Notes](upgrade.md) + + +
+ +
+ diff --git a/docs/docs-content/enterprise-version/helm-chart-install-reference.md b/docs/docs-content/enterprise-version/helm-chart-install-reference.md new file mode 100644 index 0000000000..4e6ce5f0eb --- /dev/null +++ b/docs/docs-content/enterprise-version/helm-chart-install-reference.md @@ -0,0 +1,720 @@ +--- +sidebar_label: "Helm Chart Install Reference" +title: "Helm Chart Install References" +description: "Reference for Palette Helm Chart installation parameters." +icon: "" +hide_table_of_contents: false +sidebar_position: 40 +tags: ["self-hosted", "enterprise"] +--- + + +You can use the Palette Helm Chart to install Palette in a multi-node Kubernetes cluster in your production environment. The Helm chart allows you to customize values in the **values.yaml** file. This reference lists and describes parameters available in the **values.yaml** file from the Helm Chart for your installation. To learn how to install Palette using the Helm Chart, refer to [Helm Chart Mode](deploying-palette-with-helm.md). + + +Depending on what version of Palette you are using, the available parameters will be different. Select the version below that corresponds to the version of Palette you are using. + +- [4.0.0 or greater](#400-or-greater) + +- [3.4.0 or earlier](#340-or-earlier) + +
+ + + +## 4.0.0 or Greater + +### Required Parameters + +The following parameters are required for a successful installation of Palette. + + +| **Parameters** | **Description** | **Type** | +| --- | --- | --- | +| `config.env.rootDomain` | Used to configure the domain for the Palette installation. We recommend you create a CNAME DNS record that supports multiple subdomains. You can achieve this using a wild card prefix, `*.palette.abc.com`. Review the [Environment parameters](#environment) to learn more. | String | +| `config.env.ociRegistry` or `config.env.ociEcrRegistry`| Specifies the FIPS image registry for Palette. You can use an a self-hosted OCI registry or a public OCI registry we maintain and support. For more information, refer to the [Registry](#registries) section. | Object | +| `scar`| The Spectro Cloud Artifact Repository (SCAR) credentials for Palette FIPS images. Our support team provides these credentials. For more information, refer to the [Registry](#registries) section. | Object | + + +:::caution + +If you are installing an air-gapped version of Palette, you must provide the image swap configuration. For more information, refer to the [Image Swap Configuration](#image-swap-configuration) section. + + +::: + + +### MongoDB + +Palette uses MongoDB Enterprise as its internal database and supports two modes of deployment:

+ +- MongoDB Enterprise deployed and active inside the cluster. + + +- MongoDB Enterprise is hosted on a software-as-a-service (SaaS) platform, such as MongoDB Atlas. + +The table below lists the parameters used to configure a MongoDB deployment. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `internal` | Specifies the MongoDB deployment either in-cluster or using Mongo Atlas. | Boolean | `true` | +| `databaseUrl`| The URL for MongoDB Enterprise. If using a remote MongoDB Enterprise instance, provide the remote URL. This parameter must be updated if `mongo.internal` is set to `false`. | String | `mongo-0.mongo,mongo-1.mongo,mongo-2.mongo` | +| `databasePassword`| The base64-encoded MongoDB Enterprise password. If you don't provide a value, a random password will be auto-generated. | String | `""` | +| `replicas`| The number of MongoDB replicas to start. | Integer | `3` | +| `memoryLimit`| Specifies the memory limit for each MongoDB Enterprise replica.| String | `4Gi` | +| `cpuLimit` | Specifies the CPU limit for each MongoDB Enterprise member.| String | `2000m` | +| `pvcSize`| The storage settings for the MongoDB Enterprise database. Use increments of `5Gi` when specifying the storage size. The storage size applies to each replica instance. The total storage size for the cluster is `replicas` * `pvcSize`. | string | `20Gi`| +| `storageClass`| The storage class for the MongoDB Enterprise database. | String | `""` | + + +```yaml +mongo: + internal: true + databaseUrl: "mongo-0.mongo,mongo-1.mongo,mongo-2.mongo" + databasePassword: "" + replicas: 3 + cpuLimit: "2000m" + memoryLimit: "4Gi" + pvcSize: "20Gi" + storageClass: "" +``` + +### Config + +Review the following parameters to configure Palette for your environment. The `config` section contains the following subsections: + + +#### Install Mode + +You can install Palette in connected or air-gapped mode. The table lists the parameters to configure the installation mode. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `installMode` | Specifies the installation mode. Allowed values are `connected` or `airgap`. Set the value to `airgap` when installing in an air-gapped environment. | String | `connected` | + +```yaml +config: + installationMode: "connected" +``` + +#### SSO + +You can configure Palette to use Single Sign-On (SSO) for user authentication. Configure the SSO parameters to enable SSO for Palette. You can also configure different SSO providers for each tenant post-install, check out the [SAML & SSO Setup](../user-management/saml-sso/saml-sso.md) documentation for additional guidance. + +To configure SSO, you must provide the following parameters. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | --- | +| `saml.enabled` | Specifies whether to enable SSO SAML configuration by setting it to true. | Boolean | `false` | +| `saml.acsUrlRoot` | The root URL of the Assertion Consumer Service (ACS).| String | `myfirstpalette.spectrocloud.com`| +| `saml.acsUrlScheme` | The URL scheme of the ACS: `http` or `https`. 
| String | `https` | +| `saml.audienceUrl` | The URL of the intended audience for the SAML response.| String| `https://www.spectrocloud.com` | +| `saml.entityID` | The Entity ID of the Service Provider.| String | `https://www.spectrocloud.com`| +| `saml.apiVersion` | Specify the SSO SAML API version to use.| String | `v1` | + +```yaml +config: + sso: + saml: + enabled: false + acsUrlRoot: "myfirstpalette.spectrocloud.com" + acsUrlScheme: "https" + audienceUrl: "https://www.spectrocloud.com" + entityId: "https://www.spectrocloud.com" + apiVersion: "v1" +``` + +#### Email + +Palette uses email to send notifications to users. The email notification is used when inviting new users to the platform, password resets, and when [webhook alerts](../clusters/cluster-management/health-alerts.md) are triggered. Use the following parameters to configure email settings for Palette. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `enabled` | Specifies whether to enable email configuration. | Boolean| `false`| +| `emailID ` | The email address for sending mail.| String| `noreply@spectrocloud.com` | +| `smtpServer` | Simple Mail Transfer Protocol (SMTP) server used for sending mail. | String | `smtp.gmail.com` | +| `smtpPort` | SMTP port used for sending mail.| Integer | `587` | +| `insecureSkipVerifyTLS` | Specifies whether to skip Transport Layer Security (TLS) verification for the SMTP connection.| Boolean | `true` | +| `fromEmailID` | Email address of the ***From*** address.| String | `noreply@spectrocloud.com` | +| `password` | The base64-encoded SMTP password when sending emails.| String | `""` | + +```yaml +config: + email: + enabled: false + emailId: "noreply@spectrocloud.com" + smtpServer: "smtp.gmail.com" + smtpPort: 587 + insecureSkipVerifyTls: true + fromEmailId: "noreply@spectrocloud.com" + password: "" +``` + +#### Environment + +The following parameters are used to configure the environment. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `env.rootDomain` | Specifies the URL name assigned to Palette Vertex. The value assigned should have a Domain Name System (DNS) CNAME record mapped to exposed IP address or the load balancer URL of the service *ingress-nginx-controller*. Optionally, if `ingress.ingressStaticIP` is provided with a value you can use same assigned static IP address as the value to this parameter.| String| `""` | +| `env.installerMode` | Specifies the installer mode. Do not modify the value.| String| `self-hosted` | +| `env.installerCloud` | Specifies the cloud provider. Leave this parameter empty if you are installing a self-hosted Palette. | String | `""` | + +```yaml +config: + env: + rootDomain: "" +``` +
+ +:::caution + +As you create tenants in Palette, the tenant name is prefixed to the domain name you assigned to Palette. For example, if you create a tenant named tenant1 and the domain name you assigned to Palette is `palette.example.com`, the tenant URL will be `tenant1.palette.example.com`. We recommend you create an additional wildcard DNS record to map all tenant URLs to the Palette load balancer. For example, `*.palette.example.com`. + +::: + +#### Cluster + +Use the following parameters to configure the Kubernetes cluster. + + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `stableEndpointAccess` | Set to `true` if the Kubernetes cluster is deployed in a public endpoint. If the cluster is deployed in a private network through a stable private endpoint, set to `false`. | Boolean | `false` | + +```yaml +config: + cluster: + stableEndpointAccess: false +``` + +### Registries + +Palette requires credentials to access the required Palette images. You can configure different types of registries for Palette to download the required images. You must configure at least one Open Container Initiative (OCI) registry for Palette. You must also provide the credentials for the Spectro Cloud Artifact Repository (SCAR) to download the required FIPS images. + +
+ +#### OCI Registry + + +Palette requires access to an OCI registry that contains all the required FIPS packs. You can host your own OCI registry and configure Palette to reference the registry. Alternatively, you can use the public OCI registry that we provide. Refer to the [`ociPackEcrRegistry`](#oci-ecr-registry) section to learn more about the publicly available OCI registry. + + + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `ociPackRegistry.endpoint` | The endpoint URL for the registry. | String| `""` | +| `ociPackRegistry.name` | The name of the registry. | String| `""` | +| `ociPackRegistry.password` | The base64-encoded password for the registry. | String| `""` | +| `ociPackRegistry.username` | The username for the registry. | String| `""` | +| `ociPackRegistry.baseContentPath`| The base path for the registry. | String | `""` | +| `ociPackRegistry.insecureSkipVerify` | Specifies whether to skip Transport Layer Security (TLS) verification for the registry connection. | Boolean | `false` | +| `ociPackRegistry.caCert` | The registry's base64-encoded certificate authority (CA) certificate. | String | `""` | + + +```yaml +config: + ociPackRegistry: + endpoint: "" + name: "" + password: "" + username: "" + baseContentPath: "" + insecureSkipVerify: false + caCert: "" +``` + +#### OCI ECR Registry + +We expose a public OCI ECR registry that you can configure Palette to reference. If you want to host your own OCI registry, refer to the [OCI Registry](#oci-registry) section. +The OCI Elastic Container Registry (ECR) is hosted in an AWS ECR registry. Our support team provides the credentials for the OCI ECR registry. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `ociPackEcrRegistry.endpoint` | The endpoint URL for the registry. | String| `""` | +| `ociPackEcrRegistry.name` | The name of the registry. | String| `""` | +| `ociPackEcrRegistry.accessKey` | The base64-encoded access key for the registry. | String| `""` | +| `ociPackEcrRegistry.secretKey` | The base64-encoded secret key for the registry. | String| `""` | +| `ociPackEcrRegistry.baseContentPath`| The base path for the registry. | String | `""` | +| `ociPackEcrRegistry.isPrivate` | Specifies whether the registry is private. | Boolean | `true` | +| `ociPackEcrRegistry.insecureSkipVerify` | Specifies whether to skip Transport Layer Security (TLS) verification for the registry connection. | Boolean | `false` | +| `ociPackEcrRegistry.caCert` | The registry's base64-encoded certificate authority (CA) certificate. | String | `""` | + +```yaml +config: + ociPackEcrRegistry: + endpoint: "" + name: "" + accessKey: "" + secretKey: "" + baseContentPath: "" + isPrivate: true + insecureSkipVerify: false + caCert: "" +``` + +#### Spectro Cloud Artifact Repository (SCAR) + +SCAR credentials are required to download the necessary FIPS manifests. Our support team provides the SCAR credentials. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `scar.endpoint` | The endpoint URL of SCAR. | String| `""` | +| `scar.username` |The username for SCAR. | String| `""` | +| `scar.password` | The base64-encoded password for the SCAR. | String| `""` | +| `scar.insecureSkipVerify` | Specifies whether to skip Transport Layer Security (TLS) verification for the SCAR connection. | Boolean | `false` | +| `scar.caCert` | The base64-encoded certificate authority (CA) certificate for SCAR. | String | `""` | + +
+ + ```yaml + config: + scar: + endpoint: "" + username: "" + password: "" + insecureSkipVerify: false + caCert: "" + ``` + +#### Image Swap Configuration + +You can configure Palette to use image swap to download the required images. This is an advanced configuration option, and it is only required for air-gapped deployments. You must also install the Palette Image Swap Helm chart to use this option, otherwise, Palette will ignore the configuration. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `imageSwapInitImage` | The image swap init image. | String | `gcr.io/spectro-images-public/thewebroot/imageswap-init:v1.5.2` | +| `imageSwapImage` | The image swap image. | String | `gcr.io/spectro-images-public/thewebroot/imageswap:v1.5.2` | +| `imageSwapConfig`| The image swap configuration for specific environments. | String | `""` | +| `imageSwapConfig.isEKSCluster` | Specifies whether the cluster is an Amazon EKS cluster. Set to `false` if the Kubernetes cluster is not an EKS cluster. | Boolean | `true` | + +
+ + ```yaml + config: + imageSwapImages: + imageSwapInitImage: "gcr.io/spectro-images-public/thewebroot/imageswap-init:v1.5.2" + imageSwapImage: "gcr.io/spectro-images-public/thewebroot/imageswap:v1.5.2" + + imageSwapConfig: + isEKSCluster: true + ``` + +### NATS + +Palette uses [NATS](https://nats.io) and gRPC for communication between Palette components. Dual support for NATS and gRPC is available. You can enable the deployment of an additional load balancer for NATS. Host clusters deployed by Palette use the load balancer to communicate with the Palette control plane. This is an advanced configuration option and is not required for most deployments. Speak with your support representative before enabling this option. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `nats.enabled`| Specifies whether to enable the deployment of a NATS load balancer. | Boolean | `true` | +| `nats.internal`| Specifies whether to deploy a load balancer or use the host network. If this value is set to `true`, then the remaining NATS parameters are ignored. | Boolean | `true` | +| `nats.natsUrl`| The NATS URL. This can be a comma separated list of mappings for the NATS load balancer service. For example, "message1.dev.spectrocloud.com:4222,message2.dev.spectrocloud.com:4222". This parameter is mandatory if `nats.internal` is set to `false`. If `nats.internal` is set to `true`, you can leave this parameter empty. | String | `""` | +| `nats.annotations`| A map of key-value pairs that specifies load balancer annotations for NATS. You can use annotations to change the behavior of the load balancer and the Nginx configuration. This is an advanced setting. We recommend you consult with your assigned support team representative prior to modification. | Object | `{}` | +| `nats.natsStaticIP`| Specify a static IP address for the NATS load balancer service. If empty, a dynamic IP address will be assigned to the load balancer. | String | `""` | + + +
+ + ```yaml + nats: + enabled: true + internal: true + natsUrl: "" + annotations: {} + natsStaticIP: +``` + + + + +### gRPC + +gRPC is used for communication between Palette components. You can enable the deployment of an additional load balancer for gRPC. Host clusters deployed by Palette use the load balancer to communicate with the Palette control plane. This is an advanced configuration option, and it is not required for most deployments. Speak with your support representative before enabling this option. Dual support for NATS and gRPC is available. + +If you want to use an external gRPC endpoint, you must provide a domain name for the gRPC endpoint and a valid x509 certificate. Additionally, you must provide a custom domain name for the endpoint. A CNAME DNS record must point to the IP address of the gRPC load balancer. For example, if your Palette domain name is `palette.example.com`, you could create a CNAME DNS record for `grpc.palette.example.com` that points to the IP address of the load balancer dedicated to gRPC. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `external`| Specifies whether to use an external gRPC endpoint. | Boolean | `false` | +| `endpoint`| The gRPC endpoint. | String | `""` | +| `caCertificateBase64`| The base64-encoded certificate authority (CA) certificate for the gRPC endpoint. | String | `""` | +| `serverCrtBase64`| The base64-encoded server certificate for the gRPC endpoint. | String | `""` | +| `serverKeyBase64`| The base64-encoded server key for the gRPC endpoint. | String | `""` | +| `insecureSkipVerify`| Specifies whether to skip Transport Layer Security (TLS) verification for the gRPC endpoint. | Boolean | `false` | + + + + +```yaml +grpc: + external: false + endpoint: "" + caCertificateBase64: "" + serverCrtBase64: "" + serverKeyBase64: "" + insecureSkipVerify: false +``` + +### Ingress + +Palette deploys an Nginx Ingress Controller. This controller is used to route traffic to the Palette control plane. You can change the default behavior and omit the deployment of an Nginx Ingress Controller. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `enabled`| Specifies whether to deploy an Nginx controller. Set to `false` if you do not want an Nginx controller deployed. | Boolean | `true` | +| `ingress.internal`| Specifies whether to deploy a load balancer or use the host network. | Boolean | `false` | +| `ingress.certificate`| Specify the base64-encoded x509 SSL certificate for the Nginx Ingress Controller. If left blank, the Nginx Ingress Controller will generate a self-signed certificate. | String | `""` | +| `ingress.key`| Specify the base64-encoded x509 SSL certificate key for the Nginx Ingress Controller. | String | `""` | +| `ingress.annotations`| A map of key-value pairs that specifies load balancer annotations for ingress. You can use annotations to change the behavior of the load balancer and the Nginx configuration. This is an advanced setting. We recommend you consult with your assigned support team representative prior to modification. | Object | `{}` | +| `ingress.ingressStaticIP`| Specify a static IP address for the ingress load balancer service. If empty, a dynamic IP address will be assigned to the load balancer. | String | `""` | +| `ingress.terminateHTTPSAtLoadBalancer`| Specifies whether to terminate HTTPS at the load balancer. 
| Boolean | `false` | + + +```yaml +ingress: + enabled: true + ingress: + internal: false + certificate: "" + key: "" + annotations: {} + ingressStaticIP: "" + terminateHTTPSAtLoadBalancer: false +``` + +### Spectro Proxy + +You can specify a reverse proxy server that clusters deployed through Palette can use to facilitate network connectivity to the cluster's Kubernetes API server. Host clusters deployed in private networks can use the [Spectro Proxy pack](../integrations/frp.md) to expose the cluster's Kubernetes API to downstream clients that are not in the same network. Check out the [Reverse Proxy](reverse-proxy.md) documentation to learn more about setting up a reverse proxy server for Palette. + + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `frps.enabled`| Specifies whether to enable the Spectro server-side proxy. | Boolean | `false` | +| `frps.frpHostURL`| The Spectro server-side proxy URL. | String | `""` | +| `frps.server.crt`| The base64-encoded server certificate for the Spectro server-side proxy. | String | `""` | +| `frps.server.key`| The base64-encoded server key for the Spectro server-side proxy. | String | `""` | +| `frps.ca.crt`| The base64-encoded certificate authority (CA) certificate for the Spectro server-side proxy. | String | `""` | + +```yaml +frps: + frps: + enabled: false + frpHostURL: "" + server: + crt: "" + key: "" + ca: + crt : "" +``` + +### UI System + +The table lists parameters to configure the Palette User Interface (UI) behavior. You can disable the UI or the Network Operations Center (NOC) UI. You can also specify the MapBox access token and style layer ID for the NOC UI. MapBox is a third-party service that provides mapping and location services. To learn more about MapBox and how to obtain an access token, refer to the [MapBox Access tokens](https://docs.mapbox.com/help/getting-started/access-tokens) guide. + + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `enabled`| Specifies whether to enable the Palette UI. | Boolean | `true` | +| `ui.nocUI.enable`| Specifies whether to enable the Palette Network Operations Center (NOC) UI. Enabling this parameter requires the `ui.nocUI.mapBoxAccessToken`. Once enabled, all cluster locations will be reported to MapBox. This feature is not FIPS compliant. | Boolean | `false` | +| `ui.nocUI.mapBoxAccessToken`| The MapBox access token for the Palette NOC UI. | String | `""` | +| `ui.nocUI.mapBoxStyledLayerID`| The MapBox style layer ID for the Palette NOC UI. | String | `""` | + + + +```yaml +ui-system: + enabled: true + ui: + nocUI: + enable: false + mapBoxAccessToken: "" + mapBoxStyledLayerID: "" +``` + + + + +### Reach System + +You can configure Palette to use a proxy server to access the internet. Set the parameter `reach-system.reachSystem.enabled` to `true` to enable the proxy server. Proxy settings are configured in the `reach-system.reachSystem.proxySettings` section. + + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `reachSystem.enabled`| Specifies whether to enable the usage of a proxy server for Palette. | Boolean | `false` | +| `reachSystem.proxySettings.http_proxy`| The HTTP proxy server URL. | String | `""` | +| `reachSystem.proxySettings.https_proxy`| The HTTPS proxy server URL. | String | `""` | +| `reachSystem.proxySettings.no_proxy`| A list of hostnames or IP addresses that should not be proxied. 
| String | `""` | + + + ```yaml + reach-system: + reachSystem: + enabled: false + proxySettings: + http_proxy: "" + https_proxy: "" + no_proxy: + ``` + +--- + +
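+
+After you finish updating the sections above in your **values.yaml** file, you can apply the changes with a Helm upgrade. The chart file name below follows the naming used elsewhere in this guide; substitute the chart version you downloaded.
+
+```bash
+helm upgrade --values values.yaml hubble spectro-mgmt-plane-0.0.0.tgz --install
+```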
+ +## 3.4.0 or Earlier + +### Required Parameters + +The following parameters in the **values.yaml** file are required:

+ +- **env.rootDomain** - Used to configure the domain for the Palette installation. Create the CNAME DNS record separately, and make it a wildcard record to account for organization prefixes. Review the [Environment parameters](helm-chart-install-reference.md#environment) to learn more.

+ +- **natsUrl** - Specifies the IP address and port that the NATS servers are reachable on, provided as a comma-separated list of mappings for the NATS load balancer service. Review the [Network Address Translation (NATS) parameters](helm-chart-install-reference.md#network-address-translation-nats) to learn more.

+ + + +- **Registry and Palette Artifact Repository** - Specifies the Docker registry where chart images are stored and the Palette Artifact Repository (PAR). Refer to the [Registry and Palette Artifact Repository parameters](helm-chart-install-reference.md#registry-and-palette-artifact-repository-par). + +### MongoDB + +Palette uses MongoDB as its database and supports two modes of deployment:

+ +- MongoDB deployed and active inside the cluster. + + +- MongoDB hosted on a software as a service (SaaS) platform, such as Atlas. + +The table lists the parameters used to configure a MongoDB deployment. + +| **Parameters** | **Default value** | **Type** | **Description** | **Required/Optional** | +| --- | --- | --- | --- | --- | +| `internal` | `n/a` | Boolean | Specifies the MongoDB deployment either in-cluster or using Mongo Atlas. | Required | +| `databaseUrl` | `mongo-0.mongo,mongo-1.mongo,mongo-2.mongo` | String | URL for MongoDB. Change the URL if you are using Mongo Atlas.| Required| +| `databasePassword` | `""` | String | The base64 encoded MongoDB password. | Optional | +| `replicas` | `3` | Integer | Specifies the number of MongoDB replicas to start.| Required | +| `cpuLimit` | `2000m` | String | Specifies the CPU limit for each MongoDB replica.| Optional | +| `memorylimit` | `4Gi` | String |Specifies the memory limit for each MongoDB replica.| Optional | +| `pvcSize` | `20Gi` | String | Specifies the Persistent Volume Claim (PVC) size for each MongoDB replica.|Optional | +| `storageClass` | `""` | String | Storage class for the PVC. Leave this empty to use the default storage class. |Optional | + + +```yaml +mongo: + databaseUrl: "mongo-0.mongo,mongo-1.mongo,mongo-2.mongo" + replicas: 3 + cpuLimit: "2000m" + memoryLimit: "4Gi" + pvcSize: "20Gi" + storageClass: "" +``` + +### Config + +The configuration file contains the following sections. + +#### SSO + +The table lists parameters to configure SSO SAML authentication in Palette. + +| **Parameters** | **Default value** | **Type** | **Description** | **Required/Optional** | +| --- | --- | --- | --- | --- | +| `saml.enabled` | `false` | Boolean | Specifies whether to enable SSO SAML configuration by setting it to true. | Optional| +| `saml.acsUrlRoot` | `myfirstpalette.spectrocloud.com` | String | Root URL of the Assertion Consumer Service (ACS).| Optional| +| `saml.acsUrlScheme` | `https` | String | URL scheme of the ACS either http or https. | Optional | +| `saml.audienceUrl` | `https://www.spectrocloud.com` | String | URL of the intended audience for the SAML response.| Optional| +| `saml.entityID` | `https://www.spectrocloud.com` | String | Entity ID of the Service Provider.| Optional | +| `saml.apiVersion` | `v1` | String |SSO SAML API version to use.| Optional | + +```yaml +config: + sso: + saml: + enabled: false + acsUrlRoot: "myfirstpalette.spectrocloud.com" + acsUrlScheme: "https" + audienceUrl: "https://www.spectrocloud.com" + entityId: "https://www.spectrocloud.com" + apiVersion: "v1" +``` + +#### Email + +The table lists the parameters to configure email settings in Palette's self-hosted mode. + +| **Parameters** | **Default value** | **Type** | **Description** | **Required/Optional** | +| --- | --- | --- | --- | --- | +| `enabled` | `false` | Boolean | Specifies whether to enable email configuration. | Optional| +| `emailID ` | `""` | String | Email address for sending mail.| Optional| +| `smtpServer` | `smtp.gmail.com` | String | Simple Mail Transfer Protocol (SMTP) server used for sending mail. 
| Optional | +| `smtpPort` | `587` | Integer | SMTP port used for sending mail.| Optional| +| `insecureSkipVerifyTIs` | `true` | Boolean | Specifies whether to skip Transport Layer Security (TLS) verification for the SMTP connection.| Optional | +| `fromEmailID` | `noreply@spectrocloud.com` | String |Email address of the ***From*** address.| Optional | +| `password` | `""` | String |The base64-encoded SMTP password when sending emails.| Optional | + +```yaml +config: + email: + enabled: false + emailId: "@spectrocloud.com" + smtpServer: "smtp.gmail.com" + smtpPort: 587 + insecureSkipVerifyTls: true + fromEmailId: "noreply@spectrocloud.com" + password: "" +``` + +#### Environment + +The table lists environment variables required to deploy Palette. + +| **Parameters** | **Default value** | **Type** | **Description** | **Required/Optional** | +| --- | --- | --- | --- | --- | +| `env.rootDomain` | `""` | String | Specifies the default Domain Name System (DNS) record mapped to the *ingress-nginx-controller* load balancer. It is required if false. Otherwise, leave it empty. | Required| +| `env.installerMode` | `self-hosted` | String | Specifies the installer mode. Do not modify the value.| Required| +| `env.installerCloud` | `""` | String | Specifies the cloud provider. It should be left empty. | Optional | + +```yaml +config: + env: + rootDomain: "" + installerMode: "self-hosted" + installerCloud: "" +``` + +#### Cluster + +The cluster parameter specifies how the Kubernetes cluster is deployed. + + +| **Parameters** | **Default value** | **Type** | **Description** | **Required/Optional** | +| --- | --- | --- | --- | --- | +| `stableEndpointAccess` | `false` | Boolean | False indicates the Kubernetes cluster is deployed in a private network through a stable private endpoint. True indicates the cluster is deployed through a public endpoint. | Optional| + +```yaml +config: + cluster: + stableEndpointAccess: false +``` + +#### Registry and Palette Artifact Repository (PAR) + +The table lists Registry and Palette Artifact Repository (PAR) parameters to install Palette using Helm Chart. + +| **Parameters** | **Default value** | **Type** | **Description** | **Required/Optional** | +| --- | --- | --- | --- | --- | +| `registry.endpoint` | `""` | String | The endpoint URL for the registry. | Required| +| `registry.name` | `""` | String | The name of the registry. | Required| +| `registry.password` | `""` | String | The password for the registry. | Required| +| `registry.username` | `""` | String | The username for the registry. | Required| +| `scar.endpoint` | `""` | String | The endpoint URL of the PAR. | Required| +| `scar.username` | `""` | String | The username for the PAR. | Required| +| `scar.password` | `""` | String | The password for the PAR. | Required| + +```yaml +config: + registry: + endpoint: "" + name: "" + password: "" + username: "" + + scar: + endpoint: "" + username: "" + password: "" +``` + +Contact support@spectrocloud.com to gain access to the Helm Chart. + +### Network Address Translation (NATS) + +The table lists Network Address Translation (NATS) parameters that Palette uses for communication between the tenant and management clusters. The internal flag determines whether NATS uses a new load balancer or the existing ingress service. 
To learn about NATS cluster configuration map properties, refer to [NATS clustering configuration.](https://docs.nats.io/running-a-nats-service/configuration/clustering/cluster_config) + +| **Parameters ** | **Default Value** | **Type ** | **Description** | **Required/Optional** | +| ------------ | ------------- | ------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------ | +| `internal` | `true` | Boolean | `true` means NATS shares the ingress load balancer or uses hostNetwork. `false` means a cloud load balancer is used. | Optional | +| `natsUrl` | `""` | String | Comma-separated list of mappings for NATS load balancer service. Required if `nats.internal` is false. | Required | +| `annotations`| `{}` | Map | A map of key-value pairs that specify the load balancer annotations for NATS. These annotations vary depending on the cloud provider. | Optional | +| `routes` | `[]` | List | List of server URLs for clustering (excluding self-routes) that can include authentication via token or username/password in the URL. | Optional | +| `natsStaticIP`| `""` | String | Static IP for the NATS load balancer service. If empty, a dynamic IP address will be generated. | Optional | + +```yaml +nats: + internal: true + natsUrl: "" + annotations: {} + routes: [] + natsStaticIP: "" +``` + +### Ingress + +The table lists parameters used to configure the NGINX Ingress Controller, which provides an external HTTP load balancer for Kubernetes services. Refer to [Set Up Ingress](../clusters/cluster-groups/ingress-cluster-group.md) for more guidance. + +| **Parameters** | **Default Value** | **Type** | **Description** | **Required/Optional** | +|--------------------------------|---------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------|--------------------| +| `Internal` | `false` | Boolean |Specify whether to use a cloud load balancer or host network. | Required | +| `certificate` | `""` | String | Default SSL certificate for NGINX Ingress Controller. If left blank, the NGINX Ingress Controller will generate a self-signed certificate. | Optional | +| `key` | `""` | String | Default SSL key for the NGINX Ingress Controller. | Optional | +| `annotations` | `{}` | Map | A map of key-value pairs that specifies load balancer annotations for ingress. | Optional | +| `ingressStaticIP` | `""` | String | Static IP for the ingress load balancer service. If empty, a dynamic IP address will be generated. | Optional | +| `terminateHTTPSAtLoadBalancer` | `false` | Boolean | Specify whether to terminate HTTPS at the load balancer. | Optional | + +```yaml +ingress: + ingress: + internal: false + certificate: "" + key: "" + annotations: {} + ingressStaticIP: "" + terminateHTTPSAtLoadBalancer: false +``` + +### Spectro Proxy + +The table lists parameters to configure the Spectro server-side proxy. + +| **Parameters** | **Default Value** | **Type** | **Description** | **Required/Optional** | +|---------------------|------------------------------|---------|---------------------------------------------------------------|--------------------| +| `enabled` | `false` | Boolean | Specifies whether Spectro Proxy is enabled or not. | Optional | +| `frpHostURL` | `proxy.sample.spectrocloud.com` | String | The URL of the Spectro proxy host. 
| Optional | +| `server.crt` | `"LS0..."` | String | Specifies the certificate file for the Spectro Proxy server. | Optional | +| `server.key` | `"LS0..."` | String | Specifies the private key file for the Spectro Proxy server. | Optional | +| `ca` | `"LS0..."` | String | Specifies the Certificate Authority (CA) for the Spectro Proxy server. | Optional | +| `ca.crt` | `"LS0..."` | String | Specifies the CA certificate file for the Spectro Proxy server. | Optional | + +```yaml +frps: + frps: + enabled: false + frpHostURL: proxy.sample.spectrocloud.com + server: + crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURqekNDQW5lZ0F3SUJBZ0lVZTVMdXBBZGljd0Z1SFJpWWMyWEgzNTFEUzJJd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0tERW1NQ1FHQTFVRUF3d2RjSEp2ZUhrdWMyRnRjR3hsTG5Od1pXTjBjbTlqYkc5MVpDNWpiMjB3SGhjTgpNakl4TURFME1UTXlOREV5V2hjTk1qY3hNREV6TVRNeU5ERXlXakI3TVFzd0NRWURWUVFHRXdKVlV6RUxNQWtHCkExVUVDQk1DUTBFeEV6QVJCZ05WQkFjVENsTmhiblJoUTJ4aGNtRXhGVEFUQmdOVkJBb1RERk53WldOMGNtOUQKYkc5MVpERUxNQWtHQTFVRUN4TUNTVlF4SmpBa0JnTlZCQU1USFhCeWIzaDVMbk5oYlhCc1pTNXpjR1ZqZEhKdgpZMnh2ZFdRdVkyOXRNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXd5bEt3MmlxClBXM2JrQU0wV3RhaEFLbEppcWFHd05LUDVRRTZ6ZW5NM2FURko3TjIwN0dWcUNGYzJHTDNodmNhTDFranZjeEkKK2lybHpkbm9hcVhUSmV3ZkJiTGs2SGVhZmdXUVp3NHNNeE5QRUVYYlNXYm54Mm03Y2FlbVJiUWZSQWhPWXRvWgpIWG1IMzQ1Q25mNjF0RnhMeEEzb0JRNm1yb0JMVXNOOUh2WWFzeGE5QUFmZUNNZm5sYWVBWE9CVmROalJTN1VzCkN5NmlSRXpEWFgvem1nOG5WWFUwemlrcXdoS3pqSlBJd2FQa2ViaXVSdUJYdEZ0VlQwQmFzS3VqbURzd0lsRFQKVmR4SHRRQUVyUmM4Q2Nhb20yUkpZbTd1aHNEYlo2WVFzS3JiMmhIbU5rNENVWUd5eUJPZnBwbzR2bFd1S2FEcgpsVFNYUXlPN0M0ejM1d0lEQVFBQm8xNHdYREJhQmdOVkhSRUVVekJSZ2dsc2IyTmhiR2h2YzNTSEJIOEFBQUdDCkhYQnliM2g1TG5OaGJYQnNaUzV6Y0dWamRISnZZMnh2ZFdRdVkyOXRnaDhxTG5CeWIzaDVMbk5oYlhCc1pTNXoKY0dWamRISnZZMnh2ZFdRdVkyOXRNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUEvRFJFVm54SWJRdi9uMDEvSQpJd1d0ekhKNGNHOUp6UlB6dmszNUcvRGJOVzZYZ0M3djBoWlFIVHg5bzMrckxoSUFiWTNmbjc1VEtlN3hMRWpiCkI3M3pGWURJSStkYzM5NkQzZU51M2NxRGIvY01kYmlFalhod2ttZk9NRm9qMnpOdHJIdzFsSjA0QlNFMWw1YWgKMDk0Vy9aaEQ2YTVLU3B0cDh1YUpKVmNrejRYMEdRWjVPYjZadGdxZVVxNytqWVZOZ0tLQzJCMW1SNjMyMDNsZwozVFZmZEkrdmI3b292dVdOOFRBVG9qdXNuS25WMmRMeTFBOWViWXYwMEM3WWZ6Q0NhODgrN2dzTGhJaUJjRHBPClJkWjU3QStKanJmSU5IYy9vNm5YWFhDZ2h2YkFwUVk1QnFnMWIzYUpUZERNWThUY0hoQVVaQzB5eU04bXcwMnQKWHRRQwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBd3lsS3cyaXFQVzNia0FNMFd0YWhBS2xKaXFhR3dOS1A1UUU2emVuTTNhVEZKN04yCjA3R1ZxQ0ZjMkdMM2h2Y2FMMWtqdmN4SStpcmx6ZG5vYXFYVEpld2ZCYkxrNkhlYWZnV1FadzRzTXhOUEVFWGIKU1dibngybTdjYWVtUmJRZlJBaE9ZdG9aSFhtSDM0NUNuZjYxdEZ4THhBM29CUTZtcm9CTFVzTjlIdllhc3hhOQpBQWZlQ01mbmxhZUFYT0JWZE5qUlM3VXNDeTZpUkV6RFhYL3ptZzhuVlhVMHppa3F3aEt6akpQSXdhUGtlYml1ClJ1Qlh0RnRWVDBCYXNLdWptRHN3SWxEVFZkeEh0UUFFclJjOENjYW9tMlJKWW03dWhzRGJaNllRc0tyYjJoSG0KTms0Q1VZR3l5Qk9mcHBvNHZsV3VLYURybFRTWFF5TzdDNHozNXdJREFRQUJBb0lCQUFPVVZFeTFOTG9mczdFMgpmZFZVcm10R3I1U2RiVWRJRlYrTDREbzZtWWxQSmxhT0VoWGI0ZlROZDloNEtEWVBmaWwwSnhXcUU0U1RHTmZuCnNUMlRnUVhuQ01LZi8xYk1Lc2M0N3VjVStYYU9XaHJnVFI5UmhkckFjN0duODRLL3hQc0ljL2VZTEhHLzh1QUUKeWUvLzVmRkM2QmpXY0hUM1NkTlZnd3duamJudG5XTXIzTFJBVnJBamZBckxveWUwS0F2YytYdXJLTEVCcmMyVQpjaHlDbitZemJKN0VlSG44UXdQNGdBNXVSK0NCMFJPeFErYXIzS3M5YUhkZTQ1OEVNNEtLMnpUOXA4RWZRc1lFCkFtNUpxWjliR0JEVHV1dEkyNm9GK0pLQ1IzZzhXNERRcHVYRUZoVjlya0pMSm13RDhQb0JaclF6UzZvdmJhdkkKRk42QVM4RUNnWUVBOEcxQzFxZVh4dTQ4aEYxak5MTCswRmxkeWdFem9SMmFoRGJCai8weUZkQVVjU2pYTzk0NAozN1dORTBUUG10WG1Vc3NZTlBTR21XaWI2OUhicEFoMTY3SWVwNE9LaVlZdkozYm1oUC9WNzFvK3M0SWJlSHh1CkVJbWVVckFOZWRoQURVQnZ4c1lXRWxlVlVJSFFRcjY1VHM2ZjIrWkpTKzg4TU05bUorL3BmcmNDZ1lFQXo4MXgKR3JiSE5oak56RjhZMjhiK0hMNW5rdDR0SUdkU3hnbW9PMFFJeGkrQVNZTzB0WW42VFk0ZHI5ZXErMzE3b21ZawpMbDNtNENORDhudG1vYzRvWnM4SUpDQ0IrZjNqcTY4OHdoQU9vVHZ4dDhjZVJqOFRhRHl1SHZwS043OVNsVVd2CjBJd2ZRNDNIemd3SWJiSWhjcTRJVGswanI0VHdWbThia283VElGRUNnWUJoNnUzVXhHN0JHeGZVaE1BNW4waSsKREJkeGhPbkZEV3gzdW1FOHhrN1dxV2NaNnhzMWk3eTRCNVhNS2pNdkNUeURyYWxQTCtOOXFTZ1BjK216TmFybwo4aU1mOENmRStMeE5vMVFoQ0p6Vm5YaDUzVnhZeHJ5QXlidU1TNTFCYVh3MHFYQ2NrT0krV0NNOHBaSHZEUVFsCmYydUZ3SlZMY3NTZDBHbjNpL01ab3dLQmdBY1BzUjg2Uk15MnpROTd6OGx3R3FSNVorV2F2U2ZUdXdGVnhLeTIKNUNGdjdja1J1NnRMbEFEY3FtK1dRWTRvTm5KUFREMXpIV3hTWm5XdjhjM2Z4b212MFZRQThzbSs4ZVNjb05EcgpZTVBqMkpQcEpVTTMwMzRBU2Q1dG5PWUdEMVZaTjk4N1U3aWs4Ynd6dG5tYnl2MHRvc1NlWkc4TGNtdE5mVDllCnNSZnhBb0dCQUpTV1lDellyTlRMNnRUSnh5M2FqWm5jZkxrMEV0eWNCd05FRXZHVzVSVE9LOUFYTE96RzN0eHUKajZqWlRpaUFRU09aaVd0clJHU0U0bEkyQ1MvcjNjd3VuSGlnZlovd1dKZldkZ0JpRnZqOTVFbUVQWUZaRDRobQpkT3l5UHhRRXFTRmprQ21BS2plOFBpTDdpU01GbGhBZTZQWFljQlExdCtzd01UeXBnY3RrCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== + ca: + crt : 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURNVENDQWhtZ0F3SUJBZ0lVSHhWK0ljVGZHUElzdW8yY3dqQ0Q0Z2RSTFFRd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0tERW1NQ1FHQTFVRUF3d2RjSEp2ZUhrdWMyRnRjR3hsTG5Od1pXTjBjbTlqYkc5MVpDNWpiMjB3SGhjTgpNakl4TURFME1UTXlOREV5V2hjTk16WXdOakl5TVRNeU5ERXlXakFvTVNZd0pBWURWUVFEREIxd2NtOTRlUzV6CllXMXdiR1V1YzNCbFkzUnliMk5zYjNWa0xtTnZiVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0MKQVFvQ2dnRUJBSy90WXBHVi9HRURUWnZzL25QQ2lOK0U3K1dOQ21GeU1NQjdkazVOT3JzQWZIaVVvZ1JRVUo0WQptSjhwVmYrSzhTRFBsdGNYcW40WVVTbmxiUERsVlBkWU5zOTEwT3RaS1EwNW96aUtGV2pNbS85NHlLSjVyVzNsCndDNEN0ayttUm9Ib0ZQQS81dmFVbVZHdlVadjlGY0JuL0pKN2F4WnRIQk1PRiticXQ0Zmd0ci9YMWdOeWhPVzUKZTVScGpESkozRjJTVnc5NUpBQSt4a3V3UitFSmVseEtnQVpxdDc0ejB4U2ROODZ0QzNtK0wxRGs2WVVlQWEzZApvM3Rsa3ZkeDV6dUJvSmI2QmpZWEV4UE1PbThRcHFNVWRLK3lDZUdrem9XQStDOUtFdGtVaERCWktENStNWXRZCktVMUh1RXJCbmw2Z3BuWTRlbzJjVTRxdkNwZzZ4S3NDQXdFQUFhTlRNRkV3SFFZRFZSME9CQllFRklKMkRkTjgKc2ZtVjRCT1ZFL0FjZ0VEejArNmlNQjhHQTFVZEl3UVlNQmFBRklKMkRkTjhzZm1WNEJPVkUvQWNnRUR6MCs2aQpNQThHQTFVZEV3RUIvd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQWhQVi9RMVl1YWVTOTZVCmhjVGQ4RWdJaHhpbHFiTWlTQm5WaVdrdlJzWk94UUIwNTFScWtwT3g0UTRsckdaOGVJWWc3T0trTTdzejhuTVQKL2pxS21sZDY0MzJCcURCMlNkNVp5ZFdReHAwU1laRTlnVWszYk9KRGtZVXQ4b1cvZDBWeG9uU05LQVN3QmZKaApWV1VZUUlpNm55K0ZZZmtuRFNvRnFlY2Z3SDBQQVUraXpnMkI3KzFkbko5YisyQ21IOUVCallOZ2hoNlFzVlFQCkh2SkdQQURtandPNkJOam5HK0Z3K0Z6cmFXUTNCTjAwb08zUjF6UmgxZERmTTQzR3oxRmZGRW5GSXI5aGFuUnQKWHJFZm8vZWU5bjBLWUFESEJnV1g4dlhuNHZrRmdWRjgwYW9MUUJSQTBxWXErcW1pVlp6YnREeE9ldFEyRWFyTQpyNmVWL0lZPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== +``` + +### UI System + +The table lists parameters for the Network Operations Center User Interface (NOC UI). Palette's NOC UI enables easy location monitoring of multi-location clusters through an intuitive UI. + +| **Parameters ** | **Default Value** | **Type** | **Description** | **Required/Optional** | +|---------------------|---------------|---------|------------------------------------------------------|--------------------| +| `enabled` | `false` | Boolean | Specifies whether to enable the Palette Network Operations Center (NOC) UI. Enabling this parameter requires the `ui.nocUI.mapBoxAccessToken`. Once enabled, all cluster locations will be reported to MapBox. | Optional | +| `mapBoxAccessToken` | `""` | String | Access token for the MapBox API. | Optional | +| `mapBoxStyledLayerID`| `""` | String | ID for the MapBox style layer. | Optional | + +```yaml +ui-system: + ui: + nocUI: + enable: false + mapBoxAccessToken: "" + mapBoxStyledLayerID: "" +``` + + + + diff --git a/docs/docs-content/enterprise-version/monitoring.md b/docs/docs-content/enterprise-version/monitoring.md new file mode 100644 index 0000000000..55020a9dc0 --- /dev/null +++ b/docs/docs-content/enterprise-version/monitoring.md @@ -0,0 +1,56 @@ +--- +sidebar_label: "Cluster Monitoring Metrics" +title: "Enterprise Cluster Monitoring Metrics" +description: "Enterprise Cluster Monitoring Metrics for Palette's Enterprise (on-premises) variant." 
+icon: "" +hide_table_of_contents: false +sidebar_position: 60 +tags: ["self-hosted", "enterprise", "monitoring"] +--- + +## Pods Monitoring Metrics +### Namespaces to Monitor Pods + +|**Namespaces** |**Interpretation**| +|-----------|--------------| +|**ui-system** |Palette Management UI| +|**cp-system** |System Management UI| +|**nats-system**| Message System| +|**ingress-nginx**| Ingress services| +|**hubble-system**|Core backend services| +|**jet-system**|Pivot Tenant Clusters| + +### Exceptions + +The below pods are dynamically created from jobs and can be excluded from monitoring. + + +|**Pods Prefix** |**Namespace**| +|-----------|--------------| +|ingress-nginx-admission-patch- |ingress-nginx| +|ingress-nginx-admission-create- |ingress-nginx| +|packsync- |hubble-system| +|cleanup- |hubble-system| + + + +## CPU and Memory Monitoring Metrics + +### Default Specifications +* CPU: 4 vCPU +* RAM: 8 GB RAM +* CP Nodes: 3 + +### Thresholds +* CPU warn [per node ] > 70% +* CPU alert [per node] > 80% +* Memory Warn [per node] > 80% +* Memory Alert [per node] > 90% + +### Node Monitoring Metrics + #### Number of Nodes: 3 + #### Node Alerts +* Node up +* Node down +* Node unreachable + diff --git a/docs/docs-content/enterprise-version/on-prem-system-requirements.md b/docs/docs-content/enterprise-version/on-prem-system-requirements.md new file mode 100644 index 0000000000..7e1a6b532f --- /dev/null +++ b/docs/docs-content/enterprise-version/on-prem-system-requirements.md @@ -0,0 +1,850 @@ +--- +sidebar_label: "System Requirements" +title: "System Requirements" +description: "An overview of the self-hosted Palette system requirements." +icon: "" +hide_table_of_contents: false +toc_min_heading_level: 2 +toc_max_heading_level: 3 +sidebar_position: 0 +tags: ["self-hosted", "enterprise"] +--- + + + +## System Requirements + +Palette is available as a self-hosted application that you install in your environment. The self-hosted version is a dedicated Palette environment hosted on VMware instances or in an existing Kubernetes cluster. Self-hosted Palette is available in the following three modes: + +| **Self-Hosted Modes** | **Description** | +| --------------------- | --------------------------------------------------------------------------------- | +| **VMWare Enterprise Mode** | A multi-node, highly available version for production purposes. | +| **VMWare Quick Start Mode** | A single VM deployment of the platform that is ideal for use in Proofs of Concept (PoCs). | +| **Helm Chart Mode** | Install Palette in an existing Kubernetes cluster using a Helm Chart. | + +The next sections describe specific requirements for all modes. + +
+
+## Prerequisites
+
+The following are prerequisites for deploying a Kubernetes cluster in VMware:
+* vSphere version 7.0 or above. vSphere 6.7 is supported but not recommended, as it reached the end of general support in 2022.
+
+
+* Configuration Requirements - A Resource Pool needs to be configured across the hosts onto which the workload clusters will be provisioned. Every host in the Resource Pool needs access to shared storage, such as vSAN, to use high-availability control planes. Network Time Protocol (NTP) must be configured on each ESXi host.
+
+
+* You need an active vCenter account with all the permissions listed below in the VMware Cloud Account Permissions section.
+
+
+* Install a Private Cloud Gateway for VMware as described in the Creating a VMware Cloud Gateway section. Installing the Private Cloud Gateway automatically registers a cloud account for VMware in Palette. You can register additional VMware cloud accounts in Palette as described in the Creating a VMware Cloud account section.
+
+* Kubernetes version 1.19 or later when installing Palette in a cluster using a Helm Chart. We recommend using a managed Kubernetes service, such as Amazon EKS or Azure AKS.
+
+* Subnet with egress access to the internet (direct or via proxy):
+  * For proxy: HTTP_PROXY, HTTPS_PROXY (both are required).
+  * Outgoing internet connection on port 443 to api.spectrocloud.com.
+
+
+* The Private Cloud Gateway IP requirements are:
+  * One (1) node - one (1) IP or three (3) nodes - three (3) IPs.
+  * One (1) Kubernetes control-plane VIP.
+  * One (1) Kubernetes control-plane extra.
+
+
+* Assign IPs for application workload services (e.g., Load Balancer services).
+
+
+* A DNS to resolve public internet names (e.g., api.spectrocloud.com).
+
+
+* Shared storage between vSphere hosts.
+
+
+* A cluster profile created in Palette for VMware.
+
+
+* Zone tagging, which enables dynamic storage allocation for persistent storage. Refer to the Zone Tagging section below.
+
+
+### Zone Tagging
+
+ Zone tagging is required for dynamic storage allocation across fault domains when provisioning workloads that require persistent storage. This is required for the installation of the Palette platform itself and is also useful for workloads deployed in the tenant clusters if they have persistent storage needs. Use vSphere tags on data centers (kubernetes-region) and compute clusters (kubernetes-zone) to create distinct zones in your environment.
+
+ As an example, assume your vCenter environment includes three compute clusters: *cluster-1*, *cluster-2*, and *cluster-3* as part of data center dc-1. You can tag them as follows:
+
+| **vSphere Object** | **Tag Category** | **Tag Value** |
+| ------------------ | ---------------- | ------------- |
+| dc-1               | k8s-region       | region1       |
+| cluster-1          | k8s-zone         | az1           |
+| cluster-2          | k8s-zone         | az2           |
+| cluster-3          | k8s-zone         | az3           |
+
+
+:::info
+
+The exact values for the kubernetes-region and kubernetes-zone tags can be different from the ones described in the example above, as long as these are unique.
+
+:::
+
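+
+If you prefer to script this step, the following is a hedged sketch of creating the example categories and tags with the `govc` CLI. It assumes `govc` is installed and the `GOVC_URL` connection variables are already exported; the inventory paths match the example above, so substitute your own data center and cluster names, and verify the flags against the govc documentation for your version.
+
+```bash
+# Create the tag categories used for regions and zones.
+govc tags.category.create k8s-region
+govc tags.category.create k8s-zone
+
+# Create the tags and attach them to the data center and a compute cluster.
+govc tags.create -c k8s-region region1
+govc tags.attach region1 /dc-1
+
+govc tags.create -c k8s-zone az1
+govc tags.attach az1 /dc-1/host/cluster-1
+```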
+
+### Tag Requirements
+The following requirements apply when creating the tags:
+* A valid tag must consist of alphanumeric characters.
+* The tag must start and end with an alphanumeric character.
+* The regex used for validation is `(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?`.
+
+**Example Tags:**
+* MyValue
+* my_value
+* 12345
+
+
+
+
+
+## VMware Privileges
+
+The vSphere user account that is deploying Palette must have the following minimum vSphere privileges. The **Administrator** role provides super-user access to all vSphere objects. For users without the **Administrator** role, one or more custom roles can be created based on the tasks the user performs.
+Permissions and privileges vary depending on the vSphere version you are using.
+
+Select the tab that corresponds with your vSphere version.
+
+ + + + + + +#### Root-Level Role Privileges + +
+ +The root-level role privileges are applied to root object and Datacenter objects only. + +|**vSphere Object**|**Privileges**| +|---------------|----------| +|**Cns**|Searchable| +|**Datastore**|Browse datastore +|**Host**|Configuration +|| Storage partition configuration +|**vSphere** **Tagging**|Create vSphere Tag| +||Edit vSphere Tag| +|**Network**|Assign network| +|**Sessions**|Validate session| +|**VM Storage Policies**|View VM storage policies| +|**Storage views**|View| + +
+
+#### Spectro Role Privileges
+
+
+
+
+
+#### Cns Privileges
+ - Searchable
+
+
+
+
+
+#### Datastore Privileges
+ - Allocate Space
+ - Browse Datastore
+ - Low level file operations
+ - Remove file
+ - Update virtual machine files
+ - Update virtual machine metadata
+
+
+
+
+
+
+#### Folder Privileges
+ - Create folder
+ - Delete folder
+ - Move folder
+ - Rename folder
+
+
+
+
+
+#### Host Privileges
+ - Local Operations
+   * Reconfigure virtual machine
+
+
+
+
+
+ +:::info + +If the network is a Distributed Port Group under a vSphere Distributed Switch (VDS), ReadOnly access to the VDS without “Propagate to children” needs to be provided. + +::: + +
+ + #### Network Privileges + + - Assign Network + +
+ + + + #### Resource Privileges + + - Apply recommendation + - Assign virtual machine to resource pool + - Migrate powered off virtual machine + - Migrate powered on virtual machine + - Query vMotion + + + + + + #### Sessions Privileges + - Validate session + + + + + + #### VM Storage Policies Privileges + + - View access for VM storage policies is required. Ensure the privilege `StorageProfile.View` is available. Refer to the [VM Storage Policies Privileges](https://docs.vmware.com/en/VMware-vSphere/8.0/vsphere-security/GUID-DECEAE60-58CB-4B30-8874-FA273573E6B5.html) resource to learn more. + + + + + + #### Storage Views Privileges + - View + + + + + + + #### Task Privileges + + - Create task + - Update task + + + + + + #### vApp Privileges + + - Import + - View OVF environment + - vApp application configuration + - vApp instance configuration + + + + + + #### vSphere Tagging + + - Create vSphere Tag + - Edit vSphere Tag + + + + + + + #### Virtual Machines Privileges + + +
+ +| | | | +| ------------------------- | ------------------------------------------- | ------------------------------------- | +| **Change Configuration** | | | +| | Change Settings | Extend virtual disk | +| | Change Swapfile Placement | Modify device settings | +| | Configure host USB device | Query Fault Tolerance compatibility | +| | Configure raw device | Query unowned files | +| | Add existing disk | Reload from path | +| | Add new disk | Remove disk | +| | Add or remove device | Rename | +| | Change resource | Reset guest information | +| | Configure managedBy | Set annotation | +| | Display connection settings | Toggle fork parent | +| | Advanced configuration | Upgrade virtual machine compatibility | +| | Change CPU count | | +| **Guest operations** | | | +| | Guest operation alias modification | Guest operation alias query | +| | Guest operation modifications | Guest operation queries | +| | Guest operation program execution | | +| **Interaction** | | | +| | Power off | Power on | +| **Inventory** | | | +| | Create from existing | Move | +| | Create new | Remove | +| **Provisioning** | | | +| | Allow disk access | Customize guest | +| | Allow file access | Deploy template | +| | Allow read-only disk access | Mark as template | +| | Allow virtual machine download | Mark as virtual machine | +| | Allow virtual machine files upload | Modify customization specification | +| | Clone template | Promote disks | +| | Clone virtual machine | Read customization specifications | +| | Create template from virtual machine | | +| **Service Configuration** | | | +| | Allow notifications | Modify service configuration | +| | Allow polling of global event notifications | Query service configurations | +| | Manage service configurations | Read service configuration | +| **Snapshot Management** | | | +| | Create snapshot | Remove snapshot | +| | Rename snapshot | Revert to snapshot | +| **vSphere Replication** | | | +| | Configure replication | Monitor replication | +| | Monitor replication | | + + +
+ + + + #### vSAN + + - Cluster + * ShallowRekey + + + +
+ +
+ + + + +#### Root-Level Role Privileges + +
+ +The root-level role privileges are applied to root object and Datacenter objects only. + +|**vSphere Object**|**Privileges**| +|---------------|----------| +|**Cns**|Searchable| +|**Datastore**|Browse datastore +|**Host**|Configuration +|| Storage partition configuration +|**vSphere** **Tagging**|Create vSphere Tag| +||Edit vSphere Tag| +|**Network**|Assign network| +|**Sessions**|Validate session| +|**Profile-driven storage**|Profile-driven storage view| +|**Storage views**|View| + +
+ +#### Spectro Role Privileges + + + + + + +#### Cns Privileges + - Searchable + + + + + +#### Datastore Privileges + - Allocate Space + - Browse Datastore + - Low level file operations + - Remove file + - Update virtual machine files + - Update virtual machine metadata + + + + + + + #### Folder Privileges + - Create folder + - Delete folder + - Move folder + - Rename folder + + + + + + #### Host Privileges + - Local Operations + * Reconfigure virtual machine + + + + + +
+ + +:::info + +If the network is a Distributed Port Group under a vSphere Distributed Switch (VDS), ReadOnly access to the VDS without “Propagate to children” needs to be provided. + +::: + + #### Network Privileges + + - Assign Network + +
+ + + + #### Resource Privileges + + - Apply recommendation + - Assign virtual machine to resource pool + - Migrate powered off virtual machine + - Migrate powered on virtual machine + - Query vMotion + + + + + + #### Sessions Privileges + - Validate session + + + + + + #### Profile Driven Storage + - Profile-driven storage view + + + + + + #### Storage Views Privileges + - View + + + + + + + #### Task Privileges + + - Create task + - Update task + + + + + + #### vApp Privileges + + - Import + - View OVF environment + - vApp application configuration + - vApp instance configuration + + + + + + #### vSphere Tagging + + - Create vSphere Tag + - Edit vSphere Tag + + + + + + + #### Virtual Machines Privileges + + +
+ +| | | | +| ------------------------- | ------------------------------------------- | ------------------------------------- | +| **Change Configuration** | | | +| | Change Settings | Extend virtual disk | +| | Change Swapfile Placement | Modify device settings | +| | Configure host USB device | Query Fault Tolerance compatibility | +| | Configure raw device | Query unowned files | +| | Add existing disk | Reload from path | +| | Add new disk | Remove disk | +| | Add or remove device | Rename | +| | Change resource | Reset guest information | +| | Configure managedBy | Set annotation | +| | Display connection settings | Toggle fork parent | +| | Advanced configuration | Upgrade virtual machine compatibility | +| | Change CPU count | | +| **Guest operations** | | | +| | Guest operation alias modification | Guest operation alias query | +| | Guest operation modifications | Guest operation queries | +| | Guest operation program execution | | +| **Interaction** | | | +| | Power off | Power on | +| **Inventory** | | | +| | Create from existing | Move | +| | Create new | Remove | +| **Provisioning** | | | +| | Allow disk access | Customize guest | +| | Allow file access | Deploy template | +| | Allow read-only disk access | Mark as template | +| | Allow virtual machine download | Mark as virtual machine | +| | Allow virtual machine files upload | Modify customization specification | +| | Clone template | Promote disks | +| | Clone virtual machine | Read customization specifications | +| | Create template from virtual machine | | +| **Service Configuration** | | | +| | Allow notifications | Modify service configuration | +| | Allow polling of global event notifications | Query service configurations | +| | Manage service configurations | Read service configuration | +| **Snapshot Management** | | | +| | Create snapshot | Remove snapshot | +| | Rename snapshot | Revert to snapshot | +| **vSphere Replication** | | | +| | Configure replication | Monitor replication | +| | Monitor replication | | + + +
+ + + + #### vSAN + + - Cluster + * ShallowRekey + + + +
+ + + +
+ + + + +#### Root-Level Role Privileges + +
+ +The root-level role privileges are applied to root object and Datacenter objects only. + +|**vSphere Object**|**Privileges**| +|---------------|----------| +|**Cns**|Searchable| +|**Datastore**|Browse datastore +|**Host**|Configuration +|| Storage partition configuration +|**vSphere** **Tagging**|Create vSphere Tag| +||Edit vSphere Tag| +|**Network**|Assign network| +|**Sessions**|Validate session| +|**Profile-driven storage**|Profile-driven storage view| +|**Storage views**|View| + +
+ +#### Spectro Role Privileges + + + + + + +#### Cns Privileges + - Searchable + + + + + +#### Datastore Privileges + - Allocate Space + - Browse Datastore + - Low level file operations + - Remove file + - Update virtual machine files + - Update virtual machine metadata + + + + + + + #### Folder Privileges + - Create folder + - Delete folder + - Move folder + - Rename folder + + + + + + #### Host Privileges + - Local Operations + * Reconfigure virtual machine + + + + + +
+ +:::info + +If the network is a Distributed Port Group under a vSphere Distributed Switch (VDS), ReadOnly access to the VDS without “Propagate to children” needs to be provided. + +::: + + #### Network Privileges + + - Assign Network + +
+ + + + #### Resource Privileges + + - Apply recommendation + - Assign virtual machine to resource pool + - Migrate powered off virtual machine + - Migrate powered on virtual machine + - Query vMotion + + + + + + #### Sessions Privileges + - Validate session + + + + + + #### Profile Driven Storage + - Profile-driven storage view + + + + + + #### Storage Views Privileges + - View + + + + + + + #### Task Privileges + + - Create task + - Update task + + + + + + #### vApp Privileges + + - Import + - View OVF environment + - vApp application configuration + - vApp instance configuration + + + + + + #### vSphere Tagging + + - Create vSphere Tag + - Edit vSphere Tag + + + + + + + #### Virtual Machines Privileges + + +
+ +| | | | +| ------------------------- | ------------------------------------------- | ------------------------------------- | +| **Change Configuration** | | | +| | Change Settings | Extend virtual disk | +| | Change Swapfile Placement | Modify device settings | +| | Configure host USB device | Query Fault Tolerance compatibility | +| | Configure raw device | Query unowned files | +| | Add existing disk | Reload from path | +| | Add new disk | Remove disk | +| | Add or remove device | Rename | +| | Change resource | Reset guest information | +| | Configure managedBy | Set annotation | +| | Display connection settings | Toggle fork parent | +| | Advanced configuration | Upgrade virtual machine compatibility | +| | Change CPU count | | +| **Guest operations** | | | +| | Guest operation alias modification | Guest operation alias query | +| | Guest operation modifications | Guest operation queries | +| | Guest operation program execution | | +| **Interaction** | | | +| | Power off | Power on | +| **Inventory** | | | +| | Create from existing | Move | +| | Create new | Remove | +| **Provisioning** | | | +| | Allow disk access | Customize guest | +| | Allow file access | Deploy template | +| | Allow read-only disk access | Mark as template | +| | Allow virtual machine download | Mark as virtual machine | +| | Allow virtual machine files upload | Modify customization specification | +| | Clone template | Promote disks | +| | Clone virtual machine | Read customization specifications | +| | Create template from virtual machine | | +| **Service Configuration** | | | +| | Allow notifications | Modify service configuration | +| | Allow polling of global event notifications | Query service configurations | +| | Manage service configurations | Read service configuration | +| **Snapshot Management** | | | +| | Create snapshot | Remove snapshot | +| | Rename snapshot | Revert to snapshot | +| **vSphere Replication** | | | +| | Configure replication | Monitor replication | +| | Monitor replication | | + + +
+ + + + #### vSAN + + - Cluster + * ShallowRekey + + + +
+ + + + +
+
+ + +
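+
+If you manage vSphere with the `govc` CLI, custom roles can also be created from the command line. The following is a hedged sketch; the role name is a placeholder, and the privilege IDs shown are only a small illustrative subset of the lists above, so extend the command with the full set of privilege IDs that match your vSphere version.
+
+```bash
+# Create a custom role with a few of the required privilege IDs.
+govc role.create spectro-role \
+  Datastore.AllocateSpace \
+  Datastore.Browse \
+  Network.Assign \
+  VirtualMachine.Interact.PowerOn \
+  VirtualMachine.Interact.PowerOff
+
+# List the privileges assigned to the role to confirm the result.
+govc role.ls spectro-role
+```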
+
+
+---
+
+## Network Requirements
+
+* Outgoing access from the platform VMs to the internet, either directly or via a proxy.
+
+
+* An IP address (static or DHCP) for the quick start virtual machine (also used as an installer for the enterprise version).
+
+
+* A block of five (5) IP addresses reserved for an enterprise cluster: one IP address for each of the three enterprise cluster VMs, one IP to be used as a VIP, and an additional IP reserved for rolling upgrades.
+
+
+* Interconnectivity across all three (3) VMs on all ports.
+
+
+* Connectivity from the virtual machines to vCenter.
+
+
+:::info
+Ensure your data center CIDR IP address does not overlap with the Kubernetes PodCIDR range. During installation, you can change the Kubernetes PodCIDR range settings.
+:::
+
+
+## Proxy Requirements
+* If a proxy is used for outgoing connections, it must support both HTTPS and HTTP traffic. All Palette components communicate over HTTPS by default. An HTTP proxy can be used when HTTP is the only supported protocol, such as connecting to a private image registry that only supports HTTP.
+
+* Connectivity to all [Proxy Whitelist](../architecture/palette-public-ips.md#palette-domains) domains must be allowed.
+
+
+## Self-Hosted Configuration
+
+This section lists resource requirements for self-hosted Palette at various capacity levels. In this context, the terms *small*, *medium*, and *large* describe the instance size of the worker pools that Palette is installed on. The following table lists the resource requirements for each size.
+
+
+
+:::caution
+
+Do not exceed the recommended maximum number of deployed nodes and clusters in the environment. We have tested the performance of Palette with the recommended maximum number of deployed nodes and clusters. Exceeding these limits can negatively impact performance and result in instability. The active workload limit refers to the maximum number of active nodes and pods at any given time.
+
+:::
+
+ + + +| **Size** | **Nodes**| **CPU**| **Memory**| **Storage**| **MongoDB Storage Limit**| **MongoDB Memory Limit**| **MongoDB CPU Limit** |**Total Deployed Nodes**| **Deployed Clusters with 10 Nodes**| +|----------|----------|--------|-----------|------------|--------------------|-------------------|------------------|----------------------------|----------------------| +| Small | 3 | 8 | 16 GB | 60 GB | 20 GB | 4 GB | 2 | 1000 | 100 | +| Medium (Recommended) | 3 | 16 | 32 GB | 100 GB | 60 GB | 8 GB | 4 | 3000 | 300 | +| Large | 3 | 32 | 64 GB | 120 GB | 80 GB | 12 GB | 6 | 5000 | 500 | + + +#### Instance Sizing + +| **Configuration** | **Active Workload Limit** | +|---------------------|---------------------------------------------------| +| Small | Up to 1000 Nodes each with 30 Pods (30,000 Pods) | +| Medium (Recommended) | Up to 3000 Nodes each with 30 Pods (90,000 Pods)| +| Large | Up to 5000 Nodes each with 30 Pods (150,000 Pods) | + +
+ + +## Best Practices + +The following steps are optional but recommended for production environments. + + +| | | +| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| **DNS Mapping** | A DNS is used to access the Palette Management Console. While the Virtual IP Address (VIP), configured on the platform can be used
to access the platform, it is recommended that you reserve a DNS for this purpose and map it to the VIP after installation. | +| **SMTP Settings** | Configure the SMTP settings to enable the Palette platform to send out email notifications. Email notifications are sent out to new
users, when they are initially onboarded onto the platform, so they can activate their accounts and reset their password at a later time. | +| **Trusted Certificate** | Configure your platform with a trusted CA certificates. | +| **FTP Location for backups** | Configure an FTP location for platform backups and schedule daily backups. | \ No newline at end of file diff --git a/docs/docs-content/enterprise-version/reverse-proxy.md b/docs/docs-content/enterprise-version/reverse-proxy.md new file mode 100644 index 0000000000..438fc5311e --- /dev/null +++ b/docs/docs-content/enterprise-version/reverse-proxy.md @@ -0,0 +1,252 @@ +--- +sidebar_label: "Configure Reverse Proxy" +title: "Configure Reverse Proxy" +description: "Learn how to configure a reverse proxy for Palette." +icon: "" +hide_table_of_contents: false +sidebar_position: 80 +--- + +You can configure a reverse proxy for Palette. The reverse proxy can be used by host clusters deployed in a private network. Host clusters deployed in a private network are not accessible from the public internet or by users in different networks. You can use a reverse proxy to access the cluster's Kubernetes API server from a different network. + +When you configure reverse proxy server for Palette, clusters that use the [Spectro Proxy pack](../integrations/frp.md) will use the reverse proxy server address in the kubeconfig file. Clusters not using the Spectro Proxy pack will use the default cluster address in the kubeconfig file. + + +Use the following steps to configure a reverse proxy server for Palette. + +## Prerequisites + + +- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) is installed and available. + + +- [Helm](https://helm.sh/docs/intro/install/) is installed and available. + + +- Access to the kubeconfig file of the Palette Kubernetes cluster. You can download the kubeconfig file from the Palette system console. Navigate to **Enterprise System Migration**, select the Palette cluster, and click the **Download Kubeconfig** button for the cluster. + + +- A domain name that you can use for the reverse proxy server. You will also need access to the DNS records for the domain so that you can create a CNAME DNS record for the reverse proxy server load balancer. + + +- Ensure you have an SSL certificate that matches the domain name you will assign to Spectro Proxy. You will need this to enable HTTPS encryption for the Spectro Proxy. Contact your network administrator or security team to obtain the SSL certificate. You need the following files: + - x509 SSL certificate file in base64 format + + - x509 SSL certificate key file in base64 format + + - x509 SSL certificate authority file in base64 format + + +- The Spectro Proxy server must have internet access and network connectivity to the private network where the Kubernetes clusters are deployed. + + +## Enablement + +1. Open a terminal session and navigate to the directory where you stored the **values.yaml** for the Palette installation. + + +2. Use a text editor and open the **values.yaml** file. Locate the `frps` section and update the following values in the **values.yaml** file. Refer to the [Spectro Proxy Helm Configuration](helm-chart-install-reference.md#spectro-proxy) to learn more about the configuration options. + + | **Parameter** | **Description** | **Type** | + | --- | --- | ---| + | `enabled`| Set to `true` to enable the Spectro Proxy server. | boolean | + | `frps.frpHostURL`| The domain name you will use for the Spectro Proxy server. For example, `frps.example.com`. 
string | +| `server.crt`| The x509 SSL certificate file in base64 format. | string | +| `server.key`| The x509 SSL certificate key file in base64 format. | string | +| `ca.crt`| The x509 SSL certificate authority file in base64 format. | string | +<br />
+ + The following is an example of the `frps` section in the **values.yaml** file. The SSL certificate files are truncated for brevity. + +
+ + ```yaml + frps: + frps: + enabled: true + frpHostURL: "frps.palette.example.com" + server: + crt: "LS0tLS1CRU...........tCg==" + key: "LS0tLS1CRU...........tCg==" + ca: + crt : "LS0tLS1CRU...........tCg==" + ``` + + +3. Issue the `helm upgrade` command to update the Palette Kubernetes configuration. The command below assumes you are in the folder that contains the **values.yaml** file and the Palette Helm chart. Change the directory path if needed. + +
+ + ```bash + helm upgrade --values values.yaml hubble spectro-mgmt-plane-0.0.0.tgz --install + ``` + + +4. After the new configurations are accepted, use the following command to get the IP address of the Spectro Proxy server's load balancer. + +
+ + ```bash + kubectl get svc --namespace proxy-system spectro-proxy-svc + ``` +5. Update the DNS records for the domain name you used for the Spectro Proxy server. Create a CNAME record that points to the IP address of the Spectro Proxy server's load balancer. + + +6. Log in to the Palette System API by using the `/v1/auth/syslogin` endpoint. Use the `curl` command below and replace the URL with the custom domain URL you assigned to Palette, or use the IP address. Ensure you replace the credentials below with your system console credentials. + +
+ + ```bash + curl --insecure --location 'https://palette.example.com/v1/auth/syslogin' \ + --header 'Content-Type: application/json' \ + --data '{ + "password": "**********", + "username": "**********" + }' + ``` + Output + ```json hideClipboard + { + "Authorization": "**********.", + "IsPasswordReset": true + } + ``` + +7. Using the output you received, copy the authorization value to your clipboard and assign it to a shell variable. Replace the authorization value below with the value from the output. + +
+ + ```shell hideClipboard + TOKEN=********** + ``` + +8. Next, prepare a payload for the`/v1/system/config/` endpoint. This endpoint is used to configure Palette to use a reverse proxy. The payload requires the following parameters: + +
+ + | **Parameter** | **Description** | **Type** | + | --- | --- | --- | + | `caCert`| The x509 SSL certificate authority file in base64 format. | string | + | `clientCert`| The x509 SSL certificate file in base64 format. | string | + | `clientKey`| The x509 SSL certificate key file in base64 format. | string | + | `port` | The port number for the reverse proxy server. We recommend using port `443`. | integer | + | `protocol` | The protocol to use for the reverse proxy server. We recommend using `https`. | string | + | `server`| The domain name you will use for the Spectro Proxy server. For example, `frps.example.com`. Do not include the HTTP schema in the value. | string | + + The following is an example payload. The SSL certificate files are truncated for brevity. + +
+ + ```json hideClipboard + { + "caCert": "-----BEGIN CERTIFICATE-----\n.............\n-----END CERTIFICATE-----", + "clientCert": "-----BEGIN CERTIFICATE-----\n..........\n-----END CERTIFICATE-----", + "clientKey": "-----BEGIN RSA PRIVATE KEY-----\n........\n-----END RSA PRIVATE KEY-----", + "port": 443, + "protocol": "https", + "server": "frps.palette.example.com" + } + ``` + +<br />
+ + :::info + + You can save the payload to a file and use the `cat` command to read the file contents into the `curl` command. For example, if you save the payload to a file named `payload.json`, you can use the following command to read the file contents into the `curl` command. You can also save the payload as a shell variable and use the variable in the `curl` command. + + ::: + + +
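   For example, assuming you saved the payload as **payload.json** in your current directory and exported the `TOKEN` variable from the earlier step, a command along the following lines reads the file contents into the request. Adjust the URL to the custom domain or IP address you assigned to Palette.

   ```bash
   curl --insecure --location --request PUT 'https://palette.example.com/v1/system/config/reverseproxy' \
     --header "Authorization: $TOKEN" \
     --header 'Content-Type: application/json' \
     --data "$(cat payload.json)"
   ```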
+ +9. Issue a PUT request using the following `curl` command. Replace the URL with the custom domain URL you assigned to Palette or use the IP address. You can use the `TOKEN` variable you created earlier for the authorization header. Ensure you replace the payload below with the payload you created in the previous step. + +
+ + ```bash + curl --insecure --silent --include --output /dev/null -w "%{http_code}" --location --request PUT 'https://palette.example.com/v1/system/config/reverseproxy' \ + --header "Authorization: $TOKEN" \ + --header 'Content-Type: application/json' \ + --data ' { + "caCert": "-----BEGIN CERTIFICATE-----\n................\n-----END CERTIFICATE-----\n", + "clientCert": "-----BEGIN CERTIFICATE-----\n.............\n-----END CERTIFICATE-----", + "clientKey": "-----BEGIN RSA PRIVATE KEY-----\n............\n-----END RSA PRIVATE KEY-----\n", + "port": 443, + "protocol": "https", + "server": "frps.palette.example.com" + }' + ``` + + A successful response returns a `204` status code. + + Output + ```shell hideClipboard + 204 + ``` + +You now have a Spectro Proxy server that you can use to access Palette clusters deployed in a different network. Make sure you add the [Spectro Proxy pack](../integrations/frp.md) to the clusters you want to access using the Spectro Proxy server. + + +## Validate + +Use the following steps to validate that the Spectro Proxy server is active. + +<br />
+ + + +1. Open a terminal session. + + +2. Log in to the Palette System API by using the `/v1/auth/syslogin` endpoint. Use the `curl` command below and replace the URL with the custom domain URL you assigned to Palette or use the IP address. Ensure you replace the credentials below with your system console credentials. + +
+ + ```bash + curl --insecure --location 'https://palette.example.com/v1/auth/syslogin' \ + --header 'Content-Type: application/json' \ + --data '{ + "password": "**********", + "username": "**********" + }' + ``` + Output + ```json hideClipboard + { + "Authorization": "**********.", + "IsPasswordReset": true + } + ``` + +3. Using the output you received, copy the authorization value to your clipboard and assign it to a shell variable. Replace the authorization value below with the value from the output. + +
+ + ```shell hideClipboard + TOKEN=********** + ``` + +4. Query the system API endpoint `/v1/system/config/reverseproxy` to verify the current reverse proxy settings applied to Palette. Use the `curl` command below and replace the URL with the custom domain URL you assigned to Palette, or use the IP address. You can use the `TOKEN` variable you created earlier for the authorization header. + +
+ + ```bash + curl --location --request GET 'https://palette.example.com/v1/system/config/reverseproxy' \ + --header "Authorization: $TOKEN" + ``` + + If the proxy server is configured correctly, you will receive an output similar to the following that contains your settings. The SSL certificate outputs are truncated for brevity. + +
+ + ```json hideClipboard + { + "caCert": "-----BEGIN CERTIFICATE-----\n...............\n-----END CERTIFICATE-----\n", + "clientCert": "-----BEGIN CERTIFICATE-----\n...........\n-----END CERTIFICATE-----", + "clientKey": "-----BEGIN RSA PRIVATE KEY-----\n........\n-----END RSA PRIVATE KEY-----\n", + "port": 443, + "protocol": "https", + "server": "frps.palette.example.com" + } + ``` \ No newline at end of file diff --git a/docs/docs-content/enterprise-version/ssl-certificate-management.md b/docs/docs-content/enterprise-version/ssl-certificate-management.md new file mode 100644 index 0000000000..d4d48c6a74 --- /dev/null +++ b/docs/docs-content/enterprise-version/ssl-certificate-management.md @@ -0,0 +1,81 @@ +--- +sidebar_label: "SSL Certificate Management" +title: "SSL Certificate Management" +description: "Upload and manage SSL certificates in Palette." +icon: "" +hide_table_of_contents: false +sidebar_position: 90 +--- + + +When you install Palette, a self-signed certificate is generated and used by default. You can upload your own SSL certificate to replace the default certificate. + +Palette uses SSL certificates to secure external communication. Palette's internal communication is secured by default and uses HTTPS. External communication with Palette, such as the system console, gRPC endpoint, and API endpoint, requires you to upload an SSL certificate to enable HTTPS. + + +:::info + +Enabling HTTPS is a non-disruptive operation. You can enable HTTPS at any time without affecting the system's functionality. + +::: + + +## Upload an SSL Certificate + +You can upload an SSL certificate in Palette by using the following steps. + + +### Prerequisites + +- Access to the Palette system console. + + +- You need to have an x509 certificate and a key file in PEM format. The certificate file must contain the full certificate chain. Reach out to your network administrator or security team if you do not have these files. + + +- Ensure the certificate is created for the custom domain name you specified for your Palette installation. If you did not specify a custom domain name, the certificate must be created for the Palette system console's IP address. You can also specify a load balancer's IP address if you are using a load balancer to access Palette. + + +### Enablement + +1. Log in to the Palette system console. + + +2. Navigate to the left **Main Menu** and select **Administration**. + + +3. Select the tab titled **Certificates**. + + +4. Copy and paste the certificate into the **Certificate** field. + + +5. Copy and paste the certificate key into the **Key** field. + + +6. Copy and paste the certificate authority into the **Certificate authority** field. + + +<br />
+ + ![A view of the certificate upload screen](/enterprise-version_ssl-certificate-upload.png) + +
+ +7. Save your changes. + +If the certificate is invalid, you will receive an error message. Once the certificate is uploaded successfully, Palette will refresh its listening ports and start using the new certificate. + + +### Validate + +You can validate that your certificate is uploaded correctly by using the following steps. + + +1. Log out of the Palette system console. If you are already logged in, log out and close your browser session. Browsers cache connections and may not use the newly enabled HTTPS connection. Closing your existing browser session avoids issues related to your browser caching an HTTP connection. + + +2. Log back into the Palette system console. Ensure the connection is secure by checking the URL. The URL should start with `https://`. + + +Palette is now using your uploaded certificate to create a secure HTTPS connection with external clients. Users can now securely access the system console, gRPC endpoint, and API endpoint. \ No newline at end of file diff --git a/docs/docs-content/enterprise-version/system-console-dashboard.md b/docs/docs-content/enterprise-version/system-console-dashboard.md new file mode 100644 index 0000000000..a283317407 --- /dev/null +++ b/docs/docs-content/enterprise-version/system-console-dashboard.md @@ -0,0 +1,43 @@ +--- +sidebar_label: "System Console Dashboard" +title: "System Console Dashboard" +description: "Understanding the super-admin settings in Palette's Enterprise (on-premise) variant." +icon: "" +hide_table_of_contents: false +sidebar_position: 50 +tags: ["self-hosted", "enterprise"] +--- + + +The self-hosted system console enables an initial setup and onboarding, administration, as well as upgrade management of the Palette Platform. The on-prem system console is available in a "quick start" mode and an "enterprise" mode. + +Platform administrators can use this console to perform the following operations: + +| Setting | Function | +| --- | --- | +| Tenant Management | Create and activate tenants | +| Update Management | Upgrade Spectro Cloud platform to newer versions | +| Administration | Configure platform settings like SMTP, Certificates, etc. | +| Migrate quick start mode cluster to enterprise | Available in quick start mode to install an enterprise cluster | + +## Tenant Management + +Create new tenants and their initial tenant admin accounts. Optionally, activate new tenants to enable tenant administrators to log in and access the tenant management console. + +## Update Management + +Apply Palette platform upgrades. Upgrades to the Palette platform are published to the Palette repository and a notification is displayed on the console when new versions are available. Platform administrators can apply platform upgrades directly from the on-prem system console. + +## Administration + +### SMTP + +Configure SMTP settings to enable the Palette platform to send out email notifications. Email Notifications are sent out to new users when they are onboarded to the platform to activate their accounts. + +### Certificates + +Provide the desired SSL/TLS server certificates to support external access to valid HTTPs. + +## Cluster Management + +Enterprise clusters are created and deployed from this section. The layers and/or pack integrations constituting a cluster can also be configured and updated. 
diff --git a/docs/docs-content/enterprise-version/upgrade.md b/docs/docs-content/enterprise-version/upgrade.md new file mode 100644 index 0000000000..a13a1bc889 --- /dev/null +++ b/docs/docs-content/enterprise-version/upgrade.md @@ -0,0 +1,81 @@ +--- +sidebar_label: "Upgrade Notes" +title: "Upgrade Notes" +description: "Spectro Cloud upgrade notes for specific Palette versions." +icon: "" +hide_table_of_contents: false +sidebar_position: 100 +--- + +This page is a reference resource to help you better prepare for a Palette upgrade. Review each version's upgrade notes for more information about required actions and other important messages to be aware of. If you have questions or concerns, reach out to our support team by opening up a ticket through our [support page](http://support.spectrocloud.io/). + +## Palette 4.0 + +Palette 4.0 includes the following major enhancements that require user intervention to facilitate the upgrade process. + +- **Enhanced security for Palette microservices** - To enhance security, all microservices within Palette now use `insecure-skip-tls-verify` set to `false`. When upgrading to Palette 4.0, you must provide a valid SSL certificate in the system console. + + If you already have an SSL certificate, key, and Certificate Authority (CA) certificate, you can use them when upgrading to Palette 4.0.0. To learn how to upload SSL certificates to Palette, refer to [SSL Certificate Management](ssl-certificate-management.md). + + +- **Self-hosted Palette Kubernetes Upgrade** - If you installed Palette using the Helm Chart method, the Kubernetes version used for Palette is upgraded from version 1.24 to 1.25. You will need to copy the new Kubernetes YAML to the Kubernetes layer in the Enterprise cluster profile. If you have customized your Kubernetes configuration, you will need to manually adjust custom values and include any additional configuration in the upgraded YAML that we provide. Refer to [Upgrade Kubernetes](upgrade.md#upgrade-kubernetes). + +### Upgrade from Palette 3.x to 4.0 + +From the Palette system console, click the **Update version** button. Palette will be temporarily unavailable while system services update. + +![Screenshot of the "Update version" button in the system consoles.](/enterprise-version_sys-console-update-palette-version.png) + +#### Upgrade Kubernetes + +Follow the steps below to upgrade Kubernetes. + +
+ +1. To obtain the upgraded Kubernetes YAML file for Palette 4.0, contact our support team by sending an email to support@spectrocloud.com. + + +2. In the system console, click on **Enterprise Cluster Migration**. + + +3. Click on the **Profiles** tab, and select the Kubernetes layer. The Kubernetes YAML is displayed in the editor at right. + + +4. If the existing Kubernetes YAML has been customized or includes additional configuration, we suggest you create a backup of it by copying it to another location. + + +5. Copy the Kubernetes YAML you received from our support team and paste it into the editor. + +
+ + ![Screenshot of the Kubernetes YAML editor.](/enterprise-version_upgrade_ec-cluster-profile.png) + + +6. If you have made any additional configuration changes or additions, add your customizations to the new YAML. + + +7. Save your changes. + +The Enterprise cluster initiates the Kubernetes upgrade process and leads to the reconciliation of all three nodes. + + +## Palette 3.4 + +Prior versions of Palette installed internal Palette components' ingress resources in the default namespace. The new version of the Helm Chart ensures all Palette required ingress resources are installed in the correct namespace. Self-hosted Palette instances deployed to Kubernetes and upgrading from Palette versions 3.3.X or older must complete the following action. + + +1. Connect to the cluster using the cluster's kubeconfig file. + + + +2. Identify all Ingress resources that belong to *Hubble* - an internal Palette component. + + ```shell + kubectl get ingress --namespace default + ``` + +3. Remove each Ingress resource listed in the output that starts with the name Hubble. Use the following command to delete an Ingress resource. Replace `REPLACE_ME` with the name of the Ingress resource you are removing. + + ```shell + kubectl delete ingress --namespace default + ``` \ No newline at end of file diff --git a/docs/docs-content/getting-started/_category_.json b/docs/docs-content/getting-started/_category_.json new file mode 100644 index 0000000000..455b8e4969 --- /dev/null +++ b/docs/docs-content/getting-started/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 20 +} diff --git a/docs/docs-content/getting-started/dashboard.md b/docs/docs-content/getting-started/dashboard.md new file mode 100644 index 0000000000..445a0325c0 --- /dev/null +++ b/docs/docs-content/getting-started/dashboard.md @@ -0,0 +1,63 @@ +--- +sidebar_label: "Palette Dashboard" +title: "Palette Dashboard" +description: "Spectro Cloud Palette Dashboard" +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["getting-started"] +--- + +This section is a tour of the two main dashboards of the Tenant console–the **Project Dashboard** and the **Admin Dashboard**. The Project Dashboard is used to perform operations related to setting up your Kubernetes clusters such as setting up Cluster Profiles, Creating Cloud Accounts, and deploying clusters. The Admin Dashboard is used for performing administrative tasks such as setting up Single Sign On (SSO), creating user, teams and setting up Role-Based Access Control (RBAC), and setting up additional package registries. The Admin Dashboard is only available to the users who have the Tenant Admin role. Admin users can toggle between the Project Dashboard and Tenant Admin Dashboard. Users without the Tenant Admin role can only see the Project Dashboard. + +## Project Dashboard + +Upon login, the dashboard shows the views available for a non-admin user. At the top, we have the Projects}> A Project helps to organize the cluster resources in a logical grouping method. button which helps to organize the cluster resources in a logical grouping. From the dropdown, we can shift between the projects. The left panel contains the Project Overview}>Project Overview gives an overview of the resource and cost consumption of the selected project. (2) which gives an overview of the resource and cost consumption of the selected project. Cluster Profiles}>Cluster Profiles are instantiated templates that are created with pre-configured layers/components needed for cluster deployments. 
The Cluster Profiles (3) of the Default Project are shown. The left pane in this dashboard also contains options for Clusters (4), the Kubernetes clusters in Palette that are instantiated from cluster profiles. Workspaces (5) enable the coupling of relevant namespaces across multiple clusters to manage access, obtain cost, and workload visibility by applications or teams. Audit Logs (6) display the log of activities with a timeline. + + +#### Non-admin User view + +Upon login, the dashboard shows the views available for a non-admin user. + +1. The Projects button helps to organize the cluster resources into logical groupings. From the dropdown, you can switch between projects. + + +2. The left panel contains the Project Overview (2), which gives an overview of the resource and cost consumption of the selected project. + + +3. The Cluster Profiles of the Default Project are shown. Cluster Profiles are instantiated templates that are created with pre-configured layers and components needed for cluster deployments. The left pane in this dashboard also contains options for Clusters, the Kubernetes clusters in Palette that are instantiated from Cluster Profiles. + + + +4. Workspaces enable the coupling of relevant namespaces across multiple clusters to manage access, obtain cost, and workload visibility by applications or teams. + + + +5. Audit logs display the log of activities with a timeline. + + + +6. The **Settings** section (7) of the Default dashboard relates to the Cloud Account settings, Backup Location settings, and Alerts. This is an important distinction from the settings under the Admin Dashboard. It also allows the user to upload SSH keys for safekeeping. These keys can be recalled when deploying a cluster. + + ![project-dashboard](/project-dashboard.png) + + + + +## Tenant Admin Dashboard + + +The menu within the Tenant Admin Dashboard contains the Projects button. This is different from the Projects menu in the Default Dashboard. Within the Tenant Admin Dashboard, the Projects button provides access to modifying a project itself (edit, configure, delete, and the overall status), whereas the button in the Default Dashboard provides access to the Cluster Profiles inside the project. + +1. The Cluster Profiles button in the Tenant Admin Dashboard provides the ability to create and manage global cluster profiles that can be used for cluster creation, across all projects, within a tenant. + + +2. The Roles and Permissions settings, as well as Users and Teams, allow the admin to set or restrict these attributes for one or more team members. A Role is a collection of permissions, Permissions are associated with specific actions within the platform, Users are members of a tenant who are assigned roles that control their access within the platform, and a Team is a group of users. Palette's RBAC design allows granting granular access to resources and their operations; see the RBAC section for more details. + + +3. The audit logs (9)
in the admin Dashboard allow the admin to track the user interaction with the application resources along with the timeline for all projects and users. For admin users, the "audit log" button is visible for each project as well. Here, the admin can view the logs of the resources specific to the project. + + +4. Finally, the Tenant Admin settings (10) under the Admin Dashboard provide access to the pack registries}>A pack is a collection of files such as manifests, Helm charts, ansible roles, configuration files, etc.; private cloud gateways}>A Private Cloud Gateway is a Palette component that enables the communication between Palette's management console and a VMware based private data center. and [SAML SSO](../user-management/saml-sso/saml-sso.md) configurations. + + ![admin-dashboard](/admin-dashboard.png) diff --git a/docs/docs-content/getting-started/getting-started.md b/docs/docs-content/getting-started/getting-started.md new file mode 100644 index 0000000000..23a9338523 --- /dev/null +++ b/docs/docs-content/getting-started/getting-started.md @@ -0,0 +1,24 @@ +--- +sidebar_label: "Getting Started" +title: "Getting Started" +description: "Spectro Cloud Getting Started" +hide_table_of_contents: false +sidebar_custom_props: + icon: "overview" +tags: ["getting-started"] +--- + +This page gives an overview of getting started with Spectro Cloud Palette quickly. We introduce Palette's complimentary subscription plans, features, workflow, and user experience session to our users. + + +The first step towards adopting Palette to your organization is to create a login. We highly appreciate our users having a first impression of our product before making a purchase decision; hence, we provide our users with the following options: + + +[Palette Freemium](../getting-started/palette-freemium.md) + +[Free Cloud Credit](..//getting-started/palette-freemium.md) + + +After successful account creation, Palette presents a well-organized Product Onboarding Workflow to streamline the user product adoption. The onboarding process consists of explaining our product features, followed by a Palette experience session. Here we furnish an easy-to-follow deployment pipeline for our users to launch their first cluster successfully. Explore more about the feature: + +[Product Onboarding Workflow](../getting-started/onboarding-workflow.md) diff --git a/docs/docs-content/getting-started/onboarding-workflow.md b/docs/docs-content/getting-started/onboarding-workflow.md new file mode 100644 index 0000000000..145c1850bd --- /dev/null +++ b/docs/docs-content/getting-started/onboarding-workflow.md @@ -0,0 +1,65 @@ +--- +sidebar_label: "Palette Onboarding Workflow" +title: "Palette Onboarding Workflow" +description: "Palette Onboarding Workflow" +icon: "" +hide_table_of_contents: false +sidebar_position: 20 +--- + +Palette offers a product tour to help you get familiar with the console and many of its key components. + +## Product Tour +Upon a successful sign-in to our platform, we start the onboarding process with a product tour—an introduction to the platform, to familiarize the users with our Palette features. + + +## Start your Palette Experience + +![user-experience.png](/user-experience.png) + + +The product tour is followed by a Palette experience session. Here we make sure that our users are guided through a successful deployment pipeline in their first use, instead of them just figuring things out along the way towards cluster creation. 
The major components of this session are as follows: + +* [Create New Cluster](../clusters/clusters.md) + + * Create a new cluster from scratch using any cloud environment or bare metal. + + * A system-level cluster profile is included so that users can explore Palette functionality more easily and quickly. + +* [Import Cluster](../clusters/imported-clusters/cluster-import.md) + * Bring your own cluster into Palette in two easy steps. + +* Out-of-the-box (OOTB) Configurations: + * Try one of our out-of-the-box cluster profile configurations on your own cluster or in our Palette Virtual Cluster environment. + +<br />
+ + :::info + Once the user experience session is finished, the user will be familiar with Palette's workflow and deployment pipeline. This section of the document is a quick start to the deployment process. The different Palette features and Day-2 operations are detailed in the remainder of this documentation site. + ::: + + +### Connect with us +* [Slack](https://spectrocloudcommunity.slack.com/join/shared_invite/zt-g8gfzrhf-cKavsGD_myOh30K24pImLA#/shared-invite/email) + +* support@spectrocloud.com + + +## Palette Workflow + +Palette requires the creation of a cluster profile before a workload cluster can be created. This is because [cluster profiles](../cluster-profiles/cluster-profiles.md) are +templates created with preconfigured layers that define the required dependencies, such as the Operating System (OS) and Kubernetes version for your cluster. The cluster profile is a core component of Palette. You can learn more about cluster profiles by reviewing the [cluster profile](../cluster-profiles/cluster-profiles.md) reference page. + +## Resources + +* [Create your Cluster Profile](../cluster-profiles/task-define-profile.md) + + +* [Create your Cluster](../clusters/clusters.md) + + +* [Imported Clusters](../clusters/imported-clusters/cluster-import.md) + + +* [Cluster Management](../clusters/cluster-management/cluster-management.md) + diff --git a/docs/docs-content/getting-started/palette-freemium.md b/docs/docs-content/getting-started/palette-freemium.md new file mode 100644 index 0000000000..c6b7abf26b --- /dev/null +++ b/docs/docs-content/getting-started/palette-freemium.md @@ -0,0 +1,47 @@ +--- +sidebar_label: "Try Palette for Free" +title: "About Free Tier" +description: "Palette Free Tier, Freemium, Free Tier" +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +--- + +## Try Palette for Free + +**Palette Free Tier** encourages new users to explore Palette, without having to make a purchase decision, before they are ready. With unlimited kilo-Core-hour usage the first month, there is full access to the Spectro Cloud Palette Platform to create, deploy, and manage Palette resources. + +The second month and every month thereafter, the customer is granted a complimentary 25 kilo-Core-hour (kCh) to use freely to manage up to five Kubernetes clusters with Palette. + +There is the choice to continue in the Free Tier, so long as you stay under the 25 kCh consumption. Go over the usage limit of 25 kCh, and the clusters remain visible, but convert to Read-Only. Palette sends alert messages to users when 25 kCh is crossed, and if more than five active clusters are launched further deployments will be restricted. Enter a payment and all usage and capabilities are restored. + + +## Free Cloud Credit with Palette + +Palette provides a cloud account—with required credentials—for user deployments. This free cloud credit is offered to customers who do not have access to a cloud account and want to try Palette, before committing to the Palette platform. Perhaps a customer finds themselves without sufficient permissions to input their existing cloud credentials or is exploring new ways to manage their services. The Free Cloud Credit, granted through the Palette account, is a great way to begin exploring the Palette platform. 
+ + +## Request a Free Cloud Account + +To request an authorization to the Spectro Cloud Free Cloud Credit program, connect via the [Slack Spectro Cloud Community](https://join.slack.com/t/spectrocloudcommunity/shared_invite/zt-g8gfzrhf-cKavsGD_myOh30K24pImLA) or [email](mailto:developer@spectrocloud.com) and ask about the Free Cloud Credit program. A $100 of free cloud credit will be granted to use with the Spectro Cloud's Palette platform. The user is encouraged to monitor the usage and expenditure percentage through Palette, and as they reach the conclusion of the credit usage, a message will remind the user of the Usage Status. + + +## Managing a Free Cloud Account + +The Free Cloud Credit ends when the $100 of the free cloud credit, granted by Spectro Cloud is consumed. At that time, all resources created during the trial period will cease services and clusters will be marked for deletion and will be lost, so make sure to back up as needed. Reminders will be sent along the way at the 50%, 90%, and 100% consumption stage sharing the state of the free cloud usage. + + +### Upgrade to a Paid Cloud Account + +Upgrade to a Cloud Account at any time after trying the Free Cloud Credit service. Create your own Cloud Account and replace the Free Cloud Credit with your own Cloud Account information. + + +### Kilo-Core-hour Calculations + +Usage is calculated as a concept of kilo-Core-hour or kCh. This is a measurement of the number of CPU Cores multiplied by Time. For instance, for a 4-node cluster which has 16 CPU cores each, one day of management will equate to 4 x 16 CPU cores x 24 hours = 1.54 kCh. + +## Next Steps + +To get started with Palette Free Tier, visit the [signup link](https://www.spectrocloud.com/free-trial). + + diff --git a/docs/docs-content/glossary-all.md b/docs/docs-content/glossary-all.md new file mode 100644 index 0000000000..dda655ba27 --- /dev/null +++ b/docs/docs-content/glossary-all.md @@ -0,0 +1,195 @@ +--- +sidebar_label: "Glossary" +title: "Palette Glossary" +description: "Palette Glossary" +hide_table_of_contents: false +sidebar_position: 250 +sidebar_custom_props: + icon: "about" +tags: ["glossary"] +--- + +# Glossary + +This page gives a quick reference to various object types and concepts within the Palette platform. +## App Mode +A mode optimized for a simpler and streamlined developer experience that allows you to focus on the building, maintenance, testing, deployment, and monitoring of your applications. App Mode removes the need to worry about the infrastructure management of a Kubernetes cluster and results in a PaaS-like experience, enabling you to focus on deploying [App Profiles](#app-profile), Apps, and [Palette Virtual Clusters](#palette-virtual-cluster). + + +## App Profile +App Profiles are templates created with preconfigured services required for Palette Virtual Clusters deployment. The App Profile allow creators to integrate various services or tiers, required to run an application, such as cache, databases, and more into a single deliverable. App Profiles provide a way to drive consistency across virtual clusters. You can create as many profiles as required, with multiple tiers serving different functionalities per use case. + +## Air-Gapped + +Palette on-prem installation supports Air-Gapped, a security measure in which its management platform is installed on VMware environments with no direct or indirect connectivity to any other devices or networks of the outside world. 
This feature provides airtight security to the platform without the risk of compromise or disaster. In addition, it ensures the total isolation of a given system from other networks, especially those that are not secure. +## Attach Manifests + +For integrations and add-ons orchestrated via Palette [Packs](#pack) or [Charts](#helm-charts), at times it is required to provide additional Kubernetes resources to complete the installation. Resources like additional secrets or Custom Resource Definitions may need to be installed for the integration or add-on to function correctly. Attach Manifests are additional raw manifests attached to a cluster profile layer built using a Palette Pack or a Chart. Multiple Attach Manifests can be added to a layer in a cluster profile. + +## Bring Your Own Operating System (BYOOS) + +A feature in Palette that allows you to bring your own operating system and use it with your Kubernetes clusters. With the BYOOS pack, you can reference your own OS images, configure the necessary drivers, and customize the OS to meet your specific requirements. BYOOS gives you greater flexibility, control, and customization options when it comes to managing your Kubernetes clusters. It is especially useful for enterprises and organizations with strict requirements around security, compliance, or specific hardware configurations. + +## Chart Repositories + +Chart Repositories are web servers, either public or private, that host Helm Charts. By default, Palette includes several popular chart registries such as Bitnami. As an administrator, you can add additional public or private chart repositories to leverage charts from those sources. This feature provides greater flexibility in managing and deploying applications, allowing you to access and use Helm Charts from various sources in your Palette environment +## Cloud Account + +Cloud Accounts are where access credentials are stored for public and private clouds. It is used by the system to provide new cluster infrastructure and cluster resources. Cloud account information is treated as sensitive data and fully encrypted using the tenant's unique encryption key. + +## Cluster Mode +Cluster Mode enables you to create, deploy, and manage Kubernetes clusters and applications. In Cluster Mode, you can deploy Kubernetes clusters to public cloud providers, on-prem data centers, and on the edge. + +## Cluster Profile + +A Cluster Profile is a declarative model of a Kubernetes infrastructure stack. A Kubernetes infrastructure stack is broken into multiple layers, from core layers like base OS, Kubernetes, storage, network, to additional add-on layers such as load balancer, ingress controller, logging, monitoring, security, etc. For each layer, Palette provides multiple out-of-the-box options and versions. The cluster profile is essentially a configuration of end-to-end Kubernetes stacks and settings that you create based on your needs, which you can reuse every time you need to deploy a cluster matching that configuration. For example, let us say for AI/ML you need a cluster with a base OS with an NVIDIA driver installed and Kubeflow installed in the cluster, but for a production cluster, you need a different stack with Logging (EFK), Monitoring (Prometheus), Security (Twistlock) pre-installed. + +The diagram below shows an example of a cluster profile: + +![cluster_profile_new](/cluster_profile_new.png) + +Read more about Cluster Profiles [here](cluster-profiles/cluster-profiles.md). 
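As a purely conceptual sketch (not a Palette API object or pack manifest), the layered structure described above can be pictured as follows; the pack names are illustrative placeholders.

```yaml
# Conceptual view of cluster profile layers; names are placeholders, not real configuration.
layers:
  - type: os        # base operating system layer
    pack: ubuntu
  - type: k8s       # Kubernetes distribution layer
    pack: kubernetes
  - type: cni       # container network interface layer
    pack: cni-calico
  - type: csi       # container storage interface layer
    pack: csi-aws-ebs
  - type: addon     # add-on layers such as monitoring, logging, or security
    pack: prometheus-operator
```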
+## Edge Appliances + +Palette supports several kinds of appliances for the Edge deployment. These appliances can be registered with the Palette Management Console and used for provisioning a Virtualized or a Native OS (Native Edge Deployment). The following is the list of all the Palette supported Edge appliance types: + + | **Appliance Type** | **Environment** | + | :------------------------------ | :---------------------------------------- | + | Native Edge Deployment | Bare Metal Machines or Virtual Appliances | + | Bare Metal Machine | Virtualized | + | KVM-based virtual machines | Virtualized | + +**Note:** Palette Edge Manager & TUI would be embedded in P6OS. +## Edge Clusters + +Edge Clusters are Kubernetes clusters set up on appliances installed in isolated locations such as hospitals, grocery stores, restaurants, etc., unlike a data center or cloud environment. These appliances can be bare metal machines or virtual machines and are managed by operators at these remote sites. Palette provides the provisioning of Workload Clusters on such edge appliances from its SaaS-based management console. Besides provisioning of the cluster, Palette also provides end-to-end management of these clusters through operations such as scaling, upgrades, reconfiguration, etc. +## Helm Charts + +Helm Charts are Kubernetes YAML manifests that describe a related set of Kubernetes resources into a single package. Just like Palette's native Packs, Palette supports and orchestrates helm charts hosted in any public or private Helm chart registry on to Kubernetes clusters. + +## Host Cluster + +A Kubernetes cluster that is managed by Palette. A host cluster may contain several Palette Virtual Clusters. +## Management Clusters + +Management Cluster is where Palette core components are hosted and are often referred to in on-prem installations of Palette. As part of the Kubernetes workload cluster provisioning, the first control-plane node is launched by Palette in the management cluster or the cloud gateway. Once the first control-plane node goes to running state, all the resources are pivoted from the management cluster or the cloud gateway to the target workload cluster. After that, the target cluster self-manages the cluster and application lifecycle. All Day-2 operations which result in node changes, including OS/Kubernetes upgrades, scaling, and nodes certificate rotation, are triggered by changes to the Cluster API resources in the target workload cluster. +## OIDC + +OpenID Connect [(OIDC)](user-management/saml-sso/saml-sso.md) is an open source, authentication protocol that allows users to verify their identity, based on the authentication performed by an authorization provider. +## Organization + +An organization is the equivalent of a Tenant. Review the [Tenant](#tenant) definition to learn more. +## Pack + +Palette provides multiple integrations/technologies in a [cluster profile](#cluster-profile) for various system layers, such as OS, Kubernetes, storage, networking, monitoring, security, load balancers, etc. These integrations are provided in the form of Packs. A pack is a Palette content package that describes an integration in the Kubernetes infrastructure stack ecosystem and contains relevant artifacts required to deploy and manage that integration. Palette provides packs for core layers of the Kubernetes stack; Operating Systems, Kubernetes distributions, Networking and Storage as well as packs for add-on layers such as ELK Stack, Prometheus, Sysdig Falco, etc. 
+ +## Pack Manifests + +Layers in a [cluster profile](#cluster-profile) are typically built using a Palette [Pack](#pack) or a [Charts](#helm-charts). There may be certain scenarios where additional Kubernetes resources need to be provisioned that are not part of any Palette pack or a chart. Pack manifests provide a pass-through mechanism to allow provisioning through raw manifests. Pack Manifest layers can be added to a cluster profile stack built using Spectro Packs and Charts. +## Palette Edge Manager (Local API) + +A cmd line API that supports TUI operations & site diagnostics. For Dark Site or Air Gapped environments Palette Edge Manager can be used to upload cluster configurations. + +## Palette eXtended Kubernetes (PXK) + +Palette eXtended Kubernetes (PXK) is a customized version of the open-source Cloud Native Computing Foundation (CNCF) distribution of Kubernetes. This Kubernetes version can be deployed through Palette to all major infrastructure providers, public cloud providers, and private data center providers. This is the default distribution when deploying a Kubernetes cluster through Palette. You have the option to choose other Kubernetes distributions, such as MicroK8s, Konvoy, and more, should you want to consume a different Kubernetes distribution. + +PXK is different from the upstream open-source Kubernetes version primarily because of the carefully reviewed and applied hardening of the operating system (OS) and Kubernetes. The hardening ranges from removing unused kernel modules to using an OS configuration that follows industry best practices. Our custom Kubernetes configuration addresses common Kubernetes deployment security pitfalls and implements industry best practices. + +A benefit of Palette when used with PXK is the ability to apply different flavors of container storage interface (CSI) plugins and container network interface (CNI) plugins. +Other open-source Kubernetes distributions, such as MicroK8s, RKE2, and K3s, come with a default CSI and CNI. Additional complexity and overhead are required from you to enable different interfaces. PXK supports the ability to select other interface plugins out of the box without any additional overhead or complexity needed from your side. + +There are no changes to the Kubernetes source code and we also follow the same versioning schema as the upstream open-source Kubernetes distribution. + +## Palette eXtended Kubernetes Edge (PXK-E) + +Palette eXtended Kubernetes Edge (PXK-E) is a customized version of the open-source Cloud Native Computing Foundation (CNCF) distribution of Kubernetes. This Kubernetes distribution is customized and optimized for edge computing environments and can be deployed through Palette. PXK-E is the Kubernetes distribution Palette defaults to when deploying Edge clusters. + + +PXK-E differs from the upstream open-source Kubernetes version by optimizing for operations in an edge computing environment. PXK-E also differentiates itself by using the open-source project, [Kairos](https://kairos.io/) as the base operating system (OS). PXK-E’s use of Kairos means the OS is immutable, which significantly improves the security posture and reduces potential attack surfaces. + +Another differentiator of PXK-E is the carefully reviewed and applied hardening of the OS and Kubernetes. The hardening ranges from removing unused OS kernel modules to using an OS configuration that follows industry best practices. 
Our custom Kubernetes configuration addresses common deployment security pitfalls and implements industry best practices. + +With PXK-E, you can manage automatic OS upgrades while retaining immutability and the flexibility to roll out changes safely. The A/B partition architecture of Kairos allows for new OS and dependency versions to be installed in a separate partition and mounted at runtime. You can fall back to use the previous partition if issues are identified in the new partition. + +PXK-E manages the underlying OS and the Kubernetes layer together, which reduces the challenge of upgrading and maintaining two separate components. + +PXK-E allows you to apply different flavors of container storage interfaces (CSI) and container network interfaces (CNI). Other open-source Kubernetes distributions such as MicroK8s, RKE2, and K3s come with a default CSI and CNI. There is additional complexity and overhead when you want to consume different interface plugins with traditional Kubernetes distributions. Using PXK-E, you select the interface plugin you want to apply without additional overhead and complexity. + +There are no changes to the Kubernetes source code used in PXK-E, and it follows the same versioning schema as the upstream open-source Kubernetes distribution. + +## Palette Orchestrator +Palette orchestrator supports deploying the clusters as per the specifications desired and modeled in Palette UI. Furthermore, it supports the cluster version upgrades as per the user requirements. The Palette orchestrator also aids in recycling the certificates of the clusters, node health checks, and recycling unhealthy nodes. +## PaletteOS (P6OS) + +PaletteOS is a real-time operating system provisioned by Palette. It is embedded with a base Operating System such as Ubuntu, K3OS, etc., and one of the Kubernetes distributions such as CNCF (Cloud Native Computing Foundation), K3s (a Lightweight Kubernetes Distribution), or RKE (Rancher Kubernetes Engine). Palette builds several of these based on the most desired versions of the base operating system and Kubernetes distribution. + +**Examples**: (Ubuntu20.0.4+CNCFK8s1.21.3, SLES+K3S). We also encourage our customers to build their own Operating system. +## Palette Upgrade Controller + +A Kubernetes controller to be installed into the workload cluster to facilitate upgrades to new P6OS image. + +## Palette Virtual Cluster +Palette Virtual Clusters enable operations teams to partition a [host cluster](#host-cluster) and deploy lightweight virtual clusters on top, similar to how virtualization creates logically isolated virtual servers on top of physical machines. This is great for giving developers quick access to a sandbox environment for testing their code. Virtual clusters provide as strong a level of separation without introducing complicated overhead, such as separating physical resources and managing namespaces with complex RBAC configurations. Palette Virtual Clusters is powered by [vCluster](https://www.vcluster.com/). + +## Permissions + +Permissions are associated with specific actions within the platform such as Create New user in a tenant, Add a Cluster Profile in a project, View Clusters within a cluster, etc. Permissions are granted to the [users](#user) and [teams](#team) through [roles](#role). +## Presets + +Presets are a subset of properties configured for a layer that is pre-configured with defaults to easily enable or turn on a feature. 
Palette [packs](#pack) and [charts](#helm-charts) provide several settings that can be customized by the user. Although customizable typically in a YAML format, it can be cumbersome to look through a flat list of properties and identify the ones to change for specific functionality. Through presets, Palette groups a bunch of related properties that control a feature and provides them as named presets. During construction of a [cluster profile](#cluster-profile), users may be simply enabled or disable a preset to quickly make the desired changes. + +## Private Cloud Gateway + +A Private Cloud Gateway is a Palette component that enables the communication between Palette's management console and a private cloud/data center. The gateway needs to be installed by the users in their private cloud environments using a private cloud gateway installer appliance. +## Private Cloud Gateway-Edge (PCG-E) + +Deploying Edge Clusters requires a Private Cloud Gateway-Edge (PCG-E) to be installed on the appliances for Palette to discover the appliance and provision workload clusters on them. A PCG-E is Palette's on-premises component to support remote Edge devices. Palette PCG-E, once installed on-premises, registers itself with the Palette's SaaS portal and enables secure communications between the SaaS portal and the Edge Clusters. +## Private Pack Registry + +Palette provides extensibility by providing a way for users to define [packs](#pack) for integrations beyond the ones provided by default in Palette's public pack registry. These user-defined packs need to be hosted in a private registry, which users can bring up in their environment using Palette's pack registry software. +## Project + +Projects provide a way for grouping clusters together for logical separation. Role-based access controls within Palette are applied at the project level. [Users](#user) and [teams](#team) can be assigned one or more [roles](#role) within a project for granular control over [permissions](#permission) within the project scope. +## Public Pack Registry + +Palette maintains a public pack registry containing various [packs](#pack) that can be used in any [cluster profile](#cluster-profile). The pack content in this registry is constantly updated with new integrations. + + +## Repavement + +Repavement is the process of replacing a Kubernetes node with a new one. This is typically done when a node is unhealthy or needs to be upgraded. The process involves migrating active workloads to another healthy node, and removing it from the [node pool](clusters/cluster-management/node-pool.md#repave-behavior-and-configuration). This is referred to as draining the node. A new node is created and configured with the same settings as the old node and added back to the pool. The process is fully automated and does not require manual intervention. + +## Role + +A Role is a collection of [permissions](#permission). There are two kinds of roles in Palette: *tenant roles* and *project roles*. *Tenant roles* are a collection of tenant-level permissions such as create a new user, add a new project, etc. *Project roles* consist of permissions for various actions within the scope of a project such as create a cluster profile, create a cluster, etc. +## Site Configuration Text User Interface (TUI) + +TUI is initially used as an interface to site operator to provide site-specific settings such as NW Settings (Static IP, DHCP, WAN, GW, Proxy), Palette endpoint, and Device ID override. It can accept inputs from the unattended.yaml file. 
+## Spectro Agent +Spectro Agent bridges the information transfer between Palette SaaS and Palette Orchestrator. The Spectro Agent collects information such as metrics, workloads, and heartbeats and constantly updates to the SaaS platform for user access. In addition to this, the Spectro Agent is responsible for initiating and controlling Backup, OS-Patch, and Compliance Scan on the running cluster. + +## System Console (On-prem System Console) +The console is used to scale up the Enterprise cluster and manage it. The System console supports creating and activating a new tenant in a new instance. It Initiates the installation of a Palette Enterprise Cluster. The On-Prem System Console provides various administrative setup tasks. Most of these are optional and can be performed at any time. To quickly start using the platform's functionality, all that is needed is to create the first tenant and activate it.Initial login:admin/admin. +## System Profiles +System Profiles provide a way to bootstrap an edge appliance with an initial set of virtual and containerized applications. Similar to cluster profiles, System Profiles are templates created using one or more layers that are based on packs or helm charts. +## Team +A Team is a group of [users](#user). Users can be part of one or more teams. Teams provide a convenient way to control platform access for a group of users. [Roles](#role) assigned to a team grant associated tenant or [project](#project) [permissions](#permission) to all users that are part of the team. +## Tenant + +Tenant represents a customer or an organization in Palette. Palette is a multi-tenant platform. All tenant resources are isolated from other tenants. Each tenant has a unique encryption key to encrypt any sensitive data such as cloud credentials and tenant user information. [Users](#user) from a tenant do not have access to resources in another tenant. +## User + +Users are members of a [tenant](#tenant) who are assigned [roles](#role) that control their access within the platform. For example, users with the tenant admin role get permissions to perform all actions across all [projects](#project) in the tenant whereas users assigned project roles, only get specific permission within the associated projects. The user's personal information (email, name) is treated as sensitive data and fully encrypted using the tenant's unique encryption key. + +## VMO +Palette [Virtual Machine Orchestrator](vm-management/vm-management.md) provides a unified platform for managing containerized and virtualized applications. Palette VM Orchestrator allows organizations to onboard, deploy, manage, and scale VMs within the same cluster as their containerized applications. + +## Workload +An application running on the Kubernetes cluster is called a Workload. It can be a set of components that work together or a single independent component, run as a set of pods. In Kubernetes terms, a Pod is a set of running containers on your cluster. +## Workload Cluster + +Workload / Tenant / Application Clusters are a collection of master and worker nodes that cooperate to execute container application workloads. Kubernetes clusters provisioned by users are referred to as Workload Clusters. These clusters are created within [projects](#project) and they are provisioned and managed in the user's cloud environment. Each cluster is provisioned from a [Cluster Profile](#cluster-profile) with additional configuration overrides and cloud-specific settings. 
+## Workspace + +The multi-cluster management and governance capabilities are supervised with Palette Workspaces. Workspaces enable the logical grouping of clusters and namespaces to provide application or team-specific governance and visibility into workloads, cost, and usage metrics. For example, the application or team workload may be deployed into namespaces across clusters for achieving High Availability (HA), Disaster Recovery (DR), organization-specific placement policies, etc. Grouping the namespaces and clusters into a workspace provides central management and governance in a multi-cluster distributed environment. \ No newline at end of file diff --git a/docs/docs-content/integrations/_category_.json b/docs/docs-content/integrations/_category_.json new file mode 100644 index 0000000000..e1d4231c70 --- /dev/null +++ b/docs/docs-content/integrations/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 100 +} diff --git a/docs/docs-content/integrations/antrea-cni.md b/docs/docs-content/integrations/antrea-cni.md new file mode 100644 index 0000000000..551fb36611 --- /dev/null +++ b/docs/docs-content/integrations/antrea-cni.md @@ -0,0 +1,128 @@ +--- +sidebar_label: 'Antrea CNI' +title: 'Antrea CNI' +description: 'Antrea CNI network pack for Palette Kubernetes Clusters' +type: "integration" +category: ['network', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/antrea/blobs/sha256:3c5704caf6652c63374282cbf413f8e73a77c4efbc49f375c19c73f8e2ec4148?type=image/png' +tags: ["packs", "antrea", "cni", "network"] +--- + + +Palette supports the Antrea container network interface (CNI) for VMware Kubernetes clusters. Antrea CNI enables each pod to have an exclusive IP address from the subnet with direct accessibility. + +Antrea leverages [Open vSwitch](https://www.openvswitch.org/) to implement pod networking and security features. Open vSwitch enables Antrea to implement Kubernetes network policies efficiently. + + +## Supported Versions + +**1.9.x** + +## Prerequisites + +- Enable the integrated NodeIPAM controller in the Antrea manifest: ``NodeIPAM:enable``. +<br />
+ +- When deploying a cluster using Palette, use the ``podCIDR`` parameter in the Pack section of the Kubernetes manifest. The classless inter-domain routing (CIDR) IP specified in the Kubernetes manifest always takes precedence. + +
+
+- When deploying a cluster using ``kubeadm init``, specify the ``--pod-network-cidr`` option and provide the pod network CIDR to use with Antrea. For example:
+
+ + ``--pod-network-cidr=10.244.0.0/16`` + +
+
+:::caution
+
+The CIDR IP specified in Palette using the ``podCIDR`` parameter in the Kubernetes manifest always takes precedence.
+
+If you wish to use Antrea CIDRs, the ``podCIDR`` and ``serviceCIDR`` parameters must be blank in the Kubernetes manifest.
+
+To avoid overlapping your pod network with any of your host networks, choose a suitable CIDR block to specify when you deploy a cluster using ``kubeadm init`` or to use as a replacement in your network plugin's YAML.
+
+:::
+
+ +- The Open vSwitch kernel module must be present on every Kubernetes node. + + +## Parameters + +The Antrea CNI pack supports the following parameters. + +| Parameter | Description | Required (Y/N) | +|-----------|-------------|---------| +| nodeIPAM:enable | Enables the integrated NodeIPAM controller in the Antrea manifest. The default is `false`. | Y | +| clusterCIDRs | CIDR ranges for pods in the cluster. The CIDRs can be either IPv4 or IPv6. You can specify up to one CIDR for each IP family. | N | +| serviceCIDR | IPv4 CIDR ranges reserved for Services. | N | +| serviceCIDRv6 | IPv6 CIDR ranges reserved for Services. | N | +| nodeCIDRMaskSizeIPv4 | Mask size for IPv4 Node CIDR in IPv4 or dual-stack cluster. | N | +| nodeCIDRMaskSizeIPv6 | Mask size for IPv6 Node CIDR in IPv6 or dual-stack cluster. | N | +| NodeIPAM | The feature toggle for ``antrea-controller``. The default is `false`. If you use CIDR ranges, set this to ``true``. | N | +| ServiceExternalIP | The feature toggle for ``antrea-agent`` and ``antrea-controller``. If you use the LoadBalancer service, set this to ``true``. | N | + + +## Usage + +Kubernetes network policies are supported by default. + +Antrea supports LoadBalancer services. Typically, implementing LoadBalancer services requires an external load balancer that is implemented by the Kubernetes cloud provider. + +Antrea provides two options for supporting LoadBalancer services without using an external load balancer: + +
+ +- Using Antrea’s built-in external IP management for Services of type LoadBalancer. + +- Leveraging MetalLB. + +For detailed information, refer to Antrea’s [Service of type LoadBalancer](https://antrea.io/docs/v1.9.0/docs/service-loadbalancer) documentation. + +To learn more about using MetalLB, review [Using MetalLB with Antrea](https://antrea.io/docs/v1.9.0/docs/service-loadbalancer/#using-metallb-with-antrea). + +
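+
+The following is a minimal sketch of how the parameters above might be combined in the pack's YAML values to enable the integrated NodeIPAM controller with explicit CIDR ranges. The key names follow the parameter table, but the nesting (including the ``featureGates`` block) is an assumption; confirm it against the pack's default values before applying.
+
+```yaml
+# Illustrative values only. Verify key names and nesting against the
+# pack's default values; this is not a drop-in configuration.
+nodeIPAM:
+  enable: true                      # integrated NodeIPAM controller (see Prerequisites)
+  clusterCIDRs: ["10.244.0.0/16"]   # pod CIDR range, up to one per IP family
+  serviceCIDR: "10.96.0.0/12"       # IPv4 range reserved for Services
+featureGates:
+  NodeIPAM: true                    # required when CIDR ranges are supplied
+  ServiceExternalIP: true           # required for built-in LoadBalancer support
+```
+
+Remember that if you rely on these Antrea CIDRs, the ``podCIDR`` and ``serviceCIDR`` parameters in the Kubernetes manifest must be left blank, as noted above.
+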
+
+## Troubleshooting
+
+If routing problems occur or some hosts cannot communicate outside their subnet, this indicates overlapping IP addresses or conflicting CIDR IPs.
+
+Ensure you have provided a non-overlapping IP address for your pod network in Palette's Kubernetes manifest using the ``podCIDR`` parameter. The CIDR IP specified with the ``podCIDR`` parameter in the Kubernetes manifest always takes precedence.
+
+If you wish to use Antrea CIDRs and have deployed a cluster using Palette, ensure that you have done the following:
+
+- Removed any value for ``podCIDR`` and ``serviceCIDR`` in the Kubernetes manifest.
+- Provided a non-overlapping IP address for your pod network.
+
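+
+To confirm which pod CIDR a deployed cluster is actually using before changing any values, you can inspect the node specs directly. The command below is a generic Kubernetes check, not specific to this pack:
+
+```bash
+# Print each node's name and the pod CIDR assigned to it.
+kubectl get nodes --output jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.podCIDR}{"\n"}{end}'
+```
+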
+ +## Terraform + +You can reference the Antrea CNI pack in Terraform with a data resource. + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "antrea" { + name = "antrea" + version = "1.9.0" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + +## References + +- [Antrea Service of type LoadBalancer](https://antrea.io/docs/v1.9.0/docs/service-loadbalancer) +- [MetalLB](https://metallb.universe.tf) +- [Antrea](https://antrea.io/) +- [Antrea IPAM Capabilities](https://antrea.io/docs/v1.6.1/docs/antrea-ipam/) +- [Using MetalLB with Antrea](https://antrea.io/docs/v1.9.0/docs/service-loadbalancer/#using-metallb-with-antrea) + diff --git a/docs/docs-content/integrations/argo-cd.md b/docs/docs-content/integrations/argo-cd.md new file mode 100644 index 0000000000..6db453664e --- /dev/null +++ b/docs/docs-content/integrations/argo-cd.md @@ -0,0 +1,54 @@ +--- +sidebar_label: 'Argo CD' +title: 'Argo CD' +description: 'Argo CD for Spectro Cloud Palette' + +type: "integration" +category: ['system app', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/argo-cd/blobs/sha256:647cd3df6fec421e6580589ea7229762d8e828c77036f835f14f4c15c2a44c4c?type=image/png' +tags: ["packs", "argo-cd", "system app"] +--- + +[Argo CD](https://argo-cd.readthedocs.io/en/stable/) is a declarative, GitOps continuous delivery tool for Kubernetes. Argo CD follows the GitOps pattern of using Git repositories as the source of truth for defining the desired application state. Argo CD automates the deployment of the desired application states in the specified target environments. Application deployments can track updates to branches, tags, or pinned to a specific version of manifests at a Git commit. Start using Argo CD with Palette today by consuming this pack. + + +## Prerequisites + +- Kubernetes 1.7+ + +## Version Supported + + + + +* **3.3.5** + + + + + + + +* **3.2.6** + + + + + +## Notable Parameters + +| Parameter | Description | +|-----------------------|------------------------------------------------------------------------------------------------| +| global.image.repository | The repository that is the source of truth. | +| global.image.tag | The image tag to pull. | +| global.image.imagePullPolicy | If defined, a imagePullPolicy applied to all ArgoCD deployments. Defaults to ` IfNotPresent` | +| global.securityContext | A list of security contexts +|imagePullSecrets| If defined, uses a Secret to pull an image from a private Docker registry or repository. 
+|hostAliases| A list of mapping between IP and hostnames that will be injected as entries in the pod's hosts files + + + +## References + +- [Argo CD](https://argo-cd.readthedocs.io/en/stable/) diff --git a/docs/docs-content/integrations/aws-autoscaler.md b/docs/docs-content/integrations/aws-autoscaler.md new file mode 100644 index 0000000000..9187a5566c --- /dev/null +++ b/docs/docs-content/integrations/aws-autoscaler.md @@ -0,0 +1,379 @@ +--- +sidebar_label: 'AWS Cluster Autoscaler' +title: 'AWS Cluster Autoscaler' +description: 'AWS Cluster Autoscaler for Spectro Cloud Palette' +hide_table_of_contents: true +type: "integration" +category: ['system app', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/aws-cluster-autoscaler/blobs/sha256:f86813591b3b63b3afcf0a604a7c8c715660448585e89174908f3c6a421ad8d8?type=image/png' +tags: ["packs", "aws-cluster-autoscaler", "system app", "network"] +--- + + + +Palette supports autoscaling for AWS EKS clusters by using the AWS Cluster Autoscaler pack. +The Cluster Autoscaler dynamically scales cluster resources. It monitors the workload and provisions or shuts down cluster nodes to maximize the cluster's performance and make it more resilient to failures. It resizes the Kubernetes cluster in the following two conditions: + + +* Scale-up: The Cluster Autoscaler triggers a scale-up operation if insufficient cluster resources lead to multiple pod failures. The pods become eligible for scheduling on the new nodes. The Cluster Autoscaler checks for pod failures every 30 seconds and schedules impacted pods on new nodes. Scaling up will not happen when the given pods have node affinity. + + +* Scale-down: The Cluster Autoscaler triggers a scale-down operation if nodes are underutilized for ten continuous minutes, and their pods are eligible for rescheduling on other available nodes. The node utilization threshold for scaling down a node defaults to 50% of the node's capacity. The Cluster Autoscaler calculates the node utilization threshold based on CPU and memory utilization. In scenarios where the node is underutilized, the Cluster Autoscaler migrates the pods from underutilized nodes to other available nodes and then shuts down the underutilized nodes. + + +Cluster Autoscaler pack is deployed as a [*Deployment*](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) in your cluster and utilizes [Amazon EC2 Auto Scaling Groups](https://docs.aws.amazon.com/autoscaling/ec2/userguide/what-is-amazon-ec2-auto-scaling.html) to manage node groups. + + + +## Versions Supported + + + + + +## Prerequisites + +* Kubernetes 1.24.x or higher. + + +* Permission to create an IAM policy in the AWS account you use with Palette. + + +* IAM policy - A [Full Cluster Autoscaler Features](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#full-cluster-autoscaler-features-policy-recommended) IAM policy must be attached to the EKS cluster's node group. The policy must allow the Cluster Autoscaler to scale the cluster's node groups. + + There are two ways to achieve this prerequisite. You can define the policy as a *customer-managed* policy in the AWS account and use its Amazon Resource Name (ARN) in the cluster profile. Alternatively, you can attach the IAM policy as an *inline* policy to the node group if you have already deployed your cluster. Refer to the [Usage](#usage) section below to learn more. 
+ + +* Updated Kubernetes layer manifest - The Kubernetes pack's manifest should be updated with the newly created IAM policy ARN. The YAML code block below displays the `managedMachinePool.roleAdditionalPolicies` section to update in the Kubernetes pack's manifest. Refer to the [Usage](#usage) section below for more details with an example. +
+ + ```yaml + managedMachinePool: + #roleName: {{ name of the self-managed role | format "${string}" }} + ## A list of additional policies to attach to the node group role + roleAdditionalPolicies: + - {{ arn for the policy1 | format "${string}" }} + ``` +
+ +## Usage + +Cluster Autoscaler helps improve your cluster's performance and makes your cluster more resilient to failures. It automatically adjusts the number of nodes in your cluster based on the current workload. In other words, Cluster Autoscaler monitors the resource utilization, such as CPU and memory, and the number of pods active in your cluster and scales the cluster when either of these events occurs: +
+ +- Multiple pods fail due to resource contention. In this case, the Cluster Autoscaler will provision more nodes. + + +- Nodes are underutilized for a specific period. In this case, the Cluster Autoscaler will reschedule the pods onto other nodes and shut down the underutilized node. +
+ + +### Deploy Cluster Autoscaler +To deploy the Cluster Autoscaler pack, you must first define an IAM policy in the AWS account associated with Palette. + +Next, update the cluster profile to specify the IAM policy ARN in the Kubernetes pack's manifest. Palette will attach that IAM policy to your cluster's node group during deployment. Note that Palette automatically creates two IAM roles in the AWS account when you deploy an EKS cluster. One role is for the cluster, and another for the cluster's node group. The cluster's IAM role name will have the following naming convention, `[your-cluster-name]-iam-service-role`, and the node group's IAM role name will follow the `ng-role_worker-pool-[random-string]` naming convention. + +The following steps provide detailed instructions for deploying the Cluster Autoscaler pack. + +
+ +1. Define the new IAM policy using the policy outlined below, and give it a name, for example, *PaletteEKSClusterAutoscaler*. +
+ + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeScalingActivities", + "autoscaling:DescribeTags", + "ec2:DescribeInstanceTypes", + "ec2:DescribeLaunchTemplateVersions" + ], + "Resource": ["*"] + }, + { + "Effect": "Allow", + "Action": [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:DescribeImages", + "ec2:GetInstanceTypesFromInstanceRequirements", + "eks:DescribeNodegroup" + ], + "Resource": ["*"] + } + ] + } + ``` + + +2. Copy the IAM policy ARN to the clipboard for the next step. For example, the policy ARN will be similar to `arn:aws:iam::650628870702:policy/PaletteEKSClusterAutoscaler`. + + +3. In your cluster profile, and update the `managedMachinePool.roleAdditionalPolicies` section in the Kubernetes pack's manifest with the newly created IAM policy ARN. The snapshot below displays the specific section to update with the policy ARN. + + ![A snapshot displaying the ARN added to the Kubernetes pack's manifest.](/integrations_aws-cluster-autoscaler_k8s-manifest.png) + + For example, the code block below displays the updated `managedMachinePool.roleAdditionalPolicies` section with a sample policy ARN, `"arn:aws:iam::650628870702:policy/PaletteEKSClusterAutoscaler"`. Before you use the following code block, replace the ARN below with yours. +
+ + ```yaml + managedMachinePool: + # roleName: {{ name of the self-managed role | format "${string}" }} + # A list of additional policies to attach to the node group role + roleAdditionalPolicies: + - "arn:aws:iam::650628870702:policy/PaletteEKSClusterAutoscaler" + ``` +
+ + :::info + + If you do not want to update the Kubernetes pack's manifest, you can add an *inline* IAM policy to the cluster's node group post deployment. Refer to this [AWS guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html#add-policies-console) on how to embed an inline policy for a user or role. Refer to the [AWS IAM documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html) for the differences between managed and inline policies. + + ::: + + +4. In the cluster deployment wizard, when you are in the **Nodes configuration** section, you must enter the minimum and maximum number of worker pool nodes, and the instance type (size) that suits your requirement. + + You must provide the node count limits because the Cluster Autoscaler uses an Auto Scaling Group to manage the cluster's node group. An Auto Scaling Group requires a minimum and maximum count and the selection of an instance type. You can choose an instance type that suits your requirement. + + For example, the snapshot below displays the cluster's minimum and maximum capacity. + + ![A snapshot displaying the minimum and maximum node count in Palette.](/integrations_aws-cluster-autoscaler_node-count.png) + +
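+
+If you prefer to create the IAM policy from step 1 with the AWS CLI instead of the AWS console, a sketch such as the following saves the policy JSON to a file and returns the ARN needed in step 3. It assumes the AWS CLI is installed and configured for the account used with Palette:
+
+```bash
+# Save the policy JSON from step 1 as cluster-autoscaler-policy.json, then create
+# the customer-managed policy and print its ARN for the Kubernetes pack's manifest.
+aws iam create-policy \
+  --policy-name PaletteEKSClusterAutoscaler \
+  --policy-document file://cluster-autoscaler-policy.json \
+  --query 'Policy.Arn' \
+  --output text
+```
+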
+ +### Resize the Cluster + +To better understand the scaling behavior of the Cluster Autoscaler and its impact on a cluster, do the following exercise to gain firsthand experience with the scaling behavior. + +In the following example scenario, you will first create a cluster with large-sized worker pool instances. Next, you will manually reduce the instance size, leading to insufficient resources for existing pods and multiple pod failures in the cluster. As a result, the Cluster Autoscaler will provision new smaller-sized nodes with enough capacity to accommodate the current workload and reschedule those contending pods on new nodes. Also, the new nodes' count will be within the minimum and maximum limit you specified for the worker pool. + + +Use the following steps to trigger the pod rescheduling event manually: +
+ +1. In the cluster deployment wizard, while defining the **Nodes configuration**, choose a large-sized instance type. For example, you can choose your worker pool to have instance size **t3.2xlarge** (8 vCPUs, 32 GB RAM) or higher. + + +2. After your cluster is successfully deployed, navigate to the **Nodes** tab in the cluster details page in Palette, and note the count and size of nodes. The snapshots below display one node of the type **t3.2xlarge** in the worker pool of a successfully deployed cluster. + + ![A snapshot displaying one node of the type **t3.2xlarge** in the worker pool.](/integrations_aws-cluster-autoscaler_one-node.png) + + + +3. Manually reduce the instance size in the worker-pool configuration to a **t3.medium** (2 vCPUs, 8 GB RAM). The snapshot below displays how to edit the instance size in the node pool configuration. + + ![A snapshot displaying how to edit node pool configuration.](/integrations_aws-cluster-autoscaler_edit-node.png) + + +4. Wait for a few minutes for the new nodes to provision. Reducing the node size will make the Cluster Autoscaler shut down the large node and provision smaller-sized nodes with enough capacity to accommodate the current workload. Also, the new node count will be within the minimum and maximum limit you specified for the worker pool configuration wizard. + + The following snapshot displays two new nodes of the size **t3.medium** spin up automatically. These two smaller-sized nodes will be able to handle the same workload as a single larger-sized node. + + ![A snapshot displaying new nodes of the size **t3.medium** spin up automatically, *collectively* providing enough capacity to accommodate the current workload. ](/integrations_aws-cluster-autoscaler_two-nodes.png) +
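+
+To watch the scale-down and scale-up activity while this exercise runs, you can follow the node list and the autoscaler's own logs. The label used below is an assumption based on the pack's Helm chart; list the **kube-system** deployments first if it does not match your cluster:
+
+```bash
+# Follow node additions and removals as the autoscaler reacts to the resize.
+kubectl get nodes --watch
+
+# Review the autoscaler's scaling decisions. Confirm the label or deployment name
+# in your cluster first, for example with: kubectl get deployments --namespace kube-system
+kubectl logs --namespace kube-system --selector app.kubernetes.io/name=aws-cluster-autoscaler --tail 50
+```
+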
+ +
+ + + +## Prerequisites + +* Kubernetes 1.19.x or higher. + + +* Permission to create an IAM policy in the AWS account you use with Palette. + + +* IAM policy - A [Full Cluster Autoscaler Features](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#full-cluster-autoscaler-features-policy-recommended) IAM policy must be attached to the EKS cluster's node group. The policy must allow the Cluster Autoscaler to scale the cluster's node groups. + + There are two ways to achieve this prerequisite. You can define the policy as a *customer-managed* policy in the AWS account and use its Amazon Resource Name (ARN) in the cluster profile. Alternatively, you can attach the IAM policy as an *inline* policy to the node group if you have already deployed your cluster. Refer to the [Usage](#usage) section below to learn more. + + +* Updated Kubernetes layer manifest - The Kubernetes pack's manifest should be updated with the newly created IAM policy ARN. The YAML code block below displays the `managedMachinePool.roleAdditionalPolicies` section to update in the Kubernetes pack's manifest. Refer to the [Usage](#usage) section below for more details with an example. +
+ + ```yaml + managedMachinePool: + #roleName: {{ name of the self-managed role | format "${string}" }} + ## A list of additional policies to attach to the node group role + roleAdditionalPolicies: + - {{ arn for the policy1 | format "${string}" }} + ``` +
+ +## Usage + +Cluster Autoscaler helps improve your cluster's performance and makes your cluster more resilient to failures. It automatically adjusts the number of nodes in your cluster based on the current workload. In other words, Cluster Autoscaler monitors the resource utilization, such as CPU and memory, and the number of pods active in your cluster and scales the cluster when either of these events occurs: +
+ +- Multiple pods fail due to resource contention. In this case, the Cluster Autoscaler will provision more nodes. + + +- Nodes are underutilized for a specific period. In this case, the Cluster Autoscaler will reschedule the pods onto other nodes and shut down the underutilized node. +
+ + +### Deploy Cluster Autoscaler +To deploy the Cluster Autoscaler pack, you must first define an IAM policy in the AWS account associated with Palette. + +Next, update the cluster profile to specify the IAM policy ARN in the Kubernetes pack's manifest. Palette will attach that IAM policy to your cluster's node group during deployment. Note that Palette automatically creates two IAM roles in the AWS account when you deploy an EKS cluster. One role is for the cluster, and another for the cluster's node group. The cluster's IAM role name will have the following naming convention, `[your-cluster-name]-iam-service-role`, and the node group's IAM role name will follow the `ng-role_worker-pool-[random-string]` naming convention. + +The following steps provide detailed instructions for deploying the Cluster Autoscaler pack. + +
+ +1. Define the new IAM policy using the policy outlined below, and give it a name, for example, *PaletteEKSClusterAutoscaler*. +
+ + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeScalingActivities", + "autoscaling:DescribeTags", + "ec2:DescribeInstanceTypes", + "ec2:DescribeLaunchTemplateVersions" + ], + "Resource": ["*"] + }, + { + "Effect": "Allow", + "Action": [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:DescribeImages", + "ec2:GetInstanceTypesFromInstanceRequirements", + "eks:DescribeNodegroup" + ], + "Resource": ["*"] + } + ] + } + ``` + + +2. Copy the IAM policy ARN to the clipboard for the next step. For example, the policy ARN will be similar to `arn:aws:iam::650628870702:policy/PaletteEKSClusterAutoscaler`. + + +3. In your cluster profile, and update the `managedMachinePool.roleAdditionalPolicies` section in the Kubernetes pack's manifest with the newly created IAM policy ARN. The snapshot below displays the specific section to update with the policy ARN. + + ![A snapshot displaying the ARN added to the Kubernetes pack's manifest.](/integrations_aws-cluster-autoscaler_k8s-manifest.png) + + For example, the code block below displays the updated `managedMachinePool.roleAdditionalPolicies` section with a sample policy ARN, `"arn:aws:iam::650628870702:policy/PaletteEKSClusterAutoscaler"`. Before you use the following code block, replace the ARN below with yours. +
+ + ```yaml + managedMachinePool: + # roleName: {{ name of the self-managed role | format "${string}" }} + # A list of additional policies to attach to the node group role + roleAdditionalPolicies: + - "arn:aws:iam::650628870702:policy/PaletteEKSClusterAutoscaler" + ``` +
+ + :::info + + If you do not want to update the Kubernetes pack's manifest, you can add an *inline* IAM policy to the cluster's node group post deployment. Refer to this [AWS guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html#add-policies-console) on how to embed an inline policy for a user or role. Refer to the [AWS IAM documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html) for the differences between managed and inline policies. + + ::: + + +4. In the cluster deployment wizard, when you are in the **Nodes configuration** section, you must enter the minimum and maximum number of worker pool nodes, and the instance type (size) that suits your requirement. + + You must provide the node count limits because the Cluster Autoscaler uses an Auto Scaling Group to manage the cluster's node group. An Auto Scaling Group requires a minimum and maximum count and the selection of an instance type. You can choose an instance type that suits your requirement. + + For example, the snapshot below displays the cluster's minimum and maximum capacity. + + ![A snapshot displaying the minimum and maximum node count in Palette.](/integrations_aws-cluster-autoscaler_node-count.png) + +
+ +### Resize the Cluster + +To better understand the scaling behavior of the Cluster Autoscaler and its impact on a cluster, do the following exercise to gain firsthand experience with the scaling behavior. + +In the following example scenario, you will first create a cluster with large-sized worker pool instances. Next, you will manually reduce the instance size, leading to insufficient resources for existing pods and multiple pod failures in the cluster. As a result, the Cluster Autoscaler will provision new smaller-sized nodes with enough capacity to accommodate the current workload and reschedule those contending pods on new nodes. Also, the new nodes' count will be within the minimum and maximum limit you specified for the worker pool. + + +Use the following steps to trigger the pod rescheduling event manually: +
+ +1. In the cluster deployment wizard, while defining the **Nodes configuration**, choose a large-sized instance type. For example, you can choose your worker pool to have instance size **t3.2xlarge** (8 vCPUs, 32 GB RAM) or higher. + + +2. After your cluster is successfully deployed, navigate to the **Nodes** tab in the cluster details page in Palette, and note the count and size of nodes. The snapshots below display one node of the type **t3.2xlarge** in the worker pool of a successfully deployed cluster. + + + ![A snapshot displaying one node of the type **t3.2xlarge** in the worker pool.](/integrations_aws-cluster-autoscaler_one-node.png) + + +3. Manually reduce the instance size in the worker-pool configuration to a **t3.medium** (2 vCPUs, 8 GB RAM). The snapshot below displays how to edit the instance size in the node pool configuration. + + ![A snapshot displaying how to edit node pool configuration.](/integrations_aws-cluster-autoscaler_edit-node.png) + + +4. Wait for a few minutes for the new nodes to provision. Reducing the node size will make the Cluster Autoscaler shut down the large node and provision smaller-sized nodes with enough capacity to accommodate the current workload. Also, the new node count will be within the minimum and maximum limit you specified for the worker pool configuration wizard. + + The following snapshot displays two new nodes of the size **t3.medium** spin up automatically. These two smaller-sized nodes will be able to handle the same workload as a single larger-sized node. + + + ![A snapshot displaying new nodes of the size **t3.medium** spin up automatically, *collectively* providing enough capacity to accommodate the current workload. ](/integrations_aws-cluster-autoscaler_two-nodes.png) +
+ + +
+ +
+ +## Troubleshooting + +If you are facing the `LimitExceeded: Cannot exceed quota for PoliciesPerRole:10` error in the cluster deployment logs, it may be because the default IAM role Palette creates for the node group already has 10 policies attached to it, and you are trying to attach one more. By default, your AWS account will have a quota of 10 managed policies per IAM role. To fix the error, follow the instruction in this [AWS guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-quotas-entities) to request a quota increase. + + + +If you encounter an `executable aws-iam-authenticator not found` error in your terminal when attempting to access your EKS cluster from your local machine, it may be due to the [aws-iam-authenticator](https://github.com/kubernetes-sigs/aws-iam-authenticator) plugin missing from your local environment. You can find the installation steps for the +aws-iam-authenticator in the following [install guide](https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html). + + +## Terraform + +You can reference the AWS Cluster Autoscaler pack in Terraform with a data resource. + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "aws-cluster-autoscaler" { + name = "aws-cluster-autoscaler" + version = "1.26.3" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + +## References + +- [Cluster Autoscaler on AWS](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) + + +- [Amazon EKS Autoscaling](https://docs.aws.amazon.com/eks/latest/userguide/autoscaling.html) + + +- [AWS IAM Authenticator Plugin](https://github.com/kubernetes-sigs/aws-iam-authenticator) diff --git a/docs/docs-content/integrations/aws-ebs.md b/docs/docs-content/integrations/aws-ebs.md new file mode 100644 index 0000000000..f612a4a46d --- /dev/null +++ b/docs/docs-content/integrations/aws-ebs.md @@ -0,0 +1,103 @@ +--- +sidebar_label: 'AWS-EBS' +title: 'AWS EBS' +description: 'AWS EBS storage add on into Spectro Cloud' +type: "integration" +category: ['storage', 'amd64','fips'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/csi-aws/blobs/sha256:f86813591b3b63b3afcf0a604a7c8c715660448585e89174908f3c6a421ad8d8?type=image/png' +tags: ["packs", "aws-ebs", "storage"] +--- + + +AWS Elastic Block Store is an easy to use, high performance block storage at any scale. It helps in the easy deployment, management, and scaling of the most demanding and high-performance tenant workloads. AWS EBS also ensures availability with replication and durability. + +## Prerequisites + +Palette requires the following IAM policies to be attached to the IAM role that is used to create the cluster: + +- The AWS managed policy `AmazonEBSCSIDriverPolicy`. + +- For AWS Key Management Service (KMS) encryption, refer to the [KMS Encryption Policy](#kms-encryption-policy) section. + +## Versions Supported + + + + + +* **1.12.0** + + + + + +* **1.10.0** + + + + + +* ** 1.8.0** + + + + + +* ** 1.5.1** + + + + + +## KMS Encryption Policy + +If you want to use KMS encryption, you must attach the following IAM policy to the Palette IAM role that is used to create the cluster. 
+ +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "kms:GenerateDataKeyWithoutPlaintext", + "kms:CreateGrant" + ], + "Resource": "*" + } + ] +} +``` + +## Notable Parameters + +| Name | Supported Values | Default Value | Description | +| --- | --- | --- | --- | +| storageType | gp2, sc1, st1, io1 | gp2 | AWS Volume type to be used. | +| reclaimPolicy | Delete, Retain | Delete | Defines whether volumes will be retained or deleted. | +| allowVolumeExpansion | true, false | true | Flag to allow resizing a volume. | +| isDefaultClass | true, false | true | Flag to denote if this StorageClass will be the default. | +| volumeBindingMode | WaitForFirstConsumer, Immediate | WaitForFirstConsumer | Controls when volumeBinding and dynamic provisioning should happen. | +| encrypted | true, false | true | Denotes whether the EBS volume should be encrypted or not. | +| kmsKeyId (optional) | The full Amazon Resource Name of the key to use when encrypting the volume. | -- | If you don't provide the full Amazon Resource Name but **encrypted** is true, AWS [generates a key](https://kubernetes.io/docs/concepts/storage/storage-classes/#aws-ebs). | + + +You can view the full parameter list [here](https://github.com/kubernetes-sigs/aws-ebs-csi-driver#createvolume-parameters). + + +Storage classes that Palette creates are named `spectro-storage-class` and can be fetched from kubectl using the following CLI command: + + +```bash +kubectl get storageclass --all-namespaces +``` + + +## References + +- [AWS EBS](https://aws.amazon.com/ebs/) + + +- [AWS EBS Storage Class Details](https://kubernetes.io/docs/concepts/storage/storage-classes/#aws-ebs) diff --git a/docs/docs-content/integrations/aws-efs.md b/docs/docs-content/integrations/aws-efs.md new file mode 100644 index 0000000000..72736f52a4 --- /dev/null +++ b/docs/docs-content/integrations/aws-efs.md @@ -0,0 +1,396 @@ +--- +sidebar_label: 'AWS EFS' +title: 'AWS EFS' +description: 'AWS EFS storage add on into Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['storage', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-aws-efs/blobs/sha256:5d1eb98bb847489f341beda1407c14442854ab8e5910d0cc8da1a63636057927?type=image/png' +tags: ["packs", "aws-efs", "storage"] +--- + +You can access information from an Amazon Elastic File System (Amazon EFS) volume within a specific region, no matter which availability zone it's in. The cluster can be distributed across availability zones instead of having it in one location and replicating it multiple times. + +Palette handles setting up the AWS EFS as a volume with ease when adding the PersistentVolume storage container. Palette will dynamically provision the AWS EFS storage layer for the worker node. + +## Versions Supported + + + + + +## Prerequisites + +- Create the Identity and Access Management (IAM) role that allows the driver to manage AWS EFS access points. The [Introducing Amazon EFS CSI dynamic provisioning](https://aws.amazon.com/blogs/containers/introducing-efs-csi-dynamic-provisioning/) blog provides information on `EFSCSIControllerIAMPolicy`. + +- An AWS EFS file system is available. Check out the guide [Create your Amazon EFS file system](https://docs.aws.amazon.com/efs/latest/ug/gs-step-two-create-efs-resources.html) if you need additional guidance. + +- Create your EKS cluster using static provisioning. 
Static provisioning requires you to create a virtual private cloud (VPC), subnets, route tables, internet gateway and NAT gateways in the AWS console. + + You can use the same VPC or a different one for EFS: + + - Using the same VPC for EFS ensures EFS is reachable from your EKS cluster. We recommend using the same VPC because it doesn't require peering. + + - If you use a different VPC for EFS, you need to peer the VPC with the VPC on which the EKS cluster is running.

+ +- The security group associated with your EFS file system must have an inbound rule that allows Network File System (NFS) traffic (port 2049) from the CIDR for your cluster's VPC. + + +## Parameters + +The table lists commonly used parameters you can configure when adding this pack. + +| Parameter | Description | Default | +|-----------------|-----------------------|------------------------------| +| storageClassName | AWS Volume type to be used. | spectro-storage-class | +| isDefaultClass | Toggle for Default class. | true | +| fileSystemId | The file system under which access points are created. Create the file system prior to this setup. This is a required field and needs to be set to a pre-created AWS EFS volume. Other values can use the default setting. | Set this to an AWS EFS volume you have already created. | +| provisioningMode | Type of volume provisioned by AWS EFS. For now, this is the only access point supported. | efs-ap | +| directoryPerms | Directory permissions for Access Point root directory creation. | 700 | +| gidRangeStart | Starting range of the Portable Operating System Interface (POSIX) group Id to be applied for access point root directory creation (optional). | 1000 | +| gidRangeEnd | End range of the POSIX group Id (optional). | 2000 | +| basePath | Path under which access points for dynamic provisioning is created. If this parameter is not specified, access points are created under the root directory of the file system. | `/base_efs` | + + + +## Usage + +There are two ways to add AWS EFS to Palette: + +- Add EFS as a base CSI layer in a cluster profile. +- Add EFS as an Add-on layer, which will create a new storage class using the AWS EFS file system. + + +### Policy Information + +You must create a policy that allows you to use EFS from your IAM account. You can use the following JSON to create the policy.

+ +```yaml +{ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Action": [ + "elasticfilesystem:DescribeAccessPoints", + "elasticfilesystem:DescribeFileSystems" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "elasticfilesystem:CreateAccessPoint" + ], + "Resource": "*", + "Condition": { + "StringLike": { + "aws:RequestTag/efs.csi.aws.com/cluster": "true" + } + } + }, + { + "Effect": "Allow", + "Action": "elasticfilesystem:DeleteAccessPoint", + "Resource": "*", + "Condition": { + "StringEquals": { + "aws:ResourceTag/efs.csi.aws.com/cluster": "true" + } + } + } + ] +} +``` + +### Storage Class + +Palette creates storage classes named *spectro-storage-class*. You can view a list of storage classes using this kubectl command: +
+ +```bash +kubectl get storageclass +``` + +### PersistentVolumeClaim + +A PersistentVolumeClaim (PVC) is a request made by a pod for a certain amount of storage from the cluster. It acts as a link between the pod and the storage resource, allowing the pod to use the storage. You can learn details about a PVC, as shown in the following output, when you use the `kubectl describe pvc` command. +
+ +```bash +kubectl describe pvc my-efs-volume +``` + +```yaml + +Name: efs + +Namespace: default + +StorageClass: aws-efs + +Status: Pending + +Volume: + +Labels: + +Annotations: kubectl.kubernetes.io/last-applied-configuration: +{"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{"volume.beta.kubernetes.io/ +storage-class":"aws-efs"},"name":"..."} + +volume.beta.kubernetes.io/storage-class: aws-efs + +Finalizers: [kubernetes.io/pvc-protection] + +Capacity: + +Access Modes: + +Events: +| Type | Reason | Age | From | Message | +| ------- | ------------------ | ------------------ | --------------------------- | ------------------------ | +| Warning | ProvisioningFailed | 43s (x12 over 11m) | persistentvolume-controller | no volume plugin matched | +Mounted By: + +``` + +
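+
+For dynamic provisioning, a StorageClass and PVC pair along the following lines is typically all that is required. This is a sketch: the file system ID and names are placeholders, and the parameter values mirror the table above, so adjust them to your environment.
+
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: my-efs-storage-class            # example name
+provisioner: efs.csi.aws.com
+parameters:
+  provisioningMode: efs-ap              # access point based dynamic provisioning
+  fileSystemId: fs-0123456789abcdef0    # replace with your EFS file system ID
+  directoryPerms: "700"
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: my-efs-claim                    # example name
+spec:
+  accessModes:
+    - ReadWriteMany
+  storageClassName: my-efs-storage-class
+  resources:
+    requests:
+      storage: 5Gi
+```
+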
+ + + + +## Prerequisites + +- Create the Identity and Access Management (IAM) role that allows the driver to manage AWS EFS access points. The [Introducing Amazon EFS CSI dynamic provisioning](https://aws.amazon.com/blogs/containers/introducing-efs-csi-dynamic-provisioning/) blog provides information on `EFSCSIControllerIAMPolicy`. + +- An AWS EFS file system is available. Check out the guide [Create your Amazon EFS file system](https://docs.aws.amazon.com/efs/latest/ug/gs-step-two-create-efs-resources.html) if you need additional guidance. + +- Create your EKS cluster using static provisioning. Static provisioning requires you to create a virtual private cloud (VPC), subnets, route tables, internet gateway and NAT gateways in the AWS console. + + You can use the same VPC or a different one for EFS: + + - Using the same VPC for EFS ensures EFS is reachable from your EKS cluster. We recommend using the same VPC because it doesn't require peering. + + - If you use a different VPC for EFS, you need to peer the VPC with the VPC on which the EKS cluster is running.

+ +- The security group associated with your EFS file system must have an inbound rule that allows Network File System (NFS) traffic (port 2049) from the CIDR for your cluster's VPC. + + +## Parameters + +The table lists commonly used parameters you can configure when adding this pack. + +| Parameter | Description | Default | +|-------------------------|--------------------------------------------------------|---------------------------------------------| +| storageClassName | AWS Volume type to be used. | spectro-storage-class | +| isDefaultClass | Toggle for Default class. | true | +| fileSystemId | The file system under which access points are created. Create the file system prior to this setup. This is a required field and needs to be set to a pre-created AWS EFS volume. Other values can use the default setting. | Set this to an AWS EFS volume you have already created. | +| provisioningMode | Type of volume provisioned by AWS EFS. For now, this is the only access point supported. | efs-ap | +| directoryPerms | Directory permissions for Access Point root directory creation. | 700 | +| gidRangeStart | Starting range of the Portable Operating System Interface (POSIX) group Id to be applied for access point root directory creation (optional). | 1000 | +| gidRangeEnd | End range of the POSIX group Id (optional). | 2000 | +| basePath | Path under which access points for dynamic provisioning is created. If this parameter is not specified, access points are created under the root directory of the file system. | `/base_efs` | + + +## Usage + +There are two ways to add AWS EFS to Palette: + +- Add EFS as a CSI layer in AWS/EKS. +- Add EFS as an Add-on layer, which will create a new storage class using the AWS EFS file system. + + +### Policy Information + +You must create a policy that allows you to use EFS from your IAM account. You can use the following JSON to create the policy.

+ +```yaml +{ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Action": [ + "elasticfilesystem:DescribeAccessPoints", + "elasticfilesystem:DescribeFileSystems" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "elasticfilesystem:CreateAccessPoint" + ], + "Resource": "*", + "Condition": { + "StringLike": { + "aws:RequestTag/efs.csi.aws.com/cluster": "true" + } + } + }, + { + "Effect": "Allow", + "Action": "elasticfilesystem:DeleteAccessPoint", + "Resource": "*", + "Condition": { + "StringEquals": { + "aws:ResourceTag/efs.csi.aws.com/cluster": "true" + } + } + } + ] +} +``` + +### Storage Class + +Palette creates storage classes named *spectro-storage-class*. You can view a list of storage classes using this kubectl command: + +
+ +```bash +kubectl get storageclass +``` + +### PersistentVolumeClaim + +A PersistentVolumeClaim (PVC) is a request made by a pod for a certain amount of storage from the cluster. It acts as a link between the pod and the storage resource, allowing the pod to use the storage. You can learn details about a PVC by using the `kubectl describe pvc` command, as the following example output shows. +
+ +```bash +kubectl describe pvc my-efs-volume +``` + +```yaml + +Name: efs + +Namespace: default + +StorageClass: aws-efs + +Status: Pending + +Volume: + +Labels: + +Annotations: kubectl.kubernetes.io/last-applied-configuration: +{"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{"volume.beta.kubernetes.io/ +storage-class":"aws-efs"},"name":"..."} + +volume.beta.kubernetes.io/storage-class: aws-efs + +Finalizers: [kubernetes.io/pvc-protection] + +Capacity: + +Access Modes: + +Events: +| Type | Reason | Age | From | Message | +| ------- | ------------------ | ------------------ | --------------------------- | ------------------------ | +| Warning | ProvisioningFailed | 43s (x12 over 11m) | persistentvolume-controller | no volume plugin matched | +Mounted By: + +``` + +
+
+ + +## Troubleshooting + +Some basic troubleshooting steps you can take if you receive errors in your pods when mounting an Amazon EFS volume in your Amazon EKS cluster are to verify you have the following: +
+ + - An Amazon EFS file system created with a mount target in each of the worker node subnets. + - A valid EFS storage class definition using the efs.csi.aws.com provisioner. + - A valid PersistentVolumeClaim (PVC) definition and PersistentVolume definition. This isn't needed if you're using dynamic provisioning. + - The Amazon EFS CSI driver installed in the cluster. + +The following list provides more specific details to help you troubleshoot issues when mounting an Amazon EFS volume. CSI driver pod logs are also available to determine the cause of the mount failures. If the volume is failing to mount, the efs-plugin logs are available. +
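+
+One way to retrieve those logs is sketched below; the labels and container name follow the upstream driver's defaults and may differ slightly between releases:
+
+```bash
+# Controller-side efs-plugin logs.
+kubectl logs --namespace kube-system --selector app=efs-csi-controller --container efs-plugin
+
+# Node-side efs-plugin logs for the node where the pod is scheduled.
+kubectl logs --namespace kube-system --selector app=efs-csi-node --container efs-plugin
+```
+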
+ +- **Mount Targets:** Verify the mount targets are configured correctly. Be sure to create the EFS mount targets in each Availability Zone where the EKS worker nodes are running. + +- **Allow NFS Traffic:** Verify the security group associated with your EFS file system and worker nodes allows NFS traffic. + + - The security group that's associated with your EFS file system must have an inbound rule that allows NFS traffic (port 2049) from the CIDR for your cluster's VPC. + + - The security group that's associated with your worker nodes where the pods are failing to mount the EFS volume must have an outbound rule that allows NFS traffic (port 2049) to the EFS file system. + +- **Subdirectories:** If you're mounting the pod to a subdirectory, verify the subdirectory is created in your EFS file system. When you add sub paths in persistent volumes, the EFS CSI driver doesn't create the subdirectory path in the EFS file system as part of the mount operation. Subdirectories must be present before you start the mount operation. + +- **DNS server:** Confirm the cluster's Virtual Private Cloud (VPC) uses the Amazon DNS server. To verify the DNS server, log in to the worker node and issue the following command, replacing ```region``` with your AWS Region: + +
+ + ```bash + nslookup fs-4fxxxxxx.efs.region.amazonaws.com + + ``` + +- **Permissions:** Verify you have "iam" mount options in the persistent volume definition when using a restrictive file system policy. In some cases, the EFS file system policy is configured to restrict mount permissions to specific IAM roles. In this case, the EFS mount helper in the EFS CSI driver requires the ```-o iam``` mount option during the mount operation. Include the **spec.mountOptions** property:

+
+  ```yaml
+  spec:
+    mountOptions:
+      - iam
+  ```
+- **IAM role:** Verify the Amazon EFS CSI driver controller service account is associated with the correct IAM role and that the IAM role has the required permissions.
+
+  Run the following command:
+
+  ```bash
+  kubectl describe sa efs-csi-controller-sa --namespace kube-system
+  ```
+
+  You should see this annotation:
+
+  ```yaml
+  eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/AmazonEKS_EFS_CSI_Driver_Policy
+  ```
+- **Driver Pods:** Verify the EFS CSI driver pods are running. Issue the following command to display a list of controller pods and node pods running in your cluster:
+
+  ```bash
+  kubectl get all --selector app.kubernetes.io/name=aws-efs-csi-driver --namespace kube-system
+  ```
+
+- **File system won't mount:** Verify the EFS mount operation from the EC2 worker node where the pod is failing to mount the file system. Log in to the Amazon EKS worker node where the pod is scheduled. Then, use the EFS mount helper to try to manually mount the EFS file system to the worker node. You can run the following command to test:
+
+  ```bash
+  sudo mount -t efs -o tls file-system-dns-name efs-mount-point/
+  ```
+
+You can find more information in Amazon's [Troubleshoot Amazon EFS](https://aws.amazon.com/premiumsupport/knowledge-center/eks-troubleshoot-efs-volume-mount-issues/) guide.
+
+
+## Terraform
+
+You can reference the AWS EFS pack in Terraform with a data resource.
+
+```hcl
+data "spectrocloud_registry" "public_registry" {
+  name = "Public Repo"
+}
+
+data "spectrocloud_pack_simple" "csi-aws-efs" {
+  name = "aws-efs"
+  version = "1.4.0"
+  type = "helm"
+  registry_uid = data.spectrocloud_registry.public_registry.id
+}
+```
+
+## References
+
+- [Amazon EFS CSI Driver](https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html)
+
+- [Amazon Elastic File System](https://aws.amazon.com/efs/)
+
+- [Amazon EFS Tutorial and Examples](https://github.com/aws-samples/amazon-efs-tutorial)
+
+- [IAM Policy Example](https://raw.githubusercontent.com/kubernetes-sigs/aws-ebs-csi-driver/master/docs/example-iam-policy.json)
+
+- [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/)
diff --git a/docs/docs-content/integrations/azure-cni.md b/docs/docs-content/integrations/azure-cni.md new file mode 100644 index 0000000000..219340a705 --- /dev/null +++ b/docs/docs-content/integrations/azure-cni.md @@ -0,0 +1,87 @@ +--- +sidebar_label: 'Azure CNI' +title: 'Azure CNI' +description: 'Azure CNI network pack for Palette AKS Clusters' +hide_table_of_contents: true +type: "integration" +category: ['network', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-azure/blobs/sha256:0787b7943741181181823079533cd363884a28aa0651715ea43408bdc77a5c51?type=image/png' +tags: ["packs", "azure-cni", "cni", "network"] +--- + + +Palette supports Azure Container Network Interface (CNI) networking for Azure Kubernetes Service (AKS) clusters. Azure CNI enables each pod to have exclusive IP addresses from the subnet with direct accessibility. + +To allocate unique IP addresses to individual pods, advanced forethought needs to be put in. As per the maximum pods supported by a node, [IP addresses need to be reserved](https://docs.microsoft.com/en-us/azure/aks/configure-azure-cni#plan-ip-addressing-for-your-cluster) in advance. The default [maximum number](https://docs.microsoft.com/en-us/azure/aks/configure-azure-cni#maximum-pods-per-node) of pods per node varies between kubenet and Azure CNI networking and based on the method of cluster deployment. + + +## Versions Supported + + + + + +**1.4.0** + + + + + + +# Azure CNI Policy Support + +Network Policy is a Kubernetes specification that defines access policies for communication between pods. By default, AKS cluster pods can send and receive traffic without limitations. However, to ensure security, rules to control traffic flow can be defined. Network Policies define an ordered set of rules to send and receive traffic and applies them to a collection of pods that match one or more label selectors. Palette enables Network Policies to be included as part of a wider manifest that also creates a deployment or service. Palette leverages two (2) Network Policies from Azure CNI: + +
+ +* **azure**: Azure's own implementation, called Azure Network Policy. + +* **calico**: An open-source network and network security solution founded by [Tigera](https://www.tigera.io/). + + +Palette users can choose any one of the above Network Policies and provide it to the pack YAML file as `networkPolicy` as given below: + +
+
+ +```yaml +pack: + # The Network policy for ingress and egress traffic between pods in a cluster. Supported values are none, azure, calico + networkPolicy: "none" +``` +
+
+:::info
+ Set the `networkPolicy` value to `none` if no policy should be applied.
+:::
+
+
+ + +## Azure and Calico Policies and their Capabilities + +|Capability |Azure |Calico| +|-----------|-------|------| +|Supported platforms|Linux|Linux, Windows Server 2019 and 2022| +|Supported networking options|Azure CNI|Azure CNI (Linux, Windows Server 2019 and 2022) and kubenet (Linux)| +|Compliance with Kubernetes specification|All policy types supported| All policy types supported| +|Additional features| None |Extended policy model consisting of Global Network Policy, Global Network Set, and Host Endpoint. For more information on using the calicoctl CLI to manage these extended features, see calicoctl user reference guide.| +|Support|Supported by Azure Support and Engineering team|Calico community support.| +|Logging|Rules added or deleted in IP Tables are logged on every host under `/var/log/azure-npm.log`|For more information, see [Calico component logs](https://projectcalico.docs.tigera.io/maintenance/troubleshoot/component-logs)| + +:::caution + +Make sure to use Azure CNI with the Windows operating system as the kubenet is not available for the Windows environment. + +::: + +
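+
+Whichever engine you choose, the policies you apply are standard Kubernetes `NetworkPolicy` objects. The following is a minimal, illustrative policy (namespace, labels, and port are placeholders) that only allows ingress to backend pods from frontend pods:
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-frontend-to-backend      # example name
+  namespace: demo                      # example namespace
+spec:
+  podSelector:
+    matchLabels:
+      app: backend
+  policyTypes:
+    - Ingress
+  ingress:
+    - from:
+        - podSelector:
+            matchLabels:
+              app: frontend
+      ports:
+        - protocol: TCP
+          port: 8080
+```
+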
+ +## References + +- [Azure CNI Git](https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md) + + +- [Azure CNI](https://docs.microsoft.com/en-us/azure/aks/configure-azure-cni) diff --git a/docs/docs-content/integrations/azure-disk.md b/docs/docs-content/integrations/azure-disk.md new file mode 100644 index 0000000000..046b38e4ef --- /dev/null +++ b/docs/docs-content/integrations/azure-disk.md @@ -0,0 +1,59 @@ +--- +sidebar_label: 'Azure Disk' +title: 'Azure Disk' +description: 'Azure Disk storage add on into Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['storage', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-azure/blobs/sha256:0787b7943741181181823079533cd363884a28aa0651715ea43408bdc77a5c51?type=image/png' +tags: ["packs", "azure-disk", "storage"] +--- + + +Azure Disk storage is designed to be used with Azure virtual machines for the tenant workloads. It offers high-performance, durable block storage with sub-millisecond latency and throughput for transaction-intensive workloads. + + +## Versions Supported + + + + + + + + + + + + + +## Notable Parameters + +| Name | Supported Values | Default Value | Description | +| --- | --- | --- | --- | +| storageaccounttype | Standard_LRS, Premium_LRS | Standard_LRS | The storage account type to use | +| kind | managed, shared, dedicated | managed | The disk kind | +| reclaimPolicy | Delete, Retain | Delete | Defines whether volumes will be retained or deleted | +| allowVolumeExpansion | true, false | true | Flag to allow resizing volume | +| isDefaultClass | true, false | true | Flag to denote if this StorageClass will be the default | +| volumeBindingMode | WaitForFirstConsumer, Immediate | WaitForFirstConsumer | Controls when volumeBinding and dynamic provisioning should happen | + + + +## Troubleshooting + +Storage classes created by Palette are named `spectro-storage-class` and can be fetched from kubectl using the following CLI command: + +```bash +kubectl get storageclass --all-namespaces +``` + + +## References + +- [Azure Disk Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/#azure-disk-storage-class) + +- [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes) + + diff --git a/docs/docs-content/integrations/byoos.md b/docs/docs-content/integrations/byoos.md new file mode 100644 index 0000000000..51c8523673 --- /dev/null +++ b/docs/docs-content/integrations/byoos.md @@ -0,0 +1,242 @@ +--- +sidebar_label: "Bring Your Own OS (BYOOS)" +title: "Bring your own OS (BYOOS)" +description: "Bring Your Own OS (BYOOS) pack in Palette." +hide_table_of_contents: true +type: "integration" +category: ['operating system', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: "https://registry.dev.spectrocloud.com/v1/spectro-proxy/blobs/sha256:b6081bca439eeb01a8d43b3cb6895df4c088f80af978856ddc0da568e5c09365?type=image/png" +tags: ["packs", "byoos", "operating system"] +--- + + + + +# Bring Your Own Operating System (BYOOS) + +The [Bring Your Own Operating System (BYOOS)](../cluster-profiles/byoos/byoos.md) enables you to use a custom Operating System (OS) with Palette. Palette comes with several operating systems out-of-the-box, but the existing OS list may not meet all users' needs. + +Using your custom OS provides several benefits, including the ability to control your own dependencies, improve performance, and ensure compatibility with your existing applications. 
With BYOOS, you can choose the OS that best fits your needs, whether it's a commercial or open-source distribution, and integrate it with your Kubernetes clusters. The BYOOS pack can be used with both Edge and non-Edge environments. + +## Versions Supported + +**1.0.x** + +
+ + + + + +## Prerequisites + + +
+ +- The Edge Provider images you have created and uploaded to a container registry. Refer to the [Build Edge Artifacts](../clusters/edge/edgeforge-workflow/palette-canvos.md) guide for steps on how to create the Edge artifacts and how to upload your custom OS to a registry. + + +- Palette 3.3.0 or greater. + +## Parameters + +The BYOS Edge OS pack supports the following parameters. + +### Parameters + +| Parameter | Description | Type | +|----------------------|--------------------------------------------------------|------| +| `pack:content:` | Specifies the content of the **BYOS Edge OS** pack. | map | +| `pack.content.images` | Specifies a list of OS images to use with the pack. | list | +| `pack.content.images.image` | An OS image to use with the pack. | string| +| `system.uri` | The system URI specifies the location of BYOOS image. | string| + + + + ```yaml + pack: + content: + images: + - image: '{{.spectro.pack.edge-native-byoi.options.system.uri}}' + # - image: example.io/my-other-images/example:v1.0.0 + # - image: example.io/my-super-other-images/example:v1.0.0 + + options: + system.uri: example.io/my-images/example-custom-os:v1.4.5 + ``` + +## Usage + +BYOOS enables you to use a custom OS for your Edge host. You can use this feature to customize the desired specifications of your OS layer in the Edge host. You can reference the custom OS through the BYOOS pack. + + +To use a custom OS, you must include all the Edge artifacts and provider images required by the Edge Installer in the custom OS. Refer to the [Build Edge Artifacts](../clusters/edge/edgeforge-workflow/palette-canvos.md) guide for steps on how to create a custom OS that includes all the required components for the Edge Installer. + + +Select the BYOOS pack and fill out the required parameters during the cluster profile creation process. The `system.uri` parameter specifies the location of the BYOOS image. Refer to the [Build Edge Artifacts](../clusters/edge/edgeforge-workflow/palette-canvos.md) guide to learn how to create Edge Artifacts. + +![A view of the Kubernetes pack editor with a YAML configuration](/clusters_site-deployment_model-profile_byoos-pack-yaml.png) + + +
+ + + +## Prerequisites + +To use the non-Edge BYOOS pack, you must have the following: + +
+ +- A custom OS that you created. Refer to the [Build Edge Artifacts](../clusters/edge/edgeforge-workflow/palette-canvos.md) guide to learn how to create a custom OS for Palette. + +## Parameters + +The following is a list of parameters required when using the BYOOS pack. + +
+ +| Parameter | Description | Type | +|----------------------|--------------------------------------------------------|---| +| `osImageOverride` | The image ID used as the base OS layer. This is the image ID as assigned in the infrastructure environment the image belongs to. Example: `ami-0f4804aff4cf9c5a2` | string| +| `osName` | The name of the OS distribution. Example: `rhel` | string | +| `osVersion` | The version of the OS distribution. Example: `"8"` | string | + +## Usage + +Use the BYOOS pack when selecting the OS layer during the cluster profile creation. Use the following information to find the BYOOS pack. + +* Pack Type: OS +* Registry: Public Repo +* Pack Name: Bring Your Own OS (BYO-OS) +* Pack Version: 1.0.x or higher + + +:::info + +Check out the [Create Cluster Profile](../cluster-profiles/task-define-profile.md) guide to learn how to create a cluster profile. + +::: + +
+ + +Fill out the required parameters with information about your custom OS, such as the ID, OS distribution, and version. + +
+ +```yaml +pack: + osImageOverride: "ami-0f4804aff4cf9c5a2" + osName: "rhel" + osVersion: "8" +``` + + + +
+ + + ![View of the cluster profile wizard](/clusters_byoos_image-builder_cluster-profile-byoos-yaml.png) + + + + + +Check out the [Build Edge Artifacts](../clusters/edge/edgeforge-workflow/palette-canvos.md ) guide to learn to create a custom image for Palette. + + +--- + +
+ +Image creation tools are available to help you create custom OS images for the infrastructure provider you are using. The following is a list of commonly used tools for creating a custom OS: + +
+ +* [AWS EC2 Image Builder](https://aws.amazon.com/image-builder/). + + +* [Azure VM Image Builder](https://learn.microsoft.com/en-us/azure/virtual-machines/image-builder-overview?tabs=azure-powershell). + + +* [HashiCorp Packer](https://developer.hashicorp.com/packer). + + +* [Kubernetes Image Builder (KIB)](https://image-builder.sigs.k8s.io/introduction.html). + + +
+ +
+ + +## Terraform + + + + +You can retrieve details about the BYOOS Edge OS agent pack using the following Terraform code. + +
+
+
+```hcl
+data "spectrocloud_registry" "public_registry" {
+  name = "Public Repo"
+}
+
+data "spectrocloud_pack_simple" "byoos" {
+  name         = "edge-native-byoi"
+  version      = "1.0.0"
+  type         = "helm"
+  registry_uid = data.spectrocloud_registry.public_registry.id
+}
+```
+
+ + + + +You can retrieve details about the BYOOS pack by using the following Terraform code. + +
+```hcl
+data "spectrocloud_registry" "public_registry" {
+  name = "Public Repo"
+}
+
+data "spectrocloud_pack_simple" "byoos" {
+  name         = "generic-byoi"
+  version      = "1.0.0"
+  type         = "helm"
+  registry_uid = data.spectrocloud_registry.public_registry.id
+}
+```
+
+ +
+ +## References + +- [Create a Custom Cluster Profile with BYOOS](../clusters/edge/site-deployment/model-profile.md) + + +- [Build Edge Artifacts](../clusters/edge/edgeforge-workflow/palette-canvos.md) + + +- [Model Edge Native Cluster Profile](../clusters/edge/site-deployment/model-profile.md) + + +- [AWS EC2 Image Builder](https://aws.amazon.com/image-builder/) + + +- [Azure VM Image Builder](https://learn.microsoft.com/en-us/azure/virtual-machines/image-builder-overview?tabs=azure-powershell) + + +- [HashiCorp Packer](https://developer.hashicorp.com/packer) + + +- [Kubernetes Image Builder (KIB)](https://image-builder.sigs.k8s.io/introduction.html) \ No newline at end of file diff --git a/docs/docs-content/integrations/calico.md b/docs/docs-content/integrations/calico.md new file mode 100644 index 0000000000..498c9aa6a0 --- /dev/null +++ b/docs/docs-content/integrations/calico.md @@ -0,0 +1,73 @@ +--- +sidebar_label: 'Calico' +title: 'Calico' +description: 'Reference documentation for the Calico pack in Palette' +hide_table_of_contents: true +type: "integration" +category: ['network', 'amd64', 'fips'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/cni-calico/blobs/sha256:9a08103ccd797857a81b6ce55fa4f84a48bcb2bddfc7a4ff27878819c87e1e30?type=image/png' +tags: ["packs", "calico", "network"] +--- + +Palette Network Pack(s) helps provision resources for setting up Cluster networking in Kubernetes. Design goals for the Kubernetes network model can be found [here](https://kubernetes.io/docs/concepts/cluster-administration/networking/#the-kubernetes-network-model). + +[Project Calico](http://docs.projectcalico.org/) is an open-source container networking provider and network policy engine. + +Calico provides highly scalable networking and network policy solution for connecting Kubernetes pods based on the same IP networking principles as the internet, for both Linux (open source) and Windows (proprietary - available from [Tigera](https://www.tigera.io/essentials/)). Calico can be deployed without encapsulation or overlays to provide high-performance, high-scale data center networking. Calico also provides a fine-grained, intent-based network security policy for Kubernetes pods via its distributed firewall. + +Calico manifest used for networking does the following: + +* Installs the `calico/node` container on each host using a DaemonSet. +* Installs the Calico CNI binaries and network config on each host using a DaemonSet. +* Runs `calico/kube-controllers` as a deployment. +* The `calico-etcd-secrets` secret, which optionally allows for providing etcd TLS assets. +* The `calico-config` ConfigMap, which contains parameters for configuring the install. + +:::caution +Limitations: +AWS, VMWare supports IP-in-IP encapsulation type. +Azure supports VXLAN encapsulation type. +::: + +## Versions Supported + + + + + + + + + + + + + + + + +All versions below version 3.23.x are deprecated. + + + + + +## Parameters + +| Name | Supported Values | Default value | Description | +| --- | --- | --- | --- | +| calico.encapsulationType | `CALICO_IPV4POOL_IPIP`, `CALICO_IPV4POOL_VXLAN` | `CALICO_IPV4POOL_IPIP` - AWS, VMware clouds | The encapsulation type to be used for networking (depends on the cloud) | +| | | `CALICO_IPV4POOL_VXLAN` - Azure cloud | | +| calico.encapsulationMode | `Always, CrossSubnet, Never` | Always | The mode to use the IPv4 POOL created at start up | +| calico.calicoNetworkCIDR | CIDR range | `192.168.0.0/16` | CIDR range to be assigned for Pods. 
This range should match the `podCIDR` range specified in the Kubernetes layer | + +## Troubleshooting + +* A daemon set is installed and so a calico-node pod should run on all the nodes in the cluster to provide networking. +* For any issues with networking, check calico-node and calico-kube-controller pods on the cluster. + + +## References + +- [Calico Documentation](https://docs.tigera.io/calico/latest/reference) \ No newline at end of file diff --git a/docs/docs-content/integrations/centos.md b/docs/docs-content/integrations/centos.md new file mode 100644 index 0000000000..a9bea07347 --- /dev/null +++ b/docs/docs-content/integrations/centos.md @@ -0,0 +1,26 @@ +--- +sidebar_label: 'CentOS' +title: 'CentOS' +description: 'Choosing CentOS as an Operating System within the Spectro Cloud Console' +hide_table_of_contents: true +type: "integration" +category: ['operating system', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/centos-vsphere/blobs/sha256:fe51960e2a05745b7b9217e244e47fac401edcdb184b500d75cc537cecb81ef1?type=image/png' +tags: ["packs", "centos", "operating system"] +--- + + + +CentOS Linux distribution is a stable, predictable, manageable and reproducible platform derived from the sources of Red Hat Enterprise Linux (RHEL). It provides a rich base platform for open source communities to build upon. Spectro Cloud provides CentOS as a development framework for its users. + +## Version Supported + +**CentOS 7.7** + + +## References + +- [CentOS Wiki](https://wiki.centos.org) + +- [CentOS Documentation](https://docs.centos.org/en-US/docs) diff --git a/docs/docs-content/integrations/certmanager.md b/docs/docs-content/integrations/certmanager.md new file mode 100644 index 0000000000..7f852e3e03 --- /dev/null +++ b/docs/docs-content/integrations/certmanager.md @@ -0,0 +1,56 @@ +--- +sidebar_label: 'cert-manager' +title: 'cert-manager' +description: 'cert-manager Security pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['security', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/certmanager/blobs/sha256:7882e13d7056781a0195ec15e3b9fa5d4b4bb7f8b4e2c32cc5e254e2295c6a16?type=image/png' +tags: ["packs", "cert-manager", "security"] +--- + + +cert-manager adds certificates and certificate issuers as resource types in Kubernetes clusters, and simplifies the process of obtaining, renewing and using those certificates. It can issue certificates from a variety of supported sources, including Let’s Encrypt, HashiCorp Vault, and Venafi as well as private PKI. It also takes care of the certificate validity and attempts to renew certificates before expiry. 
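+
+As a minimal illustration of the resource types the pack adds, the following sketch defines a self-signed `Issuer` and a `Certificate` that references it. The names, namespace, and DNS name are placeholders, not values required by the pack.
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  name: selfsigned-issuer      # placeholder name
+  namespace: default
+spec:
+  selfSigned: {}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: example-tls            # placeholder name
+  namespace: default
+spec:
+  secretName: example-tls      # Secret where cert-manager stores the signed certificate
+  dnsNames:
+    - example.internal         # placeholder DNS name
+  issuerRef:
+    name: selfsigned-issuer
+    kind: Issuer
+```
+
+Once applied, cert-manager keeps the referenced Secret populated with a valid certificate and renews it before expiry.
+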
+ +## Versions Supported + + + + + +**1.9.1** + + + + + + +**1.8.1** + + + + + +**1.7.1** + + + + + +**1.4.0** + + + + + +**1.1.0** + + + + + + +## References + +- [Cert-manager Documentation](https://cert-manager.io/docs) diff --git a/docs/docs-content/integrations/cilium-tetragon.md b/docs/docs-content/integrations/cilium-tetragon.md new file mode 100644 index 0000000000..1139079529 --- /dev/null +++ b/docs/docs-content/integrations/cilium-tetragon.md @@ -0,0 +1,38 @@ +--- +sidebar_label: 'Cilium-Tetragon' +title: 'Cilium-Tetragon' +description: 'Cilium Tetragon monitoring pack for Spectro Cloud Palette' +hide_table_of_contents: true +type: "integration" +category: ['monitoring', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://soak.stage.spectrocloud.com/assets/monitoring_layer.3b14cf5b.svg' +tags: ["packs", "cilium-tetragon", "monitoring"] +--- + + +[Tetragon](https://github.com/cilium/tetragon) is an eBPF based security observability and runtime enforcement. eBPF is used to safely and efficiently extend the kernel's capabilities without requiring changing the kernel source code or loading kernel modules. Tetragon is a Cilium community open-source project that enables profound visibility with filtering and aggregation with the eBPF collector support to deliver visibility at depth with minimal overhead. + +Palette supports Cilium Tetragon as an add-on pack for monitoring services. Refer to the [create cluster profile](../cluster-profiles/task-define-profile.md#overview) page for more information on how to use an add-on pack. + + +## Versions Supported + + + + + +**0.8.** + + + + + + + +## References + +- [Tetragon GitHub](https://github.com/cilium/tetragon) + + +- [Tetragon Documentation](https://tetragon.cilium.io/docs) \ No newline at end of file diff --git a/docs/docs-content/integrations/cilium.md b/docs/docs-content/integrations/cilium.md new file mode 100644 index 0000000000..3e9a930165 --- /dev/null +++ b/docs/docs-content/integrations/cilium.md @@ -0,0 +1,41 @@ +--- +sidebar_label: 'Cilium' +title: 'Cilium' +description: 'Cilium network pack for Spectro Cloud Palette' +hide_table_of_contents: true +type: "integration" +category: ['network', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/cni-cilium/blobs/sha256:dbc239ac739ea2939ef41dd0743b82281bc82c360326cd7c536f73f0053e2cd2?type=image/png' +tags: ["packs", "cilium", "network"] +--- + + +Palette Network Pack(s) helps provision resources for setting up Cluster networking in Kubernetes. For more Kubernetes network model design goals visit [here](https://kubernetes.io/docs/concepts/cluster-administration/networking/#the-kubernetes-network-model). + +Palette supports **Cilium**, an open-source software for securing and observing network connectivity between cloud-native container workloads. Cilium is underpinned by a Linux Kernel technology called eBPF, to enable dynamic and strong security visibility and control logic within Linux. As eBPF runs within the Linux Kernel, Cilium security policies are applied and updated independent of the application code or container configuration. + +The Cilium agent runs on all clusters and servers to provide networking, security and observability to the workload running on that node. + +## Prerequisite + +* If the user is going for the BYO (Bring your own) Operating system use case then, HWE (Hardware Enabled) Kernel or a Kernel that supports [eBPF](https://ebpf.io/) modules needs to be provisioned. 
+ +**Palette OS images are by default provisioned with the above pre-requisite.** + +## Versions Supported + + + + + +**1.10.9** + + + + + + +## References + +- [Cilium Documentation](https://docs.cilium.io/en/stable) diff --git a/docs/docs-content/integrations/citrix-ipam.md b/docs/docs-content/integrations/citrix-ipam.md new file mode 100644 index 0000000000..3ba83bddfe --- /dev/null +++ b/docs/docs-content/integrations/citrix-ipam.md @@ -0,0 +1,54 @@ +--- +sidebar_label: 'Citrix IPAM' +title: 'Citrix IPAM' +description: 'Citrix IPAM Load Balancer pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['load balancers', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/lb-citrix-adc/blobs/sha256:17f8ebc0dc69d329a39e5d27fc0ce3574034d18ab1776fabda396c5403b0bd86?type=image/png' +tags: ["packs", "citrix-ipam", "network"] +--- + + + + + +# Citrix IPAM and Ingress controller + +The integration helps with IP address management and provides load balancing capabilities for external services deployed on Kubernetes, especially for on-premise deployments. + +## Versions Supported + + + + +* **1.7.6** + + + + +## Components + +Integration deploys the following components: + +* IPAM controller. +* Ingress controller. + +## Parameters + +| Name | Default Value | Description | +| --- | --- | --- | +| vip.addresses | | The IP address range to be used for external Services | +| vip.namespace | citrix-system | The namespace for IPAM controller | +| citrix-k8s-ingress-controller.namespace | citrix-system | The namespace for Citrix Ingress controller | +| citrix-k8s-ingress-controller.clusterPrefix | | The prefix for resources to load balance applications from multiple clusters | +| citrix-k8s-ingress-controller.nsip | | The IP address of the Citrix ADC | +| citrix-k8s-ingress-controller.username | | Username to connect to Citrix IDC | +| citrix-k8s-ingress-controller.password | | Password to connect to Citrix IDC | + +## References + +- [Citrix IPAM Controller](https://developer-docs.citrix.com/projects/citrix-k8s-ingress-controller/en/latest/crds/vip) + +- [Citrix Ingress controller](https://developer-docs.citrix.com/projects/citrix-k8s-ingress-controller/en/latest/network/type_loadbalancer/#expose-services-of-type-loadbalancer-using-an-ip-address-from-the-citrix-ipam-controller) diff --git a/docs/docs-content/integrations/cloudanix.md b/docs/docs-content/integrations/cloudanix.md new file mode 100644 index 0000000000..2728129111 --- /dev/null +++ b/docs/docs-content/integrations/cloudanix.md @@ -0,0 +1,135 @@ +--- +sidebar_label: 'Cloudanix' +title: 'Cloudanix' +description: 'The Cloudanix security pack provides a dashboard that displays threats and unusual behavior in Kubernetes containers in Palette' +hide_table_of_contents: true +type: "integration" +category: ['security', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://cloudanix-assets.s3.amazonaws.com/static/cloudanix-logo-p.png' +tags: ["packs", "cloudanix", "security"] +--- + + +The Cloudanix pack is an add-on security pack that provides a dashboard to help you detect threats and unusual behavior in your Kubernetes clusters. Cloudanix detects the following. 
+ +- Files added or modified in sensitive directories + + +- SSH into a container + + +- Modifications to shell configuration files + + +- Attempts to read sensitive files that contain credential information + + +- Crypto mining + +The Cloudanix dashboard provides an interactive interface that displays the mapping between threat events and associated container, pod, and node workloads. Additionally, Cloudanix identifies the user who initiated an activity identified as a threat and the command that was used. + +You can also start Jira workflows and target specific workloads from the Cloudanix dashboard. + +## Versions Supported + + + + + +## Prerequisites + +- CPUs: 0.5 +- Memory: 256 MiB +- Kubernetes 1.19.x to 1.25.x +- Kernel version 4.5 and higher + +## Parameters + +The Cloudanix pack has the following parameters, which are auto-filled based on Palette user information. + +| Name | Description | +| --- | --- | +| ``userEmail`` | The email address of the user who created the cluster and cluster profile. | +| ``partnerIdentifier`` | A Cloudanix unique identifier for Spectro Cloud. | +| ``organizationId`` | The organization tenant ID in Palette. | +| ``userName`` | Palette user name. | +| ``accountName`` | Palette cloud account name. | +| ``accountType`` | Cloud account type such as AWS or GCP, Azure, or others. | +| ``accountId`` | The user's cloud account ID. | +| ``clusterName`` | The name of the cluster. | +| ``clusterIdentifier`` | The cluster's unique identifier. | +| ``clusterDomain`` | The Palette cloud account type such as AWS, GCP, Azure, or others. | + +## Usage + +This Helm Chart installs four Cloudanix services to enable container security capabilities: + +
+ +- **config-cron**: A job that runs periodically in a Kubernetes cluster to maintain the configuration of Cloudanix inventory and threat services. +- **misconfig-cron**: A job that captures Kubernetes misconfigurations and displays them on the Cloudanix dashboard. +- **inventory-service**: An inventory service that detects any new Kubernetes resources and displays them on the Cloudanix dashboard. +- **threat-service**: A threat service that exports threat events and affected Kubernetes resources, which are visible on the Cloudanix dashboard. + + + +From the **Workloads** page, click the **Risks** tab to view a list of failed threat rules. You can exclude resources, such as pods and containers, from the risk findings. + +
+ +### Kubernetes 1.25 and higher + +When you use the Cloudanix 1.0.x pack with Kubernetes 1.25 and higher, you need to add the **Spectro Namespace Labeler** add-on pack to your cluster profile. After you create the cluster profile, you then apply it to your cluster. + +Use the following information to find the **Spectro Namespace Labeler** add-on pack. + +- **Pack Type**: System App +- **Registry**: Public Repo +- **Pack Name**: Spectro Namespace Labeler +- **Pack Version**: 1.0.x or higher + + +Below is the YAML file for the **Spectro Namespace Labeler** add-on pack. No action is required. +
+ + ```yaml + pack: + namespace: cluster-{{ .spectro.system.cluster.uid }} + + charts: + spectro-namespace-labeler: + namespace: cluster-{{ .spectro.system.cluster.uid }} + + labels: + cloudanix: pod-security.kubernetes.io/enforce=privileged,pod-security.kubernetes.io/enforce-version=v1.26 + ``` + +As a final step, apply the cluster profile to your cluster. + +
+
+ +## Terraform + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "cloudanix" { + name = "cloudanix" + version = "0.0.6" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + +## References + +- [Cloudanix Documentation](https://docs.cloudanix.com/introduction) + + + + + diff --git a/docs/docs-content/integrations/collectord.md b/docs/docs-content/integrations/collectord.md new file mode 100644 index 0000000000..54415c23d1 --- /dev/null +++ b/docs/docs-content/integrations/collectord.md @@ -0,0 +1,66 @@ +--- +sidebar_label: 'Outcold Solutions' +title: 'Outcold Solutions' +description: 'Outcold Solutions - Monitoring pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['monitoring', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/outcold-monitoring/blobs/sha256:3140960d1f39649ad821cfc59450d3c164079b03d15387b2e638eae07442af41?type=image/png' +tags: ["packs", "outcold-monitoring", "monitoring"] +--- + + +Integration provides Kubernetes monitoring solution that includes log aggregation, performance and system metrics, metrics from the control plane and application metrics, a dashboard for reviewing network activity, and alerts to notify you about cluster or application performance issues. + +## Versions Supported + + + + +* **5.0.0** + + + + +## Prerequisites + +This integration forwards logs and metrics to [Splunk](https://www.splunk.com/). Pre-requisites for Splunk are +1. [Install Kubernetes Monitoring application](https://www.outcoldsolutions.com/docs/monitoring-kubernetes/v5/installation/#install-monitoring-kubernetes-application) +2. [Enable HTTP Event Collector (HEC) in Splunk](https://www.outcoldsolutions.com/docs/monitoring-kubernetes/v5/installation/#enable-http-event-collector-in-splunk) +3. Make sure to configure the forwarder settings below while setting up the pack + +```yaml +[general] +acceptEULA = false +license = +fields.kubernetes_cluster = - +# Splunk output +[output.splunk] + +# Splunk HTTP Event Collector url +url = + +# Splunk HTTP Event Collector Token +token = + +# Allow invalid SSL server certificate +insecure = false + +# Path to CA certificate +caPath = + +# CA Name to verify +caName = +``` +## Components + +The following workloads gets deployed on collectorforkubernetes namespace, by default +* Collectorforkubernetes - Daemonset +* Collectorforkubernetes Master - Daemonset +* Collectorforkubernetes Addon - Deployment + +## References + +- [Official Documentation](https://www.outcoldsolutions.com/docs/monitoring-kubernetes/v5) +- [Install Guide](https://www.outcoldsolutions.com/docs/monitoring-kubernetes/v5/installation) diff --git a/docs/docs-content/integrations/deprecated-packs.md b/docs/docs-content/integrations/deprecated-packs.md new file mode 100644 index 0000000000..9dfff86ea3 --- /dev/null +++ b/docs/docs-content/integrations/deprecated-packs.md @@ -0,0 +1,12 @@ +--- +sidebar_label: "Deprecated Packs" +title: "Deprecated Packs" +description: "Deprecated Packs" +icon: "" +hide_table_of_contents: false +tags: ["packs", "deprecation"] +--- + +The following table displays the deprecation status of packs that are in the deprecation lifecycle. To learn more about the deprecation lifecycle, refer to the [Maintenance Policy](maintenance-policy.md). 
+ + \ No newline at end of file diff --git a/docs/docs-content/integrations/dex.md b/docs/docs-content/integrations/dex.md new file mode 100644 index 0000000000..66f9a2c7b4 --- /dev/null +++ b/docs/docs-content/integrations/dex.md @@ -0,0 +1,85 @@ +--- +sidebar_label: 'Dex' +title: 'Dex' +description: 'Dex Authentication pack in Spectro Cloud' +type: "integration" +hide_table_of_contents: true +category: ['authentication', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/dex/blobs/sha256:78e381fe12509ed94c7c19cd6f6fc4e896ec66485364644dc1a40229fcf9d90d?type=image/png' +tags: ["packs", "dex", "security"] +--- + +Dex is an identity service to drive authentication for Kubernetes API Server through the [OpenID Connect](https://openid.net/connect/) plugin. Clients such as kubectl can act on behalf of users who can log in to the cluster through any identity provider that dex supports. + + +## Versions Supported + + + + + +* **2.35.1** + + + + + +* **2.30.0** + + + + + +* **2.28.0** + + + + + + * **2.25.0** + + + + + + * **2.21.0** + + + + +## Components + +Dex integration in Spectro Cloud will deploy the following components: + +* Dex. +* Dex Client (dex-k8s-authenticator). + +The integration will create self-signed certificates, will cross-configure Dex, Dex Client components & will set appropriate flags on the Kubernetes API Server. + +## Ingress + +Follow below steps to configure Ingress on Dex + +1. Change Dex serviceType from "LoadBalancer" to "ClusterIP" (line #112) +2. Ingress (line #118) + * Enable Ingress; Change enabled from false to "true" + * Set Ingress rules like annotations, path, hosts, etc. + +Follow below steps to configure Ingress on Dex Client + +1. Change dex-k8s-authenticator serviceType from "LoadBalancer" to "ClusterIP" (line #312) +2. Ingress (line #320) + * Enable Ingress; Change enabled from false to "true" + * Set Ingress rules like annotations, path, hosts, etc. + +With these config changes, you can access Dex, Dex Client service(s) on the Ingress Controller LoadBalancer hostname / IP + + +## References + +- [Dex](https://github.com/dexidp/dex) + +- [Dex Documentation](https://dexidp.io/docs) + +- [Dex K8s Authenticator](https://github.com/mintel/dex-k8s-authenticator) diff --git a/docs/docs-content/integrations/external-dns.md b/docs/docs-content/integrations/external-dns.md new file mode 100644 index 0000000000..c6e1658ec5 --- /dev/null +++ b/docs/docs-content/integrations/external-dns.md @@ -0,0 +1,160 @@ +--- +sidebar_label: 'ExternalDNS' +title: 'External DNS' +description: 'ExternalDNS pack in Spectro Cloud' +type: "integration" +hide_table_of_contents: true +category: ['load balancers', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/external-dns/blobs/sha256:1bfd6dceb0b50efee4068cd6321511f6b24be86e2d613e0a8206e716ba7aea3f?type=image/png' +tags: ["packs", "external-dns", "network"] +--- + + + +The integration helps configure public DNS servers with information about Kubernetes services to make them discoverable. + +## Prerequisites + +Providers have to be set up for this pack to get deployed and work seamlessly. 
For a list of supported providers and the prerequisites to be set up, visit [providers](https://github.com/kubernetes-sigs/external-dns#status-of-providers) section + +## Versions Supported + + + + +* **0.13.1** +* **0.12.2** + + + + +* **0.7.2** + + + + + +## Components + +Integration deploys the following components: +* External DNS + + +## ExternalDNS for Services on AWS Route53 Example + +### Setup prerequisites for AWS Route53 + +* Create the following IAM policy in the AWS account. This is needed for externalDNS to list and create Route53 resources. + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "route53:ChangeResourceRecordSets" + ], + "Resource": [ + "arn:aws:route53:::hostedzone/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "route53:ListHostedZones", + "route53:ListResourceRecordSets" + ], + "Resource": [ + "*" + ] + } + ] +} +``` +* Create an IAM role and associate the policy created above. Make a note of the role ARN which will be used in ExternalDNS deployment later +* Setup hosted zone in AWS Route53 + ```bash + # Create a DNS zone through AWS CLI + aws route53 create-hosted-zone --name "external-dns-test.my-org.com." --caller-reference "external-dns-test-$(date +%s)" + ``` + +### Deploy ExternalDNS on the cluster + +* Add ExternalDNS pack to the desired profile and deploy it to the cluster. + You may want to configure the following in pack values.yaml + * Configure AWS provider details (line #86) + * Credentials, Zone Type + * AssumeRoleArn with the Role ARN created above + + * Configure txtOwnerId with the ID of the hosted zone created above (line #366) + ```bash + aws route53 list-hosted-zones-by-name --output json --dns-name "external-dns-test.my-org.com." | jq -r '.HostedZones[0].Id' + ``` + * Optionally change externalDNS policy and logLevel + +### Deploy Ingress Controller on the cluster + +* Deploy one of the Ingress Controller on the cluster + +### Deploy Applications with Ingress on the cluster + +* Add Prometheus-Operator addon to the same profile where ExternalDNS is added + * Change serviceType to ClusterIP (line #408) + * Enable Ingress for the add-on packs. In this example, let us use Prometheus-Operator integration. 
+ Ingress config for Grafana will look like the following: + ```yaml + #Ingress config + ingress: + ## If true, Grafana Ingress will be created + ## + enabled: true + + hosts: + - grafana.external-dns-test.my-org.com + + ## Path for grafana ingress + path: / + ``` + When Prometheus-Operator gets deployed in the Cluster, Ingress resource for Grafana will also get created and will look like + ```yaml + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + name: grafana-ingress + namespace: monitoring + spec: + rules: + - host: grafana.external-dns-test.my-org.com + http: + paths: + - backend: + serviceName: grafana + servicePort: 80 + path: / + status: + loadBalancer: + ingress: + - hostname: a9a2eadb64c8e4c2fb37a1f69afb0a30-330939473.us-west-2.elb.amazonaws.com + ``` + +### Verify ExternalDNS (Ingress example) + + * If all goes well, after 2 minutes, ExternalDNS would have inserted 2 records on your hosted zone + + ```bash + aws route53 list-resource-record-sets --output json --hosted-zone-id "/hostedzone/ZEWFWZ4R16P7IB" \ + --query "ResourceRecordSets[?Name == 'grafana.external-dns-test.my-org.com.']|[?Type == 'A']" + ``` + * After which, if you access http://grafana.external-dns-test.my-org.com on your browser, you will be able to view the Grafana login page + +### Troubleshooting + +* Make sure Ingress resource gets created for the Applications deployed and a LoadBalancer hostname / IP address is set on the Ingress resource +* Check the `external-dns` pod for any issues with ExternalDNS not inserting records. If required, change `logLevel` to debug to see additional info on the logs + +## References + +* [External DNS Home](https://github.com/kubernetes-sigs/external-dns) +* [External DNS Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/external-dns) \ No newline at end of file diff --git a/docs/docs-content/integrations/external-secrets-operator.md b/docs/docs-content/integrations/external-secrets-operator.md new file mode 100644 index 0000000000..240d858101 --- /dev/null +++ b/docs/docs-content/integrations/external-secrets-operator.md @@ -0,0 +1,103 @@ +--- +sidebar_label: 'external-secrets-operator' +title: 'External Secrets Operator' +description: 'external-secrets-operator pack in Palette' +hide_table_of_contents: true +type: "integration" +category: ['authentication', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/external-secrets-operator/blobs/sha256:ee6f7f347d381852582f688c70b2564b0a346c2b2ed1221310889075a4453c6d?type=image/png' +tags: ["packs", "external-secrets-operator", "security"] +--- + + +External Secrets Operator (ESO) is a Kubernetes operator that integrates external secret management +systems like AWS Secrets Manager, HashiCorp Vault, Google Secrets Manager, or Azure Key Vault. The operator reads information from external APIs and automatically injects the values into a Kubernetes Secret. + +You can use the External-Secrets-Operator Add-on pack as an authenticator in Palette. + +:::info + +Starting from Palette version 3.1, Palette no longer supports upgrades to Kubernetes External Secrets since this is reaching end of life. Migrate or switch to using External Secrets operator instead. + +::: + + +## Versions Supported + + + + + +* **0.7.1** +* **0.6.0** + + + + + +* **0.5.6** + + + + + +### Sample SecretStore + +
+
+```yml
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: vault-example # Custom name
+spec:
+  refreshInterval: "15s"
+  secretStoreRef:
+    name: vault-backend # Custom value
+    kind: SecretStore
+  target:
+    name: mysecretfoobar
+  data:
+  - secretKey: foobar
+    remoteRef:
+      key: secret/foo # custom value
+      property: my-value # custom value
+
+```
+
+### Sample ExternalSecret YAML file
+
+ +```yml +apiVersion: external-secrets.io/v1beta1 +kind: SecretStore +metadata: + name: custom-name +spec: + provider: + vault: + server: "http://12.34.567.133:0000" # custom server end point + path: "secret" # custom path + version: "v2" # custom version + auth: + # points to a secret that contains a vault token + # https://www.vaultproject.io/docs/auth/token + tokenSecretRef: + name: "vault-token1" # Custom name and key + key: "token1" +--- +apiVersion: v1 +kind: Secret +metadata: + name: vault-token1 +data: + token: cm9vdA== # "root" # custome value +``` + +## References + +- [Amazon IAM-Policy-Examples-ASM-Secrets](https://docs.aws.amazon.com/mediaconnect/latest/ug/iam-policy-examples-asm-secrets.html) + +- [External Secrets](https://github.com/external-secrets/external-secrets) diff --git a/docs/docs-content/integrations/falco.md b/docs/docs-content/integrations/falco.md new file mode 100644 index 0000000000..d004d37c82 --- /dev/null +++ b/docs/docs-content/integrations/falco.md @@ -0,0 +1,46 @@ +--- +sidebar_label: 'Falco' +title: 'Falco' +description: 'Integration of the Falco add on into Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['security', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/falco/blobs/sha256:4e37461d0a31959ca8af65128329750ca3417e883e7e4ba17ee085b01a383a27?type=image/png' +tags: ['packs', 'falco', 'security'] +--- + + + +Falco integration is a behavioral activity monitor designed to detect anomalous activity in your applications. You can use Falco to monitor the run-time security of your Kubernetes applications and internal components. + +## Versions Supported + + + + + +* **1.16.3** + + + + + +* **1.0.11** +* **1.0.10** + + + + + +* **1.13.1** + + + + + + + +## References + +- [Falco Helm Chart GitHub](https://github.com/falcosecurity/charts/tree/master/falco) diff --git a/docs/docs-content/integrations/fluentbit.md b/docs/docs-content/integrations/fluentbit.md new file mode 100644 index 0000000000..f6a48cef3a --- /dev/null +++ b/docs/docs-content/integrations/fluentbit.md @@ -0,0 +1,28 @@ +--- +sidebar_label: 'Fluentbit' +title: 'Fluentbit' +description: 'Fluentbit Monitoring pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['logging', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/fluentbit/blobs/sha256:012fbab20e3427b6c1f6a73d2ea0b4cc43cf60991774c4800ddf3e23c4b64544?type=image/png' +tags: ['packs', 'fluentbit', 'logging'] +--- + +Fluent-Bit is a multi-platform log forwarder. The default integration will help forward logs from the Kubernetes cluster to an external ElasticSearch cluster + +## Version + +* **1.9.6** + +## Contents + +Fluent-Bit is installed as a DaemonSet & so, an instance of fluent-bit will be running on all the nodes in the cluster. 
+ +## References + +- [Fluentbit Docs](https://docs.fluentbit.io/manual) + + +- [Fluentbit GitHub](https://github.com/fluent/fluent-bit) diff --git a/docs/docs-content/integrations/frp.md b/docs/docs-content/integrations/frp.md new file mode 100644 index 0000000000..45d90f254a --- /dev/null +++ b/docs/docs-content/integrations/frp.md @@ -0,0 +1,694 @@ +--- +sidebar_label: 'Spectro Proxy' +title: 'Spectro Proxy' +description: 'Fast Reverse Proxy Authentication pack in Spectro Cloud-Spectro Proxy' +hide_table_of_contents: true +type: "integration" +category: ['authentication', 'amd64', 'fips', "network"] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/spectro-proxy/blobs/sha256:b6081bca439eeb01a8d43b3cb6895df4c088f80af978856ddc0da568e5c09365?type=image/png' +tags: ['packs', 'spectro-proxy', 'network'] +--- + + +Spectro Proxy is a pack that enables the use of a reverse proxy with a Kubernetes cluster. The reverse proxy allows you to connect to the cluster API of a Palette-managed Kubernetes cluster in private networks or clusters configured with private API endpoints. The reverse proxy managed by Spectro Cloud is also known as the forward reverse proxy (FRP). + +The reverse proxy has a server component and a client component. The reverse proxy server is publicly available and managed by Spectro Cloud. The client is deployed inside your Palette-managed Kubernetes cluster and connects to the reverse proxy server. When you add the Spectro Proxy pack to a cluster profile, a couple of things happen: + +- The kubeconfig file is updated with the reverse proxy address instead of pointing directly to the cluster's API address. The following is an example of a kubeconfig file where the `server` attribute points to the reverse proxy. + + ```hideClipboard yaml {4-5} + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS...... + server: https://cluster-61a578b5259452b88941a1.proxy.spectrocloud.com:443 + name: example-server + contexts: + # The remainder configuration is omitted for brevity. + ``` + +- Any requests to the Kubernetes API server, such as kubectl commands, will be routed to the reverse proxy. The reverse proxy forwards the request to the intended client, which is the cluster's API server. The cluster's API server authenticates the request and replies with the proper response. + + +You can attach this pack to a [cluster profile](../cluster-profiles/cluster-profiles.md). The pack installs the Spectro Proxy client in the workload clusters and configures the cluster's API server to point to a managed proxy server. + +
+ +:::info + +This pack can be combined with the [Kubernetes dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/) pack to expose the Kubernetes dashboard. To learn more about exposing the Kubernetes dashboard, check out the [Enable Kubernetes Dashboard](../clusters/cluster-management/kubernetes-dashboard.md) guide. + +::: + +
+
+## Network Connectivity
+
+
+The host cluster's network configuration defines who can access the host cluster from a network perspective. If a user is in the same network as the cluster, the user may be able to access the host cluster without needing a forward proxy. However, if the user is on a different network, the host cluster's network configuration may limit the user's ability to connect to the host cluster and may require the use of a forward proxy.
+
+From a network configuration perspective, a cluster can be in a private or a public network. Host clusters deployed in a network that does not allow inbound internet access are considered private, whereas clusters deployed in a network with both inbound and outbound internet access are considered public. The following are the three possible network connectivity scenarios:
+
+ +* The cluster and the user are in the same private network. + + +* The cluster and the user are in different private networks. + + +* The cluster is in a public network. + +
+ +![An overview of the three different connectivity scenarios](/integrations_frp_conection_overview.png) + +
+ + +The following table summarizes the network connectivity requirements for each scenario and whether the Spectro Proxy is required. + +
+ +| **Scenario** | **Description** | **Requires Spectro Proxy?** | +|----------|-------------|------------------------| +| Private cluster in the same network | The cluster is deployed with a private endpoint, and the user is also in the same network. | ❌ | +| Private cluster in a different network | The cluster is deployed with a private endpoint, and the user is in a different network. | ✅ | +| Public cluster in a different network | The cluster is deployed with a public endpoint, and the user is in a different network. | ❌ | + +
+ +To learn more about how the Spectro Proxy interacts with clusters in a public or private network environment and when the Spectro Proxy is required, select the tab that matches your use case. + + +
+ + + + + + + +Networks labeled as private do not allow inbound internet access. Inbound network requests to the network are allowed only if the connection originated from the internal network. If you are in a different network than the cluster, you can connect to the cluster's API server through the Spectro Proxy. The Spectro Proxy allows you to connect to the cluster's API server although you are not in the same network as the cluster. + + +
+
+:::caution
+
+Users that are in a different network than the cluster require the Spectro Proxy server to connect to the cluster's API server. Otherwise, requests to the cluster's API server will fail due to a lack of network connectivity.
+
+:::
+
+
+The Spectro Proxy client is installed by the Spectro Proxy pack. The client is deployed in the cluster and connects to the Spectro Proxy server. The Spectro Proxy server is a publicly available service managed by Spectro Cloud. The Spectro Proxy server forwards the request to the cluster's API server. The cluster's API server authenticates the request and replies with the proper response.
+
+The kubeconfig files generated for the host cluster are updated with the Spectro Proxy server's address. When you or other users issue a kubectl command, the request is routed to the Spectro Proxy server. Refer to the example kubeconfig file at the beginning of this page, where the `server` attribute points to the Spectro Proxy.
+
+
+The following diagram displays the network connection flow of a user attempting to connect to a cluster with private endpoints. The user is in a different network than the cluster.
+
+ +1. The user issues a kubectl command to the cluster's API server. + + +2. The request is routed to the Spectro Proxy server. The Spectro Proxy client inside the host cluster has an established connection with the cluster's API server. + + +3. The Spectro Proxy server forwards the request to the cluster's API server located in a different network. The cluster's API server authenticates the request and replies with the proper response. + + +![Private cluster in a different network.](/integrations_frp_conection_private-different-network.png) + +Depending on what type of infrastructure provider you are deploying the host cluster in, you may have to specify the Spectro Proxy server's SSL certificate in the Kubernetes cluster's configuration. Refer to the [Usage](#usage) section below for more information. + + +
+ + + + + + + +Networks labeled as private do not allow inbound internet access. Inbound network requests to the network are allowed only if the connection originated from the internal network. If you are in the same network as the cluster, you can connect directly to the cluster's API server. The term "same network" means that from a network perspective, requests can reach the cluster's API server without having to traverse the internet. + + +
+ +:::info + +Users in the same network as the cluster do not require the Spectro Proxy server to connect to the cluster's API server. + +::: + +![Private cluster in the same network.](/integrations_frp_conection_private-same-network.png) + + + +
+ + + + +Clusters deployed in a network with both inbound and outbound access to the internet are considered public. + +
+ +:::info + + Clusters deployed in a public network do not require the Spectro Proxy to connect to the cluster's API server. + +::: + +When a cluster has public endpoints, you can query the cluster's Kubernetes API server from any network with internet access. The following diagram displays the network connection flow of a user attempting to connect to a cluster with public endpoints. Any user with access to the internet can connect to the cluster's API server. + +![A public cluster connection path](/integrations_frp_conection_public_connection.png) + + + +
+ + +
+ +---- + + +
+ +## Versions Supported + + + + + +## Prerequisites + +- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the Spectro Cloud reverse proxy. + + +## Parameters + +The Spectro Proxy supports the following parameters. + +| Parameter | Description | Default | +|-------------------------|--------------------------------------------------------|---------------------------------------------| +| namespace | The Kubernetes namespace to install the Spectro Proxy. | `cluster-{{ .spectro.system.cluster.uid }}` | +| server | The Kubernetes server. | `{{ .spectro.system.reverseproxy.server }}` | +| clusterUid | The Kubernetes cluster identifier. | `{{ .spectro.system.cluster.uid }}` | +| subdomain | The Kubernetes cluster subdomain identifier. | `cluster-{{ .spectro.system.cluster.uid }}` | + + +The Kubernetes dashboard integration supports the following parameters. + +| Parameter | Description | Default | +|-----------------|---------------------------------------------|---------| +| enabled | Enable the dashboard. | `false` | +| useInsecurePort | Use unsecure port (HTTP) for communication. | `false` | + + +The VMware dashboard integration supports the following parameters. + + +| Parameter | Description | Default | +|-----------------|---------------------------------------------|---------| +| enabled | Enable the dashboard. | `false` | + + +## Usage + +To use this pack, you have to add it to your cluster profile. You can also add the Spectro Proxy pack when you create the cluster profile. Check out the [Create Cluster Profile](../cluster-profiles/task-define-profile.md) guide to learn more about cluster profile creation. + +Depending on the type of cluster, the usage guidance varies. Select the tab that corresponds to the kind of cluster you have. Use the following definitions to help you identify the type of cluster. + +
+ + +- **Palette Deployed**: A brand new IaaS cluster that is deployed or will be deployed through Palette. An IaaS cluster is a Kubernetes cluster with a control plane that is not managed by a third party or cloud vendor but is completely managed by Palette. Google GKE and Tencent TKE fall into this category. Clusters in this category get an additional entry in the Kubernetes configuration that adds the reverse proxy certificate (CA) to the API server configuration. + +
+ +- **Imported Cluster**: An imported cluster or a non-IaaS cluster with a control plane that a third party manages. Azure AKS and AWS EKS fall in this category, as both Palette and the cloud provider partially manage the clusters. Clusters that fall under this category get the default kubeconfig CA replaced with the CA from the proxy server. Additionally, the kubeconfig authentication method is changed to a bearer token. To support the bearer token method, a new service account is created in the cluster with a role binding that allows Kubernetes API requests to pass through the reverse proxy and connect with the cluster API server. + + + + + + + +
+ +:::caution + + +Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. This will also result in Kubernetes control plane nodes getting repaved. + +::: + +Add the following extra certificate Subject Alternative Name (SAN) value to the Kubernetes pack under the `kubeadmconfig.apiServer` parameter section. + +
+ +```yaml +certSANs: + - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" +``` + + +The following is an example configuration of the Kubernetes Pack manifest getting updated with the certificate SAN value: + +![frp-cert-san-example](/docs_integrations_frp_cert-san-example.png) + + +For RKE2 and k3s edge-native clusters, add the following configuration to the Kubernetes pack under the `cluster.config` parameter section. +
+ +```yaml +tls-san: + - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" +``` + +![TLS-SAN configuration example](/docs_integrations_frp_tls-san-example.png) + +
+ + +
+ + +
+ +:::caution + + +Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. + +::: + + +Add the Spectro Proxy pack to a cluster profile without making any configuration changes. Use the pack as is. + +
+ +:::info + +Set the parameter `k8sDashboardIntegration.enabled` to true if you intend to expose the Kubernetes dashboard. +Review the [Enable Kubernetes Dashboard](spectro-k8s-dashboard.md) guide for more information. + +::: + +
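+
+For reference, the following is a minimal sketch of that change. The parameter names come from the table above; the exact nesting of these keys within the pack's values may differ, so treat this as illustrative rather than a drop-in snippet.
+
+```yaml
+# Sketch only: enable the Kubernetes dashboard integration for the Spectro Proxy pack.
+k8sDashboardIntegration:
+  enabled: true          # expose the Kubernetes dashboard through the reverse proxy
+  useInsecurePort: false # keep the dashboard on the secure (HTTPS) port
+```
+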
+ +
+ + + + +
+ + + + +## Prerequisites + +- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the Spectro Cloud reverse proxy. + + +## Parameters + +The Spectro Proxy supports the following parameters. + +| Parameter | Description | Default | +|-------------------------|--------------------------------------------------------|---------------------------------------------| +| namespace | The Kubernetes namespace to install the Spectro Proxy. | `cluster-{{ .spectro.system.cluster.uid }}` | +| server | The Kubernetes server. | `{{ .spectro.system.reverseproxy.server }}` | +| clusterUid | The Kubernetes cluster identifier. | `{{ .spectro.system.cluster.uid }}` | +| subdomain | The Kubernetes cluster subdomain identifier. | `cluster-{{ .spectro.system.cluster.uid }}` | + + +The Kubernetes dashboard integration supports the following parameters. + +| Parameter | Description | Default | +|-----------------|---------------------------------------------|---------| +| enabled | Enable the dashboard. | `false` | +| useInsecurePort | Use unsecure port (HTTP) for communication. | `false` | + + +## Usage + +To use this pack, you have to add it to your cluster profile. You can also add the Spectro Proxy pack when you create the cluster profile. Check out the [Create Cluster Profile](../cluster-profiles/task-define-profile.md) guide to learn more about cluster profile creation. + +Depending on the type of cluster, the usage guidance varies. Select the tab that corresponds to the kind of cluster you have. Use the following definitions to help you identify the type of cluster. + +
+ + +- **Palette Deployed**: A brand new IaaS cluster that is deployed or will be deployed through Palette. An IaaS cluster is a Kubernetes cluster with a control plane that is not managed by a third party or cloud vendor but is completely managed by Palette. Google GKE and Tencent TKE fall into this category. Clusters in this category get an additional entry in the Kubernetes configuration that adds the reverse proxy certificate (CA) to the API server configuration. + +
+ +- **Imported Cluster**: An imported cluster or a non-IaaS cluster with a control plane that a third party manages. Azure AKS and AWS EKS fall in this category, as both Palette and the cloud provider partially manage the clusters. Clusters that fall under this category get the default kubeconfig CA replaced with the CA from the proxy server. Additionally, the kubeconfig authentication method is changed to a bearer token. To support the bearer token method, a new service account is created in the cluster with a role binding that allows Kubernetes API requests to pass through the reverse proxy and connect with the cluster API server. + + + + + + + +
+ +:::caution + + +Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. This will also result in Kubernetes control plane nodes getting repaved. + +::: + +Add the following extra certificate Subject Alternative Name (SAN) value to the Kubernetes pack under the `kubeadmconfig.apiServer` parameter section. + +
+ +```yaml +certSANs: + - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" +``` + +The following is an example configuration of the Kubernetes Pack manifest getting updated with the certificate SAN value: + +![frp-cert-san-example](/docs_integrations_frp_cert-san-example.png) + + +For RKE2 and k3s edge-native clusters, add the following configuration to the Kubernetes pack under the `cluster.config` parameter section. +
+ +```yaml +tls-san: + - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" +``` + +![TLS-SAN configuration example](/docs_integrations_frp_tls-san-example.png) + +
+ + +
+ + +
+ +:::caution + + +Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. + +::: + + +Add the Spectro Proxy pack to a cluster profile without making any configuration changes. Use the pack as is. + +
+ +:::info + +Set the parameter `k8sDashboardIntegration.enabled` to true if you intend to expose the Kubernetes dashboard. +Review the [Enable Kubernetes Dashboard](spectro-k8s-dashboard.md) guide for more information. + +::: + +
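+
+For reference, the following is a minimal sketch of that change. The parameter names come from the table above; the exact nesting of these keys within the pack's values may differ, so treat this as illustrative rather than a drop-in snippet.
+
+```yaml
+# Sketch only: enable the Kubernetes dashboard integration for the Spectro Proxy pack.
+k8sDashboardIntegration:
+  enabled: true          # expose the Kubernetes dashboard through the reverse proxy
+  useInsecurePort: false # keep the dashboard on the secure (HTTPS) port
+```
+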
+ +
+ + + + +
+ + + +## Prerequisites + +- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the Spectro Cloud reverse proxy. + + +## Parameters + +The Spectro Proxy supports the following parameters. + +| Parameter | Description | Default | +|-------------------------|--------------------------------------------------------|---------------------------------------------| +| namespace | The Kubernetes namespace to install the Spectro Proxy. | `cluster-{{ .spectro.system.cluster.uid }}` | +| server | The Kubernetes server. | `{{ .spectro.system.reverseproxy.server }}` | +| clusterUid | The Kubernetes cluster identifier. | `{{ .spectro.system.cluster.uid }}` | +| subdomain | The Kubernetes cluster subdomain identifier. | `cluster-{{ .spectro.system.cluster.uid }}` | + + +The Kubernetes dashboard integration supports the following parameters. + +| Parameter | Description | Default | +|-----------------|---------------------------------------------|---------| +| enabled | Enable the dashboard. | `false` | +| useInsecurePort | Use unsecure port (HTTP) for communication. | `false` | + +## Usage + +To use this pack, you have to add it to your cluster profile. You can also add the Spectro Proxy pack when you create the cluster profile. Check out the [Create Cluster Profile](../cluster-profiles/task-define-profile.md) guide to learn more about cluster profile creation. + +Depending on the type of cluster, the usage guidance varies. Select the tab that corresponds to the kind of cluster you have. Use the following definitions to help you identify the type of cluster. + +
+ + +- **Palette Deployed**: A brand new IaaS cluster that is deployed or will be deployed through Palette. An IaaS cluster is a Kubernetes cluster with a control plane that is not managed by a third party or cloud vendor but is completely managed by Palette. Google GKE and Tencent TKE fall into this category. Clusters in this category get an additional entry in the Kubernetes configuration that adds the reverse proxy certificate (CA) to the API server configuration. + +
+ +- **Imported Cluster**: An imported cluster or a non-IaaS cluster with a control plane that a third party manages. Azure AKS and AWS EKS fall in this category, as both Palette and the cloud provider partially manage the clusters. Clusters that fall under this category get the default kubeconfig CA replaced with the CA from the proxy server. Additionally, the kubeconfig authentication method is changed to a bearer token. To support the bearer token method, a new service account is created in the cluster with a role binding that allows Kubernetes API requests to pass through the reverse proxy and connect with the cluster API server. + + + + + + + +
+ +:::caution + + +Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. This will also result in Kubernetes control plane nodes getting repaved. + +::: + +Add the following extra certificate Subject Alternative Name (SAN) value to the Kubernetes pack under the `kubeadmconfig.apiServer` parameter section. + +
+ +```yaml +certSANs: + - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" +``` + +The following is an example configuration of the Kubernetes Pack manifest getting updated with the certificate SAN value: + +![frp-cert-san-example](/docs_integrations_frp_cert-san-example.png) + + +For RKE2 and k3s edge-native clusters, add the following configuration to the Kubernetes pack under the `cluster.config` parameter section. +
+ +```yaml +tls-san: + - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" +``` + +![TLS-SAN configuration example](/docs_integrations_frp_tls-san-example.png) + +
+ + +
+ + +
+ +:::caution + + +Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. + +::: + + +Add the Spectro Proxy pack to a cluster profile without making any configuration changes. Use the pack as is. + +
+ +:::info + +Set the parameter `k8sDashboardIntegration.enabled` to true if you intend to expose the Kubernetes dashboard. +Review the [Enable Kubernetes Dashboard](spectro-k8s-dashboard.md) guide for more information. + +::: + +
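+
+As a reference, a minimal sketch of the relevant pack values with the dashboard integration turned on is shown below. The nesting under `k8sDashboardIntegration` is inferred from the parameter path above, and `useInsecurePort` is included only to illustrate the second parameter from the table; treat the exact structure as an assumption and confirm it against the pack's values file.
+
+```yaml
+# Sketch only: enable the Kubernetes dashboard integration for the Spectro Proxy pack.
+k8sDashboardIntegration:
+  enabled: true
+  useInsecurePort: false
+```
+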
+ +
+ + + + +
+ + +## Prerequisites + +- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the Spectro Cloud reverse proxy. + + +## Parameters + +The Spectro Proxy supports the following parameters. + +| Parameter | Description | Default | +|-------------------------|--------------------------------------------------------|---------------------------------------------| +| namespace | The Kubernetes namespace to install the Spectro Proxy. | `cluster-{{ .spectro.system.cluster.uid }}` | +| server | The Kubernetes server. | `{{ .spectro.system.reverseproxy.server }}` | +| clusterUid | The Kubernetes cluster identifier. | `{{ .spectro.system.cluster.uid }}` | +| subdomain | The Kubernetes cluster subdomain identifier. | `cluster-{{ .spectro.system.cluster.uid }}` | + + +## Usage + +To use this pack, you have to add it to your cluster profile. You can also add the Spectro Proxy pack when you create the cluster profile. Check out the [Create Cluster Profile](../cluster-profiles/task-define-profile.md) guide to learn more about cluster profile creation. + +Depending on the type of cluster, the usage guidance varies. Select the tab that corresponds to the kind of cluster you have. Use the following definitions to help you identify the type of cluster. + +
+ + +- **Palette Deployed**: A brand new IaaS cluster that is deployed or will be deployed through Palette. An IaaS cluster is a Kubernetes cluster with a control plane that is not managed by a third party or cloud vendor but is completely managed by Palette. Google GKE and Tencent TKE fall into this category. Clusters in this category get an additional entry in the Kubernetes configuration that adds the reverse proxy certificate (CA) to the API server configuration. + +
+ +- **Imported Cluster**: An imported cluster or a non-IaaS cluster with a control plane that a third party manages. Azure AKS and AWS EKS fall in this category, as both Palette and the cloud provider partially manage the clusters. Clusters that fall under this category get the default kubeconfig CA replaced with the CA from the proxy server. Additionally, the kubeconfig authentication method is changed to a bearer token. To support the bearer token method, a new service account is created in the cluster with a role binding that allows Kubernetes API requests to pass through the reverse proxy and connect with the cluster API server. + + + + + + + +
+ +:::caution + + +Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. This will also result in Kubernetes control plane nodes getting repaved. + +::: + +Add the following extra certificate Subject Alternative Name (SAN) value to the Kubernetes pack under the `kubeadmconfig.apiServer` parameter section. +
+ +```yaml +certSANs: + - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" +``` + +The following is an example configuration of the Kubernetes Pack manifest getting updated with the certificate SAN value: + +![frp-cert-san-example](/docs_integrations_frp_cert-san-example.png) + + + +For RKE2 and k3s edge-native clusters, add the following configuration to the Kubernetes pack under the `cluster.config` parameter section. +
+ +```yaml +tls-san: + - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" +``` + +![TLS-SAN configuration example](/docs_integrations_frp_tls-san-example.png) + +
+ + +
+ + +
+ +:::caution + + +Be aware that if this pack is added as a Day-2 operation, meaning not during the cluster creation process, you will have to re-download the kubeconfig file to pick up the new configuration changes. + +::: + + +Add the Spectro Proxy pack to a cluster profile without making any configuration changes. Use the pack as is. + +
+ +:::info + +Set the parameter `k8sDashboardIntegration.enabled` to true if you intend to expose the Kubernetes dashboard. +Review the [Enable Kubernetes Dashboard](spectro-k8s-dashboard.md) guide for more information. + +::: + +
+ +
+ + +
+
+ + +## Troubleshooting + +Troubleshooting scenarios related to the Spectro Proxy. +
+ +### x509 Unknown Authority Error + +If you encounter an x509 unknown authority error when deploying a cluster with the Spectro Proxy, the error output will look similar to the following example. + 
+ +```hideClipboard shell +Unable to connect to connect the server: X509: certiticate signed by unknown authorit signed by +``` + +The workaround for this error is to wait a few moments for all the kubeconfig configurations to get propagated to Palette. The Palette cluster agent sends the original kubeconfig to Palette, followed by the modified kubeconfig containing the reverse proxy settings. If you attempt to open up a web shell session or interact with cluster API during the initialization process, you will receive an x509 error. Once Palette receives the kubeconfig file containing the cluster's reverse proxy configurations from the cluster agent, the x509 errors will disappear. + + +## Terraform + +You can reference the Spectro Proxy pack in Terraform with a data resource. + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "spectro-proxy" { + name = "spectro-proxy" + version = "1.2.0" + type = "operator-instance" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + +# References + +- [Enable Kubernetes Dashboard](../clusters/cluster-management/kubernetes-dashboard.md) + + +- [Terraform Data Resource](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) \ No newline at end of file diff --git a/docs/docs-content/integrations/gce.md b/docs/docs-content/integrations/gce.md new file mode 100644 index 0000000000..2a55781220 --- /dev/null +++ b/docs/docs-content/integrations/gce.md @@ -0,0 +1,45 @@ +--- +sidebar_label: 'GCE-Persistent-Disk' +title: 'GCE Persistent Disk' +description: 'GCE Persistent Disk storage pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['storage', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-gcp/blobs/sha256:af4cf7923e75f0ca1fe109f423ff0551855019edfc1d8772653cede454ef87ea?type=image/png' +tags: ['packs', 'storage', 'gce-persistent-disk'] +--- + + +The GCE Persistent disk are reliable, high-performance block storage for virtual machine instances. They are designed for high durability. It provides redundant data storage to ensure data integrity. The key features of GCE Persistent Disk are: + +* Disk Clones +* High Durability +* Resizable Volumes +* Independent Volumes +* Snapshots +* Machine Images + + +## Versions Supported + + + + + +**1.7.1** + + + + + +**1.0** + + + + + + +# References + +- [Google Cloud Persistent Disk Documentation](https://cloud.google.com/compute/docs/disks) diff --git a/docs/docs-content/integrations/generic-vm-libvirt.md b/docs/docs-content/integrations/generic-vm-libvirt.md new file mode 100644 index 0000000000..da99ef7d8d --- /dev/null +++ b/docs/docs-content/integrations/generic-vm-libvirt.md @@ -0,0 +1,309 @@ +--- +sidebar_label: 'generic-vm-libvirt' +title: 'Generic Virtual Machines Libvirt' +description: 'Choosing Libvirt Generic Virtual Machine within the Palette console' +hide_table_of_contents: true +type: "integration" +category: ['system app', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/generic-vm-libvirt/blobs/sha256:23e1ba27947158ccf1ae36913601011508a55103ce1bdb517a175d752fb35eea?type=image/png' +tags: ['packs', 'generic-vm-libvirt', 'system app'] +--- + + +Generic-VM-Libvirt is a Palette Add-on pack used to simplify deploying the virtual machine applications from a cluster profile or a system profile. 
Generic-VM-Libvirt extracts all Terraform constructs inside the pack and exposes nothing but the values. Users will then have the ability to modify the add-on pack for the different applications. + +## Version Supported + + + + +* **1.0.2** +* **1.0.0** + + + + +
+ +## Configuring Palette Generic VM Libvirt Add-on + +To configure the Generic-VM-Libvirt add-on pack for the application cluster, begin by setting the manifest namespace value to the following. + +`cluster-{{ .spectro.system.cluster.uid }}` + +**Example** + +```yaml +namespace: cluster-{{ .spectro.system.cluster.uid }} +``` + +If multiple instances of this pack have to be deployed on the cluster for different virtual machine applications, then modify '`spectrocloud.com/display-name`' and '`releaseNameOverride`' with distinctive names to make them unique across all the packs in the cluster. + 
+ + +```yaml +spectrocloud.com/display-name: vm-app-1 +releaseNameOverride: +  generic-vm-libvirt: vm-app-1 +``` +
+ +## Generic-VM-Libvirt Pack Manifest + +
+ +```yaml +pack: + # for app cluster, namespace value should be "cluster-{{ .spectro.system.cluster.uid }}" + namespace: jet-system + + # if multiple instance of this pack has to be deployed on the cluster for different vm applications + # then modify 'spectrocloud.com/display-name' and 'releaseNameOverride' with unique names to make it + # unique across all the packs in the cluster + # spectrocloud.com/display-name: vm-app-1 + # releaseNameOverride: + # generic-vm-libvirt: vm-app-1 + +charts: + generic-vm-libvirt: + providers: + source: "dmacvicar/libvirt" + version: "0.6.14" + name: vm-app-1 + image: https://cloud-images.ubuntu.com/releases/xenial/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img + + # uncomment the below line and comment the above line if the image is present within the host. + # image="/opt/spectrocloud/ubuntu-16.04-server-cloudimg-amd64-disk1.img" + hardware: + cpu: 2 + memory: 6 #in GB + network: ["br-int"] + rootDisk: + size: 50 #in GB + pool: ehl_images + dataDisks: + - size: 20 #in GB + pool: ehl_data + persisted: true + - size: 25 #in GB + pool: ehl_data + persisted: true + cloudInit: + userData: | + #cloud-config + # vim: syntax=yaml + # Note: Content strings here are truncated for example purposes. + ssh_pwauth: true + chpasswd: + list: + - ubuntu:welcome + expire: false + metaData: | + networkConfig: | + version: 2 + ethernets: + ens3: + dhcp4: true + + # preExecCmd & postExecCmd gets executed in each reconcile loop which runs at an interval of ~2 mins + # If you want to run some command or script only whenever VM is getting creating or after VM is destroyed + # then you can use 'preVMInitCmd' and 'postVMInitCmd' respectively + # preExecCmd: "bash /var/files/pre-exec.sh" + # postExecCmd: "bash /var/files/pre-exec.sh" + + # preVMInitCmd & postVMInitCmd gets executed only when VM is being created/recreated and after VM is created/recreated respectively + preVMInitCmd: "" + postVMInitCmd: "" + + # For first time deployment, preVMDestroyCmd won't be invoked. If there is any change in cloud-init then vm resource will get recreated, + # and 'preVMDestroyCmd' will be invoked before deleting VM and once preVMDestroyCmd gets executed successfully, then only VM resource will be deleted. 
+ # Once VM is deleted then before another VM is created, preVMInitCmd will be invoked + + # preVMDestroyCmd can also be uded to ssh into vm or call the rest api for the application running inside vm before vm is terminated + # or to download the file and use it later once new vm is provisioned + preVMDestroyCmd: "" + + # extraDomainHclConfig: | + # cpu { + # mode = "host-passthrough" + # } + + # mounts section can be used to mount data inside existing config maps or secrets into the pod as files where pre and post + # hooks are executed + # so that data present in config map or secret can be accessed while executing pre and post exec hooks + mounts: + configMap: + # - name: system-config + # path: /data/system-config + # - name: system-config-2 + # path: /data/system-config-2 + secret: + # - name: system-config + # path: /data/system-config + # - name: system-config-2 + # path: /data/system-config-2 + + # envs section can be used to inject data inside existing config maps or secrets into the pod as env variables + # where pre and post hooks are executed + # so that data present in config map or secret can be accessed while executing pre and post exec hooks + envs: + configMap: + # - name: database-app-config + # env: DATABASE_USER + # dataKey: "db.user" + secret: + # - name: database-app-secret + # env: DATABASE_PASSWORD + # dataKey: "db.password" + + # files present in below section will be added to the pod and will be accessible while executing + # pre and post exec hooks and absolute file path would be '/var/files/' + files: + # - name: pre-exec.sh + # content: | + # #!/bin/bash + # echo "I am pre exec" + # - name: post-exec.sh + # content: | + # #!/bin/bash + # echo "I am post exec" +``` + + +## Virtual Machine Hooks + +The Generic-VM-Libvirt pack supports various hooks, while deploying VM applications and supports multiple use-cases of customizing workflow, as customers require. + + +
+ +## Using preExecCmd and postExecCmd + +The **preExecCmd** and **postExecCmd** commands will be executed in every pod reconciliation. The loop runs at approximately a 2-minute interval. + +If you want to run a command or script only when the virtual machine is being created, or only after it has been created, use **preVMInitCmd** and **postVMInitCmd**, respectively. + 
+ +```yaml +preExecCmd: "bash /var/files/pre-exec.sh" +``` + +```yaml +postExecCmd: "bash /var/files/pre-exec.sh" +``` + +
+ +## Using preVMInitCmd and postVMInitCmd + +The **preVMInitCmd** command is executed, only when the virtual machine is being created or recreated. Likewise, the **postVMInitCmd** command is executed only after the virtual machine is created or recreated. + +**Note**: These commands will not be executed in each reconciliation. + +
+ +```yaml +preVMInitCmd: "echo 'Hey! Hang on tight. I am gonna create a VM.'" +``` + +```yaml +postVMInitCmd: "echo 'Ooho! VM is created.'" +``` + +
+ +## Using preVMDestroyCmd + +Any command or script provided in this virtual machine hook will execute before the VM gets destroyed. It will be executed only when the VM is being deleted. A virtual machine deletion can happen for any reason, like changing anything in cloud-init or removing the pack from the profile. + +
+ +```yaml +preVMDestroyCmd: "" +``` + +
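+
+As an illustration, a hypothetical script mounted through the files section described later in this page could be referenced here, for example to gracefully stop or back up the application before the virtual machine is removed. The script name below is an assumption and is not part of the default manifest.
+
+```yaml
+# Hypothetical example: pre-destroy.sh would need to be provided through the files section.
+preVMDestroyCmd: "bash /var/files/pre-destroy.sh"
+```
+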
+ +:::info +During a first-time deployment, preVMDestroyCmd will not be invoked. However, if there is any change in cloud-init, then the VM resource will be recreated, preVMDestroyCmd will be invoked before deleting the VM, and once preVMDestroyCmd is executed successfully, only then will the VM resource be deleted. + +
+
+Once the virtual machine is deleted and before another virtual machine is created, preVMInitCmd will be invoked. +::: + +
+ +## Files + +Files listed in this section will be added to the pod where the pre- and post-exec hooks are executed, and they will be accessible under the absolute path `/var/files/`. + 
+ +```yaml +files: +- name: pre-exec.sh + content: | + #!/bin/bash + echo "I am pre exec" +- name: post-exec.sh + content: | + #!/bin/bash + echo "I am post exec" +extraDomainHclConfig: | + cpu { + mode = "host-passthrough" + } +``` + +
+ + +## Mounts + +Mount the data inside the existing configuration maps or secrets into the pod as files, where pre-and-post hooks are executed. This allows the data present in the configuration map or the secrets file to be accessible while running pre-and-post exec hooks. + +
+ +```yaml +mounts: + configMap: + - name: system-config + path: /data/system-config + - name: system-config-2 + path: /data/system-config-2 + secret: + - name: system-config + path: /data/system-config + - name: system-config-2 + path: /data/system-config-2 +``` + +
+ +## Environment Variables + +The ENVS section can inject data inside the existing config maps or secrets into the pod as environment variables, where pre-and post-hooks are executed so that data present in the config map or the secret file can be accessed while running pre-and-post exec hooks. + +
+ +```yaml +envs: + configMap: + - name: database-app-config + env: DATABASE_USER + dataKey: "db.user" + secret: + - name: database-app-secret + env: DATABASE_PASSWORD + dataKey: "db.password" +``` + + +## References + +- [Libvirt Apps](https://libvirt.org/apps.html) + + diff --git a/docs/docs-content/integrations/generic-vm-vsphere.md b/docs/docs-content/integrations/generic-vm-vsphere.md new file mode 100644 index 0000000000..93af93713b --- /dev/null +++ b/docs/docs-content/integrations/generic-vm-vsphere.md @@ -0,0 +1,336 @@ +--- +sidebar_label: 'generic-vm-vsphere' +title: 'Generic Virtual Machine vSphere' +description: 'Choosing vSphere Generic Virtual Machine within the Palette console' +hide_table_of_contents: true +type: "integration" +category: ['system app', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/generic-vm-vsphere/blobs/sha256:3b121dca3cbc7fed0153d3e1c8c3df20076ec200e091085a3a281ba08cb2261e?type=image/png' +tags: ['packs', 'generic-vm-vsphere', 'system app'] +--- + +Generic-VM-vSphere is a Palette Add-on pack used to simplify deploying the virtual machine resource from a cluster profile or a system profile. Generic-VM-vSphere extracts all Terraform constructs inside the pack and exposes nothing but the values. Users will then have the ability to modify the add-on pack for the different applications. + +
+ +## Version Supported + + + + +* **1.0.4** +* **1.0.0** + + + + +
+ +## Configuring Generic-VM-vSphere + +To configure the Generic-VM-vSphere Add-on pack for the application cluster, the namespace value should be as follows: + +`cluster-{{ .spectro.system.cluster.uid }}` +
+ +```yaml +namespace: cluster-{{ .spectro.system.cluster.uid }} +``` + +If multiple instances of this pack have to be deployed on the cluster for different virtual machine applications, then modify '`spectrocloud.com/display-name`' and '`releaseNameOverride`' with distinctive names to make them unique across all the packs in the cluster. + 
+ +```yaml +spectrocloud.com/display-name: vm-app-1 +releaseNameOverride: +  generic-vm-vsphere: vm-app-1 +``` +
+
+ + +## Generic-VM-vSphere Pack Manifest + +
+ +```yaml +pack: + # for app cluster, namespace value should be "cluster-{{ .spectro.system.cluster.uid }}" + namespace: jet-system + + # if multiple instance of this pack has to be deployed on the cluster for different vm applications + # then modify 'spectrocloud.com/display-name' and 'releaseNameOverride' with unique names to make it + # unique across all the packs in the cluster + # spectrocloud.com/display-name: vm-app-1 + # releaseNameOverride: + # generic-vm-vsphere: vm-app-1 + +charts: + generic-vm-vsphere: + providers: + source: "hashicorp/vsphere" + version: "2.2.0" + name: vm-app-1 + hardware: + cpu: 2 + memory: 6 #in GB + dataDisks: + - size: 20 #in GB + - size: 25 #in GB + + + # To use an image from a remote url please uncomment the below lines and comment out the vmTemplate section. + # ovaTemplate: + # remote_ovf_url: "https://192.168.100.12:8443/artifactory/generic-eis-all/ehl-guest/sles-154-cloud-kube-v1.21.10-20220718141926-014.ova" + # name: system-cluster-ova + # network: + # - name: "VM Network" + # value: "VLAN-909" + # disk: + # size: 40 + vmTemplate: "spectro-templates/ubuntu-focal-20.04-cloudimg-20220207" + guestId: "ubuntu64Guest" #ubuntu64Guest for ubuntu, sles15_64Guest for sles etc + scsiType: "lsilogic" + + cloudInit: + # cloud init properties can be injected in vsphere via guest extra config (guestExtraConfig) or via vapp properties (vAppProperties) + # if cloud init type is vAppProperties then add data in vAppProperties: section and leave guestExtraConfig commented + # else if cloud init type is guestExtraConfig then add data in guestExtraConfig: section and leave vAppProperties commented + type: guestExtraConfig # valid values is one of ["vAppProperties" or "guestExtraConfig"] + userData: | + #cloud-config + # vim: syntax=yaml + # Note: Content strings here are truncated for example purposes. + ssh_pwauth: true + chpasswd: + list: + - ubuntu:welcome + expire: false + metaData: | + + networkConfig: | + version: 2 + ethernets: + ens3: + dhcp4: true + + guestExtraConfig: + "guestinfo.network_config": base64encode(data.template_file.network_config.rendered) + "guestinfo.network_config.encoding": "base64" + "guestinfo.userdata": base64encode(data.template_file.user_data.rendered) + "guestinfo.userdata.encoding": "base64" + + vAppProperties: + #instance-id: vm-app-1 + #hostname: vm-app-1 + #public-keys: "ssh-rsa AAAAB3....NGJwwlOmNrw== spectro@spectro" + #password: abcde12345 + #user-data: data.template_file.user_data.rendered + + # 'extraVMHclConfig' can be used to provide extra configuration in the virtual machine and config should be provided in HCL + # format + # extraVMHclConfig: | + # cdrom { + # client_device = true + # } + + # preExecCmd & postExecCmd gets executed in each reconcile loop which runs at an interval of ~2 mins + # If you want to run some command or script only whenever VM is getting creating or after VM is destroyed + # then you can use 'preVMInitCmd' and 'postVMInitCmd' respectively + # preExecCmd: "bash /var/files/pre-exec.sh" + # postExecCmd: "bash /var/files/pre-exec.sh" + + # preVMInitCmd & postVMInitCmd gets executed only when VM is being created/recreated and after VM is created/recreated respectively + preVMInitCmd: "" + postVMInitCmd: "" + + # For first time deployment, preVMDestroyCmd won't be invoked. If there is any change in cloud-init then vm resource will get recreated, + # and 'preVMDestroyCmd' will be invoked before deleting VM and once preVMDestroyCmd gets executed successfully, then only VM resource will be deleted. 
+ # Once VM is deleted then before another VM is created, preVMInitCmd will be invoked + + # preVMDestroyCmd can also be used to ssh into vm or call the rest api for the application running inside vm before vm is terminated + # or to download the file and use it later once new vm is provisioned + preVMDestroyCmd: "" + + # mounts section can be used to mount data inside existing config maps or secrets into the pod as files where pre and post + # hooks are executed + # so that data present in config map or secret can be accessed while executing pre and post exec hooks + mounts: + configMap: + # - name: system-config + # path: /data/system-config + # - name: system-config-2 + # path: /data/system-config-2 + secret: + # - name: system-config + # path: /data/system-config + # - name: system-config-2 + # path: /data/system-config-2 + + # envs section can be used to inject data inside existing config maps or secrets into the pod as env variables + # where pre and post hooks are executed + # so that data present in config map or secret can be accessed while executing pre and post exec hooks + envs: + configMap: + # - name: database-app-config + # env: DATABASE_USER + # dataKey: "db.user" + secret: + # - name: database-app-secret + # env: DATABASE_PASSWORD + # dataKey: "db.password" + + # files present in below section will be added to the pod and will be accessible while executing + # pre and post exec hooks and absolute file path would be '/var/files/' + files: + # - name: pre-exec.sh + # content: | + # #!/bin/bash + # echo "I am pre exec" + # - name: post-exec.sh + # content: | + # #!/bin/bash + # echo "I am post exec" +``` + + + +## Virtual Machine Hooks + +The Generic-VM-vSphere pack supports various hooks while deploying VM applications and supports multiple use-cases of customizing workflow, as customers require. + +
+ + +## Using extraVMHclConfig + +The `extraVMHclConfig` parameter can be used to provide extra configuration for the virtual machine. The configuration must be provided in HashiCorp Configuration Language (HCL) format. + +```terraform +# extraVMHclConfig: | +# cdrom { +# client_device = true +# } +``` + +## Using preExecCmd and postExecCmd + +The **preExecCmd** and **postExecCmd** commands will be executed in every pod reconciliation. The loop runs at approximately a 2-minute interval. + +If you want to run a command or script only when the virtual machine is being created, or only after it has been created, use **preVMInitCmd** and **postVMInitCmd**, respectively. + 
+ +```yaml +preExecCmd: "bash /var/files/pre-exec.sh" +``` + +```yaml +postExecCmd: "bash /var/files/pre-exec.sh" +``` + +
+ +## Using preVMInitCmd and postVMInitCmd + +The **preVMInitCmd** command is executed, only when the virtual machine is being created or recreated. Likewise, the **postVMInitCmd** command is executed only after the virtual machine is created or recreated. + +**Note**: These commands will not be executed in each reconciliation. + +
+ +```yaml +preVMInitCmd: "echo 'Hey! Hang on tight. I am gonna create a VM.'" +``` + +```yaml +postVMInitCmd: "echo 'Ooho! VM is created.'" +``` + +
+ +## Using preVMDestroyCmd + +Any command or script provided in this virtual machine hook will execute before the virtual machine is destroyed. It will be executed only when the VM is getting deleted. A virtual machine deletion can happen for any reason, like changing anything in cloud-init or removing the pack from the profile. + +
+ +```yaml +preVMDestroyCmd: "" +``` +
+ +:::info +During a first-time deployment, preVMDestroyCmd won't be invoked. However, if there is any change in cloud-init, then the VM resource will be recreated, preVMDestroyCmd will be invoked before deleting the VM, and once preVMDestroyCmd is executed successfully, only then the VM resource will be deleted. + +
+
+Once the VM is deleted and before another virtual machine is created, preVMInitCmd will be invoked. +::: + +
+
+ +## Mounts + +Mount the data inside the existing configuration map or secret into the pod as files, where pre-and-post hooks are executed. This allows the data present in the configuration map or the secrets file to be accessible while running pre-and-post exec hooks. + + +
+ +```yaml +mounts: +  configMap: +    - name: system-config +      path: /data/system-config +    - name: system-config-2 +      path: /data/system-config-2 +  secret: +    - name: system-config +      path: /data/system-config +    - name: system-config-2 +      path: /data/system-config-2 +``` + 
+ +## Environment Variables + +The ENVS section can inject data inside the existing config maps or secrets into the pod as environment variables, where pre-and post-hooks are executed so that data present in the config map or the secret file can be accessed while running pre-and-post exec hooks. + +
+ +```yaml +envs: +  configMap: +    - name: database-app-config +      env: DATABASE_USER +      dataKey: "db.user" +  secret: +    - name: database-app-secret +      env: DATABASE_PASSWORD +      dataKey: "db.password" +``` + 
+ +## Files + +Files listed in this section will be added to the pod and will be accessible while executing the pre- and post-exec hooks. The absolute file path will be `/var/files/`. + 
+ +```yaml +files: +  - name: pre-exec.sh +    content: | +      #!/bin/bash +      echo "I am pre exec" +  - name: post-exec.sh +    content: | +      #!/bin/bash +      echo "I am post exec" +``` + 
diff --git a/docs/docs-content/integrations/grafana-spectrocloud-dashboards.md b/docs/docs-content/integrations/grafana-spectrocloud-dashboards.md new file mode 100644 index 0000000000..e832d1e7e4 --- /dev/null +++ b/docs/docs-content/integrations/grafana-spectrocloud-dashboards.md @@ -0,0 +1,89 @@ +--- +sidebar_label: 'Spectro Cloud Grafana Dashboards' +title: 'Spectro Cloud Grafana Dashboards' +description: 'Learn more about the Spectro Cloud Grafana dashboard and how to use it.' +type: "integration" +category: ['monitoring', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/spectrocloud-grafana-dashboards/blobs/sha256:a48c9929480a8c463e409e7563279f97d80e674c5cc91cb81c47454aea2c203d?type=image/png' +tags: ['packs', 'spectrocloud-grafana-dashboards', 'monitoring'] +--- + + +The Spectro Cloud Grafana Dashboards is an addon pack that exposes internal cluster resource metrics. You can access the information exposed by the pack in Grafana by visiting the **Spectro Cloud / Spectro Clusters** dashboard. The following information is exposed by the Spectro Cloud Grafana Dashboards.

+ +- Status of all cluster profile layers. + + +- CPU and Memory usage of the cluster and all the pods in the cluster. + + +- Cluster health status. + + +- The cluster's node health status and uptime. + +
+ + +![A grafana dashboard view of the cluster metric displaying pack status](/clusters_cluster-management_grafana_spectro_metrics.png) + + +## Versions Supported + +**1.0.X** + +## Prerequisites + +* A host cluster that has the [Prometheus Operator pack](prometheus-operator.md) `v45.4.X` or greater installed. Check out [Deploy Monitoring Stack](../clusters/cluster-management/monitoring/deploy-monitor-stack.md) for instructions on how to deploy a monitoring stack. + + +* A cluster profile with the [Prometheus Cluster Metrics](prometheus-cluster-metrics.md) pack `v3.4.X` or greater installed. + + +## Usage + +The Spectro Cloud Grafana Dashboards require no additional configuration and the pack is designed to work out-of-the-box. + +You can learn how to add the Spectro Cloud Grafana Dashboards to your cluster by following the steps outlined in the [Enable Monitoring on Host Cluster](../clusters/cluster-management/monitoring/deploy-agent.md). + +
+ +:::caution + +Pods without the defined attributes `request` and `limit` will not display metrics data in the Grafana out-of-the-box Kubernetes Pods dashboard. + +::: + + + +## Terraform + + +```terraform hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "spectro-cloud-grafana-dashboards" { + name = "spectrocloud-grafana-dashboards" + version = "1.0.0" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + + +## References + +- [Enable Monitoring on Host Cluster](../clusters/cluster-management/monitoring/deploy-agent.md). + + +- [Deploy Monitoring Stack](../clusters/cluster-management/monitoring/deploy-monitor-stack.md) + + +- [Prometheus Operator pack](prometheus-operator.md) + + +- [Prometheus Agent](prometheus-agent.md) \ No newline at end of file diff --git a/docs/docs-content/integrations/heartbeat.md b/docs/docs-content/integrations/heartbeat.md new file mode 100644 index 0000000000..41f5363850 --- /dev/null +++ b/docs/docs-content/integrations/heartbeat.md @@ -0,0 +1,30 @@ +--- +sidebar_label: 'heart-beat' +title: 'Heartbeat' +description: 'Heart Beat monitoring pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['monitoring', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/heartbeat/blobs/sha256:19fec69ae172c3e54d5fb09c176517cf7bfeb1bc740bde65c200e14115510313?type=image/png' +tags: ['packs', 'heart-beat', 'monitoring'] +--- + + + +Heartbeat is a lightweight daemon installed to a remote server for periodically checking the status of the services and determine whether they are currently available and reachable. Heartbeat is useful to verify that your service level agreements are met during the service uptime. + +## Versions Supported + + + + + +**1.0.0** + + + + +## References + +- [Heartbeat Reference Documentation](https://www.elastic.co/guide/en/beats/heartbeat/current/index.html) diff --git a/docs/docs-content/integrations/integrations.mdx b/docs/docs-content/integrations/integrations.mdx new file mode 100644 index 0000000000..24f055cdaf --- /dev/null +++ b/docs/docs-content/integrations/integrations.mdx @@ -0,0 +1,20 @@ +--- +sidebar_label: "Packs List" +title: "Packs List" +description: "Learn about packs that Palette offers and choose from Pallette packs." +hide_table_of_contents: true +sidebar_custom_props: + icon: "teams" +tags: ["packs"] +--- + + +Palette provides packs that are tailored for specific uses to support the core infrastructure a cluster needs and add-on packs to extend Kubernetes functionality. Each pack you add to a cluster profile is considered a layer in the profile. + +When you create a cluster profile, you choose the type of pack you want to add: **Full**, **Infrastructure**, or **Add-on**. **Full** refers to the combination of **Infrastructure** and **Add-on** packs. + +When you choose **Infrastructure** or **Add-on**, Palette presents only packs that provide functionality for the selected pack type. When you choose **Full**, Palette presents all the packs so you can build your cluster profile from the base layer up. To learn more about cluster profiles, check out the [Cluster Profiles](/cluster-profiles) guide. + +To learn more about individual packs, use the search bar below to find a specific option. Alternatively, you can use the filter buttons to display available options. 
To learn about pack update and deprecation schedules, review [Maintenance Policy](/integrations/maintenance-policy). + + diff --git a/docs/docs-content/integrations/istio.md b/docs/docs-content/integrations/istio.md new file mode 100644 index 0000000000..ba7fc16649 --- /dev/null +++ b/docs/docs-content/integrations/istio.md @@ -0,0 +1,57 @@ +--- +sidebar_label: 'Istio' +title: 'Istio' +description: 'Choosing Istio as a Service Mesh app within the Spectro Cloud console' +hide_table_of_contents: true +type: "integration" +category: ['service mesh', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/istio/blobs/sha256:c80cf596d4859261ab892e987f835bd11161bd139dd8e4147b652c6b93924cb2?type=image/png' +tags: ['packs', 'istio', 'network'] +--- + + + +This Integration aims to automate and simplify the rollout of the various Istio components which helps with service mesh use cases. + +## Versions Supported + + + + + +* **1.14.3** +* **1.14.1** + + + + + +* **1.6.2** + + + + + +## Contents + +The integration deploys the Istio Operator with the 'demo' profile which deploys the following components: + +* Istiod +* Istio Ingress Gateway +* Istio Egress Gateway +* Grafana +* Prometheus +* Istio Tracing +* Kiali + + +## References + +- [Istio Home](https://istio.io) + + +- [Istio Documentation](https://istio.io/latest/docs) + + +- [Istio Operator GitHub](https://github.com/istio/operator) diff --git a/docs/docs-content/integrations/kibana.md b/docs/docs-content/integrations/kibana.md new file mode 100644 index 0000000000..34c6b6d6da --- /dev/null +++ b/docs/docs-content/integrations/kibana.md @@ -0,0 +1,44 @@ +--- +sidebar_label: 'Kibana' +title: 'Elasticsearch Fluentd Kibana' +description: 'Kibana Monitoring pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['logging', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/elastic-fluentd-kibana/blobs/sha256:3b6d6486eb216d46164fc8b7cb784b0be6b851a85726f18bdf4450d5ed1386eb?type=image/png' +tags: ['packs', 'elastic-fluentd-kibana', 'logging'] +--- +The logging integration installs a production-grade ElasticSearch cluster with Kibana and Fluentd by default on the Kubernetes cluster. This integration provides a rich set of logging features like forwarding, aggregating & parsing logs from the Kubernetes cluster. + +## Contents + +The default integration deployed will have the following components: + +* ElasticSearch Master (3 replicas). +* ElasticSearch Data (2 replicas). +* ElasticSearch Client (2 replicas). +* ElasticSearch Curator. +* Fluentd (one per node). +* Kibana. + +## Ingress + +Follow below steps to configure Ingress on Kibana + +1. Change serviceType from "LoadBalancer" to "ClusterIP" (line #643) +2. Ingress (line #670) + * Enable Ingress; change enabled from false to "true" + * Set Ingress rules like annotations, path, hosts, etc. 
+ +With these config changes, you can access Kibana service on the Ingress Controller LoadBalancer hostname / IP + +## References + +- [Elasticsearch GitHub](https://github.com/helm/charts/tree/master/stable/elasticsearch) + + +- [Fluentd GitHub](https://github.com/helm/charts/tree/master/stable/fluentd) + + +- [Kibana GitHub](https://github.com/helm/charts/tree/master/stable/kibana) diff --git a/docs/docs-content/integrations/kong.md b/docs/docs-content/integrations/kong.md new file mode 100644 index 0000000000..a01a06be64 --- /dev/null +++ b/docs/docs-content/integrations/kong.md @@ -0,0 +1,42 @@ +--- +sidebar_label: 'Kong' +title: 'Kong' +description: 'Kong Ingress pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['ingress', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/kong/blobs/sha256:600f20583f85ccad4c515e51542f74aa9acb851d5b03ecb0e7b3435eb51ecf56?type=image/png' +tags: ['packs', 'kong', 'network'] +--- + +The Kong integration is an Ingress Controller for Kubernetes that configures ingress with a load balancer. You can use the Kong as an application load balancer for your application. + +## Version Supported + + + + + +* **2.13.1** + + + + + +* **1.4.0** + + + + + +## Components + +The integration adds the Kong Ingress Controller, which exposes a Kubernetes service of type LoadBalancer. + +## References + +- [Kong Ingress Controller Documentation ](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers) + + +- [Kong GitHub](https://github.com/Kong/kubernetes-ingress-controller) diff --git a/docs/docs-content/integrations/kubebench.md b/docs/docs-content/integrations/kubebench.md new file mode 100644 index 0000000000..629a3ce51d --- /dev/null +++ b/docs/docs-content/integrations/kubebench.md @@ -0,0 +1,33 @@ +--- +sidebar_label: 'kube-bench' +title: 'kube-bench' +description: 'kube-bench security pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['security', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: https://registry-addon.spectrocloud.com/v1/kube-bench/blobs/sha256:28c233e5ad884d5356a183c37f323263eb4acca860c28b326ecd99094b500c31?type=image/png +tags: ['packs', 'kube-bench', 'security'] +--- + + +Palette executes kube-bench, a CIS Benchmark scanner by Aqua Security, for every Kubernetes pack to ensure the master and worker nodes are configured securely. It is available as an Add-on layer within Palette. + +kube-bench runs against a series of checks specified in a `controls` YAML file. For more information on how to write tests and config files, refer to the [controls](https://github.com/aquasecurity/kube-bench/blob/main/docs/controls.md) section. 
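+
+As an illustration, a minimal controls file might look like the following sketch. The structure mirrors the upstream kube-bench controls format linked above, and the single check shown is illustrative rather than an exact copy of a CIS control.
+
+```yaml
+---
+controls:
+version: "cis-1.6"
+id: 1
+text: "Control Plane Security Configuration"
+type: "master"
+groups:
+  - id: 1.1
+    text: "Control Plane Node Configuration Files"
+    checks:
+      - id: 1.1.1
+        text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive"
+        audit: "stat -c permissions=%a /etc/kubernetes/manifests/kube-apiserver.yaml"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: "Run 'chmod 644 /etc/kubernetes/manifests/kube-apiserver.yaml' on the control plane node."
+        scored: true
+```
+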
+ + +## Versions Supported + + + + + + +* **0.6.8** + + + + +## References + +- [kube-bench GitHub](https://github.com/aquasecurity/kube-bench/blob/main/docs/running.md#running-kube-bench) diff --git a/docs/docs-content/integrations/kubehunter.md b/docs/docs-content/integrations/kubehunter.md new file mode 100644 index 0000000000..4bd1b55766 --- /dev/null +++ b/docs/docs-content/integrations/kubehunter.md @@ -0,0 +1,29 @@ +--- +sidebar_label: 'kube-hunter' +title: 'kube-hunter' +description: 'kube-hunter monitoring pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['monitoring', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/kubehunter/blobs/sha256:6b6b9138fa056677646712a888192498247f71aa421edd27b25458a8fbf8af0c?type=image/png' +tags: ['packs', 'kube-hunter', 'security'] +--- + +Kube Hunter is an open-source tool that hunts for security issues in your Kubernetes clusters. +It’s designed to increase awareness and visibility of the security controls in Kubernetes environments. Kube-hunter probes a domain or address range for open Kubernetes-related ports, and tests for any configuration issues that leave your cluster exposed to attackers. It can be run on a machine in the cluster, and select the option to probe all the local network interfaces. + +## Versions Supported + + + + + +**1.0.3** + + + + +## References + +- [kube-hunter GitHub](https://github.com/aquasecurity/kube-hunter/) diff --git a/docs/docs-content/integrations/kubernetes-dashboard.md b/docs/docs-content/integrations/kubernetes-dashboard.md new file mode 100644 index 0000000000..07f5e020b0 --- /dev/null +++ b/docs/docs-content/integrations/kubernetes-dashboard.md @@ -0,0 +1,710 @@ +--- +sidebar_label: "Kubernetes Dashboard" +title: "Kubernetes Dashboard" +description: "Learn how to manage Kubernetes clusters and applications deployed in them by using the Kubernetes Dashboard Monitoring pack." +type: "integration" +hide_table_of_contents: true +category: ['monitoring', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: "https://registry.spectrocloud.com/v1/k8s-dashboard/blobs/sha256:2de5d88b2573af42d4cc269dff75744c4174ce47cbbeed5445e51a2edd8b7429?type=image/png" +tags: ['packs', 'kubernetes-dashboard', 'monitoring'] +--- + + +The [Kubernetes Dashboard](https://github.com/kubernetes/dashboard) add-on pack is a general-purpose, web-based UI that allows you to manage clusters and the applications deployed in them. + +
+ +## Versions Supported + + + + + +## Prerequisites + +- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the [Spectro Proxy](frp.md) reverse proxy. + + +- Users or groups must be mapped to a Kubernetes RBAC role, either a Role or a ClusterRole. You can create a custom role through a manifest and use Palette's roleBinding feature to associate the users or groups with the role. Refer to the [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings) guide to learn more. + +## Parameters + +| Name | Supported Values | Default Values | Description | +| --- | --- | --- | --- | +| `k8s-dashboard.namespace` | | kubernetes-dashboard | The namespace where you want to run the Kubernetes Dashboard deployment | +| `k8s-dashboard.clusterRole` | Any valid clusterRole from the Kubernetes cluster | `k8s-dashboard-readonly` | The ClusterRole to be attached to the ServiceAccount which defines RBAC to the cluster resources.

By default, a ClusterRole (k8s-dashboard-readonly) with ReadOnly access to most of the resources is deployed. | +| `k8s-dashboard.certDuration` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `8760h` (365 days) | Validity for the Self-signed certificate, specified in hours. | +| `k8s-dashboard.certRenewal` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `720h` (30 days) | Certificate renew before expiration duration | +| `k8s-dashboard.serviceType` | ClusterIP, LoadBalancer | ClusterIP | The ServiceType for dashboard. We recommend using ClusterIP service type to restrict access to the cluster.| +| `k8s-dashboard.skipLogin` | True, False | False | A flag to skip authentications in the Kubernetes Dashboard UI. We recommend using this only for demo purposes, as enabling it could expose a security risk. | + +:::caution + +Starting with Kubernetes Dashboard version 2.7.0, the **Connect** button is no longer available. For an optimized experience, we recommend you use the pre-configured version of this dashboard, called Spectro Kubernetes Dashboard pack. To learn more about it and start using it, check out the [Spectro Kubernetes Dashboard](spectro-k8s-dashboard.md) guide. + +::: + +## Usage + +To use the Kubernetes Dashboard pack, you have to add it to your cluster profile. Use the following information to find the Kubernetes Dashboard pack. +- **Pack Type**: Monitoring +- **Registry**: Public Repo +- **Pack Name**: Kubernetes Dashboard +- **Pack Version**: 2.0.x or higher + +The Kubernetes Dashboard pack requires the [Spectro Proxy](frp.md) pack, which serves as a reverse proxy to expose the Kubernetes dashboard. You must configure the Spectro Proxy pack. + + +### Access Kubernetes Dashboard + +When connected to the cluster remotely, issue the following command to establish a connection to deploy the Kubernetes Dashboard on port 8080. + + +
+ +```bash +kubectl port-forward --namespace kubernetes-dashboard service/kubernetes-dashboard 8080:443 +``` + +To access Kubernetes Dashboard, navigate to `https://localhost:8080` in a browser of your choice. + +From the Dashboard login page, run the following command from the terminal window to obtain the bearer token: + 
+ +```bash +kubectl --namespace kubernetes-dashboard describe secret \ +$(kubectl --namespace kubernetes-dashboard get secret | grep kubernetes-dashboard-token | awk '{print $1}') +``` + +The following example shows the command output with the token value. + 
+ +```yaml +Name: kubernetes-dashboard-token-h4lnf +Namespace: kubernetes-dashboard +Labels: +Annotations: kubernetes.io/service-account.name: kubernetes-dashboard + kubernetes.io/service-account.uid: 00e729f1-6638-4e68-8df5-afa2e2e38095 + +Type: kubernetes.io/service-account-token + +Data +==== +ca.crt: 1029 bytes +namespace: 20 bytes +token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ilg1bTg3RWM4Y1c3NnhkQ3dXbXNDUXQydVpYQklRUWoxa1BaS0ctVkVTSDQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1oNGxuZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjAwZTcyOWYxLTY2MzgtNGU2OC04ZGY1LWFmYTJlMmUzODA5NSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.JU4GOJNjGpkHabUyxBt_2rvtXNjpR3w238BF2oMCQUNf_ZkUGSMeAAgIKxbAuk62dtJNDaRh5yAZ9J5KthMcU6k4qVmodUOJdlvigBVNjTDEhPM-sqJus62HMtwjpvm0CX-aP_A_BqHs2yJ3OgXSX0uHmkUO1FMoZSVaRpOvx7f5bPswxd87L3npuZt4p-NJIX32-DGjBnxdANAHcWil3YHIUbDgQIdjDfN6stGU_JByvzfCJpNCWWDinr772W7iZ3uA28F8uGS0ZMd1E5e1moEFBY8BM015Qxg2Y_k7lmv9S8GMkBJyTiJNiqnwLwfsiE1ycE4Tgq_vuQfFToIMNw +``` + +
+ +### Configure Ingress + +Use the following steps to configure ingress for the Kubernetes Dashboard pack. + +
+ +1. Ensure the `service.type` parameter is set to "ClusterIP". + + +2. To enable ingress, set the `ingress.enabled` parameter to "true". + + +3. Set ingress rules, such as annotations, path, hosts, and any other rules. + +This allows you to access the Kubernetes Dashboard in hostname or IP format using the IP address that the ingress controller exposes. + +Typically you would point a DNS CNAME record to the ingress controller IP. Talk to your system administrator to learn more about which hostname to use. + +
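+
+The following is a minimal sketch of what these values could look like in the pack's configuration. The exact key nesting and the hostname are illustrative assumptions; use the keys exposed by the pack's values file.
+
+```yaml
+# Sketch only: expose the dashboard through an ingress controller.
+service:
+  type: ClusterIP
+ingress:
+  enabled: true
+  annotations: {}
+  hosts:
+    - k8s-dashboard.example.internal # point a DNS CNAME record at the ingress controller
+  paths:
+    - /
+```
+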
+ +### Configure LoadBalancer + +Use the `service.loadBalancerIP` and `service.externalPort` parameters to connect to the Kubernetes Dashboard. + +
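+
+A short sketch of these values is shown below, assuming the chart exposes them under the `service` block referenced above; the IP address is a placeholder to replace with one from your environment.
+
+```yaml
+# Sketch only: expose the dashboard through a load balancer service.
+service:
+  type: LoadBalancer
+  loadBalancerIP: 192.168.10.100 # placeholder address
+  externalPort: 443
+```
+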
+ + + + + +## Prerequisites + +- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the [Spectro Proxy](frp.md) reverse proxy. + + +- Users or groups must be mapped to a Kubernetes RBAC role, either a Role or a ClusterRole. You can create a custom role through a manifest and use Palette's roleBinding feature to associate the users or groups with the role. Refer to the [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings) guide to learn more. + +## Parameters + +| Name | Supported Values | Default Values | Description | +| --- | --- | --- | --- | +| `k8s-dashboard.namespace` | | kubernetes-dashboard | The namespace where you want to run the Kubernetes Dashboard deployment | +| `k8s-dashboard.clusterRole` | Any valid clusterRole from the Kubernetes cluster | `k8s-dashboard-readonly` | The ClusterRole to be attached to the ServiceAccount which defines RBAC to the cluster resources.

By default, a ClusterRole (k8s-dashboard-readonly) with ReadOnly access to most of the resources is deployed. | +| `k8s-dashboard.certDuration` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `8760h` (365 days) | Validity for the Self-signed certificate, specified in hours. | +| `k8s-dashboard.certRenewal` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `720h` (30 days) | Certificate renew before expiration duration | +| `k8s-dashboard.serviceType` | ClusterIP, LoadBalancer | ClusterIP | The ServiceType for dashboard. We recommend using ClusterIP service type to restrict access to the cluster.| +| `k8s-dashboard.skipLogin` | True, False | False | A flag to skip authentications in the Kubernetes Dashboard UI. We recommend using this only for demo purposes, as enabling it could expose a security risk. | +:::caution + +Starting with Kubernetes Dashboard version 2.7.0, the **Connect** button is no longer available. For an optimized experience, we recommend you use the pre-configured version of this dashboard, called Spectro Kubernetes Dashboard pack. To learn more about it and start using it, check out the [Spectro Kubernetes Dashboard](spectro-k8s-dashboard.md) guide. + +::: + +## Usage + +To use the Kubernetes Dashboard pack, you have to add it to your cluster profile. Use the following information to find the Kubernetes Dashboard pack. +- **Pack Type**: Monitoring +- **Registry**: Public Repo +- **Pack Name**: Kubernetes Dashboard +- **Pack Version**: 2.0.x or higher + +The Kubernetes Dashboard pack requires the [Spectro Proxy](frp.md) pack, which serves as a reverse proxy to expose the Kubernetes dashboard. You must configure the Spectro Proxy pack. + +
+ +### Access Kubernetes Dashboard + +When connected to the cluster remotely, run the following command to establish a connection to deploy the Kubernetes Dashboard on port 8080. + +
+ +```bash +kubectl port-forward --namespace kubernetes-dashboard service/kubernetes-dashboard 8080:443 +``` + +To access Kubernetes Dashboard, navigate to `https://localhost:8080` in a browser of your choice. + +From the Dashboard login page, run the following command from the terminal window to obtain the bearer token: + 
+ +```bash +kubectl --namespace kubernetes-dashboard describe secret $(kubectl --namespace kubernetes-dashboard get secret | grep kubernetes-dashboard-token | awk '{print $1}') +``` + +The following example shows the command output with the token value. + 
+ +```yaml +Name: kubernetes-dashboard-token-h4lnf +Namespace: kubernetes-dashboard +Labels: +Annotations: kubernetes.io/service-account.name: kubernetes-dashboard + kubernetes.io/service-account.uid: 00e729f1-6638-4e68-8df5-afa2e2e38095 + +Type: kubernetes.io/service-account-token + +Data +==== +ca.crt: 1029 bytes +namespace: 20 bytes +token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ilg1bTg3RWM4Y1c3NnhkQ3dXbXNDUXQydVpYQklRUWoxa1BaS0ctVkVTSDQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1oNGxuZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjAwZTcyOWYxLTY2MzgtNGU2OC04ZGY1LWFmYTJlMmUzODA5NSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.JU4GOJNjGpkHabUyxBt_2rvtXNjpR3w238BF2oMCQUNf_ZkUGSMeAAgIKxbAuk62dtJNDaRh5yAZ9J5KthMcU6k4qVmodUOJdlvigBVNjTDEhPM-sqJus62HMtwjpvm0CX-aP_A_BqHs2yJ3OgXSX0uHmkUO1FMoZSVaRpOvx7f5bPswxd87L3npuZt4p-NJIX32-DGjBnxdANAHcWil3YHIUbDgQIdjDfN6stGU_JByvzfCJpNCWWDinr772W7iZ3uA28F8uGS0ZMd1E5e1moEFBY8BM015Qxg2Y_k7lmv9S8GMkBJyTiJNiqnwLwfsiE1ycE4Tgq_vuQfFToIMNw +``` + +
+ +### Configure Ingress + +Use the following steps to configure ingress for the Kubernetes Dashboard pack. + +
+ +1. Ensure the `service.type` parameter is set to "ClusterIP". + + +2. To enable ingress, set the `ingress.enabled` parameter to "true". + + +3. Set ingress rules, such as annotations, path, hosts, and any other rules. + +This allows you to access the Kubernetes Dashboard in hostname or IP format using the IP address that the ingress controller exposes. + +Typically you would point a DNS CNAME record to the ingress controller IP. Talk to your system administrator to learn more about which hostname to use. + +
+ +### Configure LoadBalancer + +Use the `service.loadBalancerIP` and `service.externalPort` parameters to connect to the Kubernetes Dashboard. + +
+ + + + + + +## Prerequisites + +- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the [Spectro Proxy](frp.md) reverse proxy. + + +- Users or groups must be mapped to a Kubernetes RBAC role, either a Role or a ClusterRole. You can create a custom role through a manifest and use Palette's roleBinding feature to associate the users or groups with the role. Refer to the [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings) guide to learn more. + +## Parameters + +| Name | Supported Values | Default Values | Description | +| --- | --- | --- | --- | +| `k8s-dashboard.namespace` | | kubernetes-dashboard | The namespace where you want to run the Kubernetes Dashboard deployment | +| `k8s-dashboard.clusterRole` | Any valid clusterRole from the Kubernetes cluster | `k8s-dashboard-readonly` | The ClusterRole to be attached to the ServiceAccount which defines RBAC to the cluster resources.

By default, a ClusterRole (k8s-dashboard-readonly) with ReadOnly access to most of the resources is deployed. | +| `k8s-dashboard.certDuration` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `8760h` (365 days) | Validity for the Self-signed certificate, specified in hours. | +| `k8s-dashboard.certRenewal` | A Go time.Duration string format in s (seconds), m (minutes), and h (hour) suffixes | `720h` (30 days) | Certificate renew before expiration duration | +| `k8s-dashboard.serviceType` | ClusterIP, LoadBalancer | ClusterIP | The ServiceType for dashboard. We recommend using ClusterIP service type to restrict access to the cluster.| +| `k8s-dashboard.skipLogin` | True, False | False | A flag to skip authentications in the Kubernetes Dashboard UI. We recommend using this only for demo purposes, as enabling it could expose a security risk.| +:::caution + +Starting with Kubernetes Dashboard version 2.7.0, the **Connect** button is no longer available. For an optimized experience, we recommend you use the pre-configured version of this dashboard, called Spectro Kubernetes Dashboard pack. To learn more about it and start using it, check out the [Spectro Kubernetes Dashboard](spectro-k8s-dashboard.md) guide. + +::: + +## Usage + +To use the Kubernetes Dashboard pack, you have to add it to your cluster profile. Use the following information to find the Kubernetes Dashboard pack. +- **Pack Type**: Monitoring +- **Registry**: Public Repo +- **Pack Name**: Kubernetes Dashboard +- **Pack Version**: 2.0.x or higher + +The Kubernetes Dashboard pack requires the [Spectro Proxy](frp.md) pack, which serves as a reverse proxy to expose the Kubernetes dashboard. You must configure the Spectro Proxy pack. + +
+ +### Access Kubernetes Dashboard + +When connected to the cluster remotely, run the following command to establish a connection to deploy the Kubernetes Dashboard on port 8080. + 
+ +```bash +kubectl port-forward --namespace kubernetes-dashboard service/kubernetes-dashboard 8080:443 +``` + +To access Kubernetes Dashboard, navigate to `https://localhost:8080` in a browser of your choice. + +From the Dashboard login page, run the following command from the terminal window to obtain the bearer token: + 
+ +```bash +kubectl --namespace kubernetes-dashboard describe secret $(kubectl --namespace kubernetes-dashboard get secret | grep kubernetes-dashboard-token | awk '{print $1}') +``` + +The following example shows the command output with the token value. + 
+ +```yaml +Name: kubernetes-dashboard-token-h4lnf +Namespace: kubernetes-dashboard +Labels: +Annotations: kubernetes.io/service-account.name: kubernetes-dashboard + kubernetes.io/service-account.uid: 00e729f1-6638-4e68-8df5-afa2e2e38095 + +Type: kubernetes.io/service-account-token + +Data +==== +ca.crt: 1029 bytes +namespace: 20 bytes +token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ilg1bTg3RWM4Y1c3NnhkQ3dXbXNDUXQydVpYQklRUWoxa1BaS0ctVkVTSDQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1oNGxuZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjAwZTcyOWYxLTY2MzgtNGU2OC04ZGY1LWFmYTJlMmUzODA5NSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.JU4GOJNjGpkHabUyxBt_2rvtXNjpR3w238BF2oMCQUNf_ZkUGSMeAAgIKxbAuk62dtJNDaRh5yAZ9J5KthMcU6k4qVmodUOJdlvigBVNjTDEhPM-sqJus62HMtwjpvm0CX-aP_A_BqHs2yJ3OgXSX0uHmkUO1FMoZSVaRpOvx7f5bPswxd87L3npuZt4p-NJIX32-DGjBnxdANAHcWil3YHIUbDgQIdjDfN6stGU_JByvzfCJpNCWWDinr772W7iZ3uA28F8uGS0ZMd1E5e1moEFBY8BM015Qxg2Y_k7lmv9S8GMkBJyTiJNiqnwLwfsiE1ycE4Tgq_vuQfFToIMNw +``` + +
+ +### Configure Ingress + +Use the following steps to configure ingress for the Kubernetes Dashboard pack. + +
+ +1. Ensure the `service.type` parameter is set to "ClusterIP". + + +2. To enable ingress, set the `ingress.enabled` parameter to "true". + + +3. Set ingress rules, such as annotations, path, hosts, and any other rules. + +This allows you to access the Kubernetes Dashboard by hostname or by the IP address that the ingress controller exposes. + +Typically, you would point a DNS CNAME record to the ingress controller's IP address. Talk to your system administrator to learn more about which hostname to use. + +
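+For reference, the following is a hedged sketch of how these ingress settings might look in the pack's YAML values. The key names follow common Helm chart conventions and may differ from the pack's actual `values.yaml`, and the hostname and NGINX annotation are hypothetical placeholders for your own environment.
+
+```yaml
+# Hedged example values -- verify key names against the pack's values.yaml.
+service:
+  type: ClusterIP # Step 1: keep the dashboard service internal
+ingress:
+  enabled: true # Step 2: expose the dashboard through the ingress controller
+  annotations:
+    # Assumes an NGINX ingress controller; the dashboard itself serves HTTPS.
+    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+  hosts:
+    - dashboard.example.com # Step 3: hypothetical hostname, pointed at the controller via a DNS CNAME record
+  path: /
+```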
+ +### Configure LoadBalancer + +Use the `service.loadBalancerIP` and `service.externalPort` parameters to connect to the Kubernetes Dashboard. + + +
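+As a hedged illustration, the values below show one way these parameters could be set. The IP address is a documentation placeholder, and the exact key layout should be confirmed against the pack's `values.yaml`.
+
+```yaml
+# Hedged example values -- confirm key names against the pack's values.yaml.
+service:
+  type: LoadBalancer # switch from the default ClusterIP
+  loadBalancerIP: 192.0.2.10 # placeholder address from your load balancer pool
+  externalPort: 443 # port on which the dashboard is reachable externally
+```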
+ + + + + + + +## Prerequisites + +- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the [Spectro Proxy](frp.md) reverse proxy. + + +- Users or groups must be mapped to a Kubernetes RBAC role, either a Role or a ClusterRole. You can create a custom role through a manifest and use Palette's roleBinding feature to associate the users or groups with the role. Refer to the [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings) guide to learn more. + +## Parameters + +| Name | Supported Values | Default Values | Description | +| --- | --- | --- | --- | +| `k8s-dashboard.namespace` | | kubernetes-dashboard | The namespace where you want to run the Kubernetes Dashboard deployment | +| `k8s-dashboard.clusterRole` | Any valid clusterRole from the Kubernetes cluster | `k8s-dashboard-readonly` | The ClusterRole to be attached to the ServiceAccount which defines RBAC to the cluster resources.

By default, a ClusterRole (k8s-dashboard-readonly) with ReadOnly access to most of the resources is deployed. | +| `k8s-dashboard.certDuration` | A Go time.Duration string using s (seconds), m (minutes), or h (hours) suffixes | `8760h` (365 days) | The validity period of the self-signed certificate, specified in hours. | +| `k8s-dashboard.certRenewal` | A Go time.Duration string using s (seconds), m (minutes), or h (hours) suffixes | `720h` (30 days) | How long before expiration the certificate is renewed. | +| `k8s-dashboard.serviceType` | ClusterIP, LoadBalancer | ClusterIP | The ServiceType for the dashboard. We recommend using the ClusterIP service type to restrict access to the cluster.| +| `k8s-dashboard.skipLogin` | True, False | False | A flag to skip authentication in the Kubernetes Dashboard UI. We recommend using this only for demo purposes, as enabling it poses a security risk.| + +:::caution + +Starting with Kubernetes Dashboard version 2.7.0, the **Connect** button is no longer available. For an optimized experience, we recommend you use the pre-configured version of this dashboard, called the Spectro Kubernetes Dashboard pack. To learn more about it and start using it, check out the [Spectro Kubernetes Dashboard](spectro-k8s-dashboard.md) guide. + +::: + +## Usage + +To use the Kubernetes Dashboard pack, you must add it to your cluster profile. Use the following information to find the Kubernetes Dashboard pack. +- **Pack Type**: Monitoring +- **Registry**: Public Repo +- **Pack Name**: Kubernetes Dashboard +- **Pack Version**: 2.0.x or higher + +The Kubernetes Dashboard pack requires the [Spectro Proxy](frp.md) pack, which serves as a reverse proxy to expose the Kubernetes dashboard. You must configure the Spectro Proxy pack. + +
+ +### Access Kubernetes Dashboard + +When connected to the cluster remotely, run the following command to establish a connection to the Kubernetes Dashboard on local port 8080. + +
+ +```bash +kubectl port-forward --namespace kubernetes-dashboard service/kubernetes-dashboard 8080:443 +``` + +To access the Kubernetes Dashboard, navigate to `https://localhost:8080` in a browser of your choice. + +From the Dashboard login page, run the following command from the terminal window to obtain the bearer token: + +
+ +```bash +kubectl --namespace kubernetes-dashboard describe secret $(kubectl --namespace kubernetes-dashboard get secret | grep kubernetes-dashboard-token | awk '{print $1}') +``` + +The following example shows the command output with the token value. + +
+ +```yaml +Name: kubernetes-dashboard-token-h4lnf +Namespace: kubernetes-dashboard +Labels: +Annotations: kubernetes.io/service-account.name: kubernetes-dashboard + kubernetes.io/service-account.uid: 00e729f1-6638-4e68-8df5-afa2e2e38095 + +Type: kubernetes.io/service-account-token + +Data +==== +ca.crt: 1029 bytes +namespace: 20 bytes +token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ilg1bTg3RWM4Y1c3NnhkQ3dXbXNDUXQydVpYQklRUWoxa1BaS0ctVkVTSDQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1oNGxuZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjAwZTcyOWYxLTY2MzgtNGU2OC04ZGY1LWFmYTJlMmUzODA5NSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.JU4GOJNjGpkHabUyxBt_2rvtXNjpR3w238BF2oMCQUNf_ZkUGSMeAAgIKxbAuk62dtJNDaRh5yAZ9J5KthMcU6k4qVmodUOJdlvigBVNjTDEhPM-sqJus62HMtwjpvm0CX-aP_A_BqHs2yJ3OgXSX0uHmkUO1FMoZSVaRpOvx7f5bPswxd87L3npuZt4p-NJIX32-DGjBnxdANAHcWil3YHIUbDgQIdjDfN6stGU_JByvzfCJpNCWWDinr772W7iZ3uA28F8uGS0ZMd1E5e1moEFBY8BM015Qxg2Y_k7lmv9S8GMkBJyTiJNiqnwLwfsiE1ycE4Tgq_vuQfFToIMNw +``` + +
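+On clusters running Kubernetes 1.24 or later, ServiceAccount token Secrets are no longer created automatically, so the `grep kubernetes-dashboard-token` command above may return nothing. In that case, you can request a short-lived bearer token directly. The following is a minimal sketch that assumes the pack's default ServiceAccount is named `kubernetes-dashboard`.
+
+```bash
+# Request a short-lived token for the dashboard ServiceAccount (requires kubectl 1.24 or later)
+kubectl --namespace kubernetes-dashboard create token kubernetes-dashboard
+```
+
+Use the returned token on the Dashboard login page in the same way as the token shown in the example output above.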
+ +### Configure Ingress + +Use the following steps to configure ingress for the Kubernetes Dashboard pack. + +
+ +1. Ensure the `service.type` parameter is set to "ClusterIP". + + +2. To enable ingress, set the `ingress.enabled` parameter to "true". + + +3. Set ingress rules, such as annotations, path, hosts, and any other rules. + +This allows you to access the Kubernetes Dashboard by hostname or by the IP address that the ingress controller exposes. + +Typically, you would point a DNS CNAME record to the ingress controller's IP address. Talk to your system administrator to learn more about which hostname to use. + +
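+For reference, the following is a hedged sketch of how these ingress settings might look in the pack's YAML values. The key names follow common Helm chart conventions and may differ from the pack's actual `values.yaml`, and the hostname and NGINX annotation are hypothetical placeholders for your own environment.
+
+```yaml
+# Hedged example values -- verify key names against the pack's values.yaml.
+service:
+  type: ClusterIP # Step 1: keep the dashboard service internal
+ingress:
+  enabled: true # Step 2: expose the dashboard through the ingress controller
+  annotations:
+    # Assumes an NGINX ingress controller; the dashboard itself serves HTTPS.
+    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+  hosts:
+    - dashboard.example.com # Step 3: hypothetical hostname, pointed at the controller via a DNS CNAME record
+  path: /
+```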
+ +### Configure LoadBalancer + +Use the `service.loadBalancerIP` and `service.externalPort` parameters to connect to the Kubernetes Dashboard. + +
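+As a hedged illustration, the values below show one way these parameters could be set. The IP address is a documentation placeholder, and the exact key layout should be confirmed against the pack's `values.yaml`.
+
+```yaml
+# Hedged example values -- confirm key names against the pack's values.yaml.
+service:
+  type: LoadBalancer # switch from the default ClusterIP
+  loadBalancerIP: 192.0.2.10 # placeholder address from your load balancer pool
+  externalPort: 443 # port on which the dashboard is reachable externally
+```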
+ + + + + + +## Prerequisites + +- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the [Spectro Proxy](frp.md) reverse proxy. + + +- Users or groups must be mapped to a Kubernetes RBAC role, either a Role or a ClusterRole. You can create a custom role through a manifest and use Palette's roleBinding feature to associate the users or groups with the role. Refer to the [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings) guide to learn more. + +## Parameters + +| Name | Supported Values | Default Values | Description | +| --- | --- | --- | --- | +| `k8s-dashboard.namespace` | | kubernetes-dashboard | The namespace where you want to run the Kubernetes Dashboard deployment | +| `k8s-dashboard.clusterRole` | Any valid clusterRole from the Kubernetes cluster | `k8s-dashboard-readonly` | The ClusterRole to be attached to the ServiceAccount which defines RBAC to the cluster resources.

By default, a ClusterRole (k8s-dashboard-readonly) with ReadOnly access to most of the resources is deployed. | +| `k8s-dashboard.certDuration` | A Go time.Duration string using s (seconds), m (minutes), or h (hours) suffixes | `8760h` (365 days) | The validity period of the self-signed certificate, specified in hours. | +| `k8s-dashboard.certRenewal` | A Go time.Duration string using s (seconds), m (minutes), or h (hours) suffixes | `720h` (30 days) | How long before expiration the certificate is renewed. | +| `k8s-dashboard.serviceType` | ClusterIP, LoadBalancer | ClusterIP | The ServiceType for the dashboard. We recommend using the ClusterIP service type to restrict access to the cluster.| +| `k8s-dashboard.skipLogin` | True, False | False | A flag to skip authentication in the Kubernetes Dashboard UI. We recommend using this only for demo purposes, as enabling it poses a security risk.| + +:::caution + +Starting with Kubernetes Dashboard version 2.7.0, the **Connect** button is no longer available. For an optimized experience, we recommend you use the pre-configured version of this dashboard, called the Spectro Kubernetes Dashboard pack. To learn more about it and start using it, check out the [Spectro Kubernetes Dashboard](spectro-k8s-dashboard.md) guide. + +::: + +## Usage + +To use the Kubernetes Dashboard pack, you must add it to your cluster profile. Use the following information to find the Kubernetes Dashboard pack. +- **Pack Type**: Monitoring +- **Registry**: Public Repo +- **Pack Name**: Kubernetes Dashboard +- **Pack Version**: 2.0.x or higher + +The Kubernetes Dashboard pack requires the [Spectro Proxy](frp.md) pack, which serves as a reverse proxy to expose the Kubernetes dashboard. You must configure the Spectro Proxy pack. + +
+ +### Access Kubernetes Dashboard + +When connected to the cluster remotely, run the following command to establish a connection to the Kubernetes Dashboard on local port 8080. + +
+ +```bash +kubectl port-forward --namespace kubernetes-dashboard service/kubernetes-dashboard 8080:443 +``` + +To access the Kubernetes Dashboard, navigate to `https://localhost:8080` in a browser of your choice. + +From the Dashboard login page, run the following command from the terminal window to obtain the bearer token: + +
+ +```bash +kubectl --namespace kubernetes-dashboard describe secret $(kubectl --namespace kubernetes-dashboard get secret | grep kubernetes-dashboard-token | awk '{print $1}') +``` + +The following example shows the command output with the token value. + +
+ +```yaml +Name: kubernetes-dashboard-token-h4lnf +Namespace: kubernetes-dashboard +Labels: +Annotations: kubernetes.io/service-account.name: kubernetes-dashboard + kubernetes.io/service-account.uid: 00e729f1-6638-4e68-8df5-afa2e2e38095 + +Type: kubernetes.io/service-account-token + +Data +==== +ca.crt: 1029 bytes +namespace: 20 bytes +token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ilg1bTg3RWM4Y1c3NnhkQ3dXbXNDUXQydVpYQklRUWoxa1BaS0ctVkVTSDQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1oNGxuZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjAwZTcyOWYxLTY2MzgtNGU2OC04ZGY1LWFmYTJlMmUzODA5NSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.JU4GOJNjGpkHabUyxBt_2rvtXNjpR3w238BF2oMCQUNf_ZkUGSMeAAgIKxbAuk62dtJNDaRh5yAZ9J5KthMcU6k4qVmodUOJdlvigBVNjTDEhPM-sqJus62HMtwjpvm0CX-aP_A_BqHs2yJ3OgXSX0uHmkUO1FMoZSVaRpOvx7f5bPswxd87L3npuZt4p-NJIX32-DGjBnxdANAHcWil3YHIUbDgQIdjDfN6stGU_JByvzfCJpNCWWDinr772W7iZ3uA28F8uGS0ZMd1E5e1moEFBY8BM015Qxg2Y_k7lmv9S8GMkBJyTiJNiqnwLwfsiE1ycE4Tgq_vuQfFToIMNw +``` + +
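+On clusters running Kubernetes 1.24 or later, ServiceAccount token Secrets are no longer created automatically, so the `grep kubernetes-dashboard-token` command above may return nothing. In that case, you can request a short-lived bearer token directly. The following is a minimal sketch that assumes the pack's default ServiceAccount is named `kubernetes-dashboard`.
+
+```bash
+# Request a short-lived token for the dashboard ServiceAccount (requires kubectl 1.24 or later)
+kubectl --namespace kubernetes-dashboard create token kubernetes-dashboard
+```
+
+Use the returned token on the Dashboard login page in the same way as the token shown in the example output above.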
+ +### Configure Ingress + +Use the following steps to configure ingress for the Kubernetes Dashboard pack. + +
+ +1. Ensure the `service.type` parameter is set to "ClusterIP". + + +2. To enable ingress, set the `ingress.enabled` parameter to "true". + + +3. Set ingress rules, such as annotations, path, hosts, and any other rules. + +This allows you to access the Kubernetes Dashboard by hostname or by the IP address that the ingress controller exposes. + +Typically, you would point a DNS CNAME record to the ingress controller's IP address. Talk to your system administrator to learn more about which hostname to use. + +
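+For reference, the following is a hedged sketch of how these ingress settings might look in the pack's YAML values. The key names follow common Helm chart conventions and may differ from the pack's actual `values.yaml`, and the hostname and NGINX annotation are hypothetical placeholders for your own environment.
+
+```yaml
+# Hedged example values -- verify key names against the pack's values.yaml.
+service:
+  type: ClusterIP # Step 1: keep the dashboard service internal
+ingress:
+  enabled: true # Step 2: expose the dashboard through the ingress controller
+  annotations:
+    # Assumes an NGINX ingress controller; the dashboard itself serves HTTPS.
+    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+  hosts:
+    - dashboard.example.com # Step 3: hypothetical hostname, pointed at the controller via a DNS CNAME record
+  path: /
+```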
+ +### Configure LoadBalancer + +Use the `service.loadBalancerIP` and `service.externalPort` parameters to connect to the Kubernetes Dashboard. + + +
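+As a hedged illustration, the values below show one way these parameters could be set. The IP address is a documentation placeholder, and the exact key layout should be confirmed against the pack's `values.yaml`.
+
+```yaml
+# Hedged example values -- confirm key names against the pack's values.yaml.
+service:
+  type: LoadBalancer # switch from the default ClusterIP
+  loadBalancerIP: 192.0.2.10 # placeholder address from your load balancer pool
+  externalPort: 443 # port on which the dashboard is reachable externally
+```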
+ + + + + + +## Prerequisites + +- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the [Spectro Proxy](frp.md) reverse proxy. + + +- Users or groups must be mapped to a Kubernetes RBAC role, either a Role or a ClusterRole. You can create a custom role through a manifest and use Palette's roleBinding feature to associate the users or groups with the role. Refer to the [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings) guide to learn more. + +## Parameters + +| Name | Supported Values | Default Values | Description | +| --- | --- | --- | --- | +| `k8s-dashboard.namespace` | | kubernetes-dashboard | The namespace where you want to run the Kubernetes Dashboard deployment | +| `k8s-dashboard.clusterRole` | Any valid clusterRole from the Kubernetes cluster | `k8s-dashboard-readonly` | The ClusterRole to be attached to the ServiceAccount which defines RBAC to the cluster resources.

By default, a ClusterRole (k8s-dashboard-readonly) with ReadOnly access to most of the resources is deployed. | +| `k8s-dashboard.certDuration` | A Go time.Duration string using s (seconds), m (minutes), or h (hours) suffixes | `8760h` (365 days) | The validity period of the self-signed certificate, specified in hours. | +| `k8s-dashboard.certRenewal` | A Go time.Duration string using s (seconds), m (minutes), or h (hours) suffixes | `720h` (30 days) | How long before expiration the certificate is renewed. | +| `k8s-dashboard.serviceType` | ClusterIP, LoadBalancer | ClusterIP | The ServiceType for the dashboard. We recommend using the ClusterIP service type to restrict access to the cluster.| +| `k8s-dashboard.skipLogin` | True, False | False | A flag to skip authentication in the Kubernetes Dashboard UI. We recommend using this only for demo purposes, as enabling it poses a security risk.| + +:::caution + +Starting with Kubernetes Dashboard version 2.7.0, the **Connect** button is no longer available. For an optimized experience, we recommend you use the pre-configured version of this dashboard, called the Spectro Kubernetes Dashboard pack. To learn more about it and start using it, check out the [Spectro Kubernetes Dashboard](spectro-k8s-dashboard.md) guide. + +::: + +## Usage + +To use the Kubernetes Dashboard pack, you must add it to your cluster profile. Use the following information to find the Kubernetes Dashboard pack. +- **Pack Type**: Monitoring +- **Registry**: Public Repo +- **Pack Name**: Kubernetes Dashboard +- **Pack Version**: 2.0.x or higher + +The Kubernetes Dashboard pack requires the [Spectro Proxy](frp.md) pack, which serves as a reverse proxy to expose the Kubernetes dashboard. You must configure the Spectro Proxy pack. + +
+ +### Access Kubernetes Dashboard + +When connected to the cluster remotely, run the following command to establish a connection to the Kubernetes Dashboard on local port 8080. + +
+ +```bash +kubectl port-forward --namespace kubernetes-dashboard service/kubernetes-dashboard 8080:443 +``` + +To access the Kubernetes Dashboard, navigate to `https://localhost:8080` in a browser of your choice. + +From the Dashboard login page, run the following command from the terminal window to obtain the bearer token: + +
+ +```bash +kubectl --namespace kubernetes-dashboard describe secret $(kubectl --namespace kubernetes-dashboard get secret | grep kubernetes-dashboard-token | awk '{print $1}') +``` + +The following example shows the command output with the token value. + +
+ +```yaml +Name: kubernetes-dashboard-token-h4lnf +Namespace: kubernetes-dashboard +Labels: +Annotations: kubernetes.io/service-account.name: kubernetes-dashboard + kubernetes.io/service-account.uid: 00e729f1-6638-4e68-8df5-afa2e2e38095 + +Type: kubernetes.io/service-account-token + +Data +==== +ca.crt: 1029 bytes +namespace: 20 bytes +token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ilg1bTg3RWM4Y1c3NnhkQ3dXbXNDUXQydVpYQklRUWoxa1BaS0ctVkVTSDQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1oNGxuZiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjAwZTcyOWYxLTY2MzgtNGU2OC04ZGY1LWFmYTJlMmUzODA5NSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.JU4GOJNjGpkHabUyxBt_2rvtXNjpR3w238BF2oMCQUNf_ZkUGSMeAAgIKxbAuk62dtJNDaRh5yAZ9J5KthMcU6k4qVmodUOJdlvigBVNjTDEhPM-sqJus62HMtwjpvm0CX-aP_A_BqHs2yJ3OgXSX0uHmkUO1FMoZSVaRpOvx7f5bPswxd87L3npuZt4p-NJIX32-DGjBnxdANAHcWil3YHIUbDgQIdjDfN6stGU_JByvzfCJpNCWWDinr772W7iZ3uA28F8uGS0ZMd1E5e1moEFBY8BM015Qxg2Y_k7lmv9S8GMkBJyTiJNiqnwLwfsiE1ycE4Tgq_vuQfFToIMNw +``` + +
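+On clusters running Kubernetes 1.24 or later, ServiceAccount token Secrets are no longer created automatically, so the `grep kubernetes-dashboard-token` command above may return nothing. In that case, you can request a short-lived bearer token directly. The following is a minimal sketch that assumes the pack's default ServiceAccount is named `kubernetes-dashboard`.
+
+```bash
+# Request a short-lived token for the dashboard ServiceAccount (requires kubectl 1.24 or later)
+kubectl --namespace kubernetes-dashboard create token kubernetes-dashboard
+```
+
+Use the returned token on the Dashboard login page in the same way as the token shown in the example output above.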
+ +### Configure Ingress + +Use the following steps to configure ingress for the Kubernetes Dashboard pack. + +
+ +1. Ensure the `service.type` parameter is set to "ClusterIP". + + +2. To enable ingress, set the `ingress.enabled` parameter to "true". + + +3. Set ingress rules, such as annotations, path, hosts, and any other rules. + +This allows you to access the Kubernetes Dashboard by hostname or by the IP address that the ingress controller exposes. + +Typically, you would point a DNS CNAME record to the ingress controller's IP address. Talk to your system administrator to learn more about which hostname to use. + +
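+For reference, the following is a hedged sketch of how these ingress settings might look in the pack's YAML values. The key names follow common Helm chart conventions and may differ from the pack's actual `values.yaml`, and the hostname and NGINX annotation are hypothetical placeholders for your own environment.
+
+```yaml
+# Hedged example values -- verify key names against the pack's values.yaml.
+service:
+  type: ClusterIP # Step 1: keep the dashboard service internal
+ingress:
+  enabled: true # Step 2: expose the dashboard through the ingress controller
+  annotations:
+    # Assumes an NGINX ingress controller; the dashboard itself serves HTTPS.
+    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+  hosts:
+    - dashboard.example.com # Step 3: hypothetical hostname, pointed at the controller via a DNS CNAME record
+  path: /
+```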
+ +### Configure LoadBalancer + +Use the `service.loadBalancerIP` and `service.externalPort` parameters to connect to the Kubernetes Dashboard. + +
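+As a hedged illustration, the values below show one way these parameters could be set. The IP address is a documentation placeholder, and the exact key layout should be confirmed against the pack's `values.yaml`.
+
+```yaml
+# Hedged example values -- confirm key names against the pack's values.yaml.
+service:
+  type: LoadBalancer # switch from the default ClusterIP
+  loadBalancerIP: 192.0.2.10 # placeholder address from your load balancer pool
+  externalPort: 443 # port on which the dashboard is reachable externally
+```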
+ +
+ + +# Troubleshooting + +- If the Kubernetes Dashboard is not accessible, check the dashboard pod for errors and ensure the dashboard service is in the **Running** state. + + +- When the namespace is customized while deploying the Kubernetes Dashboard, replace the namespace values in the commands shown above. + + +# Terraform + +You can reference the Kubernetes Dashboard pack in Terraform with a data resource. + +
+ +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "spectro-proxy" { + name = "k8s-dashboard" + version = "2.7.0" + type = "monitoring" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + +# References + +- [Kubernetes Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/) + + +- [Open Source Kubernetes Dashboard Documentation](https://github.com/kubernetes/dashboard/tree/master/docs) diff --git a/docs/docs-content/integrations/kubernetes-edge.md b/docs/docs-content/integrations/kubernetes-edge.md new file mode 100644 index 0000000000..82bc97e37a --- /dev/null +++ b/docs/docs-content/integrations/kubernetes-edge.md @@ -0,0 +1,1570 @@ +--- +sidebar_label: "Palette eXtended Kubernetes - Edge" +title: "Palette eXtended Kubernetes - Edge" +description: "Learn about the Palette eXtended Kubernetes - Edge pack and how you can use it your host clusters in an edge environment." +hide_table_of_contents: true +type: "integration" +category: ["kubernetes", 'amd64', 'fips'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: "https://registry.spectrocloud.com/v1/k8s-dashboard/blobs/sha256:2de5d88b2573af42d4cc269dff75744c4174ce47cbbeed5445e51a2edd8b7429?type=image/png" +tags: ["packs", "kubernetes", "pxke", "edge"] +--- + +The Palette eXtended Kubernetes - Edge (PXK-E) pack supports Kubernetes clusters set up on Edge hosts installed in isolated locations like grocery stores, restaurants, and similar locations, versus a data center or cloud environment. We offer PXK-E as a core pack in Palette. + +:::info +Review our [Maintenance Policy](maintenance-policy.md) to learn about pack update and deprecation schedules. +::: + +## What is PXK-E? + +PXK-E is a customized version of the open-source Cloud Native Computing Foundation (CNCF) distribution of Kubernetes. This Kubernetes distribution is customized and optimized for edge computing environments and can be deployed through Palette. PXK-E is the Kubernetes distribution Palette defaults to when deploying Edge clusters. + +PXK-E differs from the upstream open-source Kubernetes version by optimizing for operations in an edge computing environment. PXK-E also differentiates itself by using the Kairos open-source project as the base operating system (OS). PXK-E’s use of Kairos means the OS is immutable, which significantly improves the security posture and reduces potential attack surfaces. + +Another differentiator of PXK-E is the carefully reviewed and applied hardening of the OS and Kubernetes. The hardening ranges from removing unused OS kernel modules to using an OS configuration that follows industry best practices. Our custom Kubernetes configuration addresses common deployment security pitfalls and implements industry best practices. + +With PXK-E, you can manage automatic OS upgrades while retaining immutability and the flexibility to roll out changes safely. The A/B partition architecture of Kairos allows for new OS and dependency versions to be installed in a separate partition and mounted at runtime. You can fall back to use the previous partition if issues are identified in the new partition. + +PXK-E manages the underlying OS and the Kubernetes layer together, which reduces the challenge of upgrading and maintaining two separate components. + +PXK-E allows you to apply different flavors of container storage interfaces (CSI) and container network interfaces (CNI). 
Other open-source Kubernetes distributions such as MicroK8s, RKE2, and K3s come with a default CSI and CNI. There is additional complexity and overhead when you want to consume different interface plugins with traditional Kubernetes distributions. Using PXK-E, you select the interface plugin you want to apply without additional overhead and complexity. + +There are no changes to the Kubernetes source code used in PXK-E, and it follows the same versioning schema as the upstream open-source Kubernetes distribution. + + +:::info + +We also offer Palette eXtended Kubernetes (PXK) for cloud and data center deployments. For more information, refer to the [Palette eXtended Kubernetes](kubernetes.md) guide to learn more about PXK. + +::: + +# Versions Supported + + + + + + +## Prerequisites + +- A minimum of 2 CPU and 4GB Memory. + + +## Parameters + +| Parameter | Description | +|----------------------------------------------------------|----------------------------------------------------| +| `cluster.config.clusterConfiguration.apiServer.extraArgs` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port.| +| `cluster.config.clusterConfiguration.apiServer.extraVolumes` | This parameter describes extra volumes for the Kubernetes API server, such as `audit-log` and `audit-policy`. | +| `cluster.config.clusterConfiguration.controllerManager.extraArgs` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | +| `cluster.config.clusterConfiguration.etcd.local.dataDir` | This parameter specifies the data directory for etcd, the distributed key-value store that Kubernetes uses to persist cluster state. | +| `cluster.config.clusterConfiguration.networking.podSubnet` | The IP subnet range to assign to pods. Default: 192.168.0.0/16 | +| `cluster.config.clusterConfiguration.networking.serviceSubnet` | The IP subnet range to assign to services. Default: 192.169.0.0/16 | +| `cluster.config.clusterConfiguration.scheduler.extraArgs` | This parameter contains extra arguments for the Kubernetes scheduler, such as disabling profiling. | +| `cluster.config.initConfiguration.nodeRegistration.kubeletExtraArgs` | This parameter contains extra arguments for kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | +| `pack.palette.config.oidc.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK-E pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](#configure-custom-oidc). | + +You can add cloud-init stages exposed by [Kairos](https://kairos.io/docs/architecture/cloud-init/), an open-source project. For more information, check out the [Cloud Init Stages](../clusters/edge/edge-configuration/cloud-init.md) reference. + +You can also use pack settings described in the [Palette eXtended Kubernetes](kubernetes.md) guide. + + +## Usage + +The Kubeadm configuration file is where you can do the following: + +- Manually configure a third-party OIDC IDP. For more information, check out [Configure Custom OIDC](kubernetes-edge.md#configure-custom-oidc). + +- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. 
For more information, refer to the [Spectro Proxy](frp.md) guide. + + +#### Configuration Changes + +The PXK-E Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. + +```yaml +pack: + palette: + config: + oidc: + identityProvider: +``` + + +#### Example Kubeadm Configuration File + +```yaml +cluster: + config: | + clusterConfiguration: + apiServer: + extraArgs: + advertise-address: "0.0.0.0" + anonymous-auth: "true" + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + authorization-mode: RBAC,Node + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + disable-admission-plugins: AlwaysAdmit + enable-admission-plugins: AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction + profiling: "false" + secure-port: "6443" + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /var/log/apiserver + mountPath: /var/log/apiserver + name: audit-log + pathType: DirectoryOrCreate + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + timeoutForControlPlane: 10m0s + controllerManager: + extraArgs: + feature-gates: RotateKubeletServerCertificate=true + pod-eviction-timeout: 1m0s + profiling: "false" + terminated-pod-gc-threshold: "25" + use-service-account-credentials: "true" + dns: {} + kubernetesVersion: v1.26.4 + etcd: + local: + dataDir: "/etc/kubernetes/etcd" + extraArgs: + listen-client-urls: "https://0.0.0.0:2379" + networking: + podSubnet: 192.168.0.0/16 + serviceSubnet: 192.169.0.0/16 + scheduler: + extraArgs: + profiling: "false" + initConfiguration: + localAPIEndpoint: {} + nodeRegistration: + kubeletExtraArgs: + event-qps: "0" + feature-gates: RotateKubeletServerCertificate=true + protect-kernel-defaults: "true" + read-only-port: "0" + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + discovery: {} + nodeRegistration: + kubeletExtraArgs: + event-qps: "0" + feature-gates: RotateKubeletServerCertificate=true + protect-kernel-defaults: "true" + read-only-port: "0" + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +stages: + initramfs: + - sysctl: + vm.overcommit_memory: 1 + kernel.panic: 10 + kernel.panic_on_oops: 1 + commands: + - ln -s /etc/kubernetes/admin.conf /run/kubeconfig + files: + - path: /etc/hosts + permission: "0644" + content: | + 127.0.0.1 localhost + - path: "/etc/kubernetes/audit-policy.yaml" + owner_string: "root" + permission: 0600 + content: | + apiVersion: audit.k8s.io/v1 + kind: Policy + 
rules: + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["system:unsecured"] + namespaces: ["kube-system"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["configmaps"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + - level: None + users: ["cluster-autoscaler"] + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["configmaps", "endpoints"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" +pack: + palette: + config: + oidc: + identityProvider: palette +``` + +### Configure OIDC Identity Provider + +The OIDC IDP feature offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured. In this scenario, you can leverage Palette as an IDP without having to configure a third-party IDP. We also support the ability to take advantage of other OIDC providers by making it possible for you to configure OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. + +When you add the PXK-E pack to a cluster profile, Palette displays the OIDC IDP options listed below. + +All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + +You can create a role binding that maps individual users or groups assigned within the OIDC provider's configuration to a role. To learn more, review [Use RBAC with OIDC](kubernetes-edge.md#use-rbac-with-oidc). You can also configure OIDC for virtual clusters. For guidance, refer to [Configure OIDC for a Virtual Cluster](../clusters/palette-virtual-clusters/configure-oidc-virtual-cluster.md). + + +- **None**: This setting does not require OIDC configuration for the cluster. It displays in the YAML file as `noauth`. + +- **Custom**: This is the default setting and does not require OIDC configuration. However, if desired, it allows you to specify a third-party OIDC provider by configuring OIDC statements in the YAML file as described in [Configure Custom OIDC](kubernetes-edge.md#configure-custom-oidc). This setting displays in the YAML file as `none`. + +- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. 
+ +- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](../user-management/saml-sso/enable-saml.md) guide. + +:::info + +If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. + +::: + + +### Configure Custom OIDC + +Follow these steps to configure a third-party OIDC IDP. + +1. Add the following OIDC parameters to the `apiServer.extraArgs` section of your Kubernetes YAML file when creating a cluster profile. + + +```yaml +cluster: + config: + clusterConfiguration: + apiServer: + extraArgs: + oidc-issuer-url: "provider URL" + oidc-client-id: "client-id" + oidc-groups-claim: "groups" + oidc-username-claim: "email" +``` + +2. Add the following `kubeadmconfig.clientConfig` section that contains OIDC parameters to your Kubernetes YAML file. + + +```yaml +kubeadmconfig: + clientConfig: + oidc-issuer-url: "" + oidc-client-id: "" + oidc-client-secret: "" + oidc-extra-scope: profile,email,openid +``` + +3. Provide third-party OIDC IDP details. + +### Use RBAC with OIDC + +You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + +Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. + +In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. + +![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) + + + + + + + +## Prerequisites + +- A minimum of 2 CPU and 4GB Memory. + + +## Parameters + +| Parameter | Description | +|----------------------------------------------------------|----------------------------------------------------| +| `cluster.config.clusterConfiguration.apiServer.extraArgs` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port.| +| `cluster.config.clusterConfiguration.apiServer.extraVolumes` | This parameter describes extra volumes for the Kubernetes API server, such as `audit-log` and `audit-policy`. | +| `cluster.config.clusterConfiguration.controllerManager.extraArgs` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | +| `cluster.config.clusterConfiguration.etcd.local.dataDir` | This parameter specifies the data directory for etcd, the distributed key-value store that Kubernetes uses to persist cluster state. | +| `cluster.config.clusterConfiguration.networking.podSubnet` | The IP subnet range to assign to pods. 
Default: 192.168.0.0/16 | +| `cluster.config.clusterConfiguration.networking.serviceSubnet` | The IP subnet range to assign to services. Default: 192.169.0.0/16 | +| `cluster.config.clusterConfiguration.scheduler.extraArgs` | This parameter contains extra arguments for the Kubernetes scheduler, such as disabling profiling. | +| `cluster.config.initConfiguration.nodeRegistration.kubeletExtraArgs` | This parameter contains extra arguments for kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | +| `pack.palette.config.oidc.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK-E pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](#configure-custom-oidc). | + +You can add cloud-init stages exposed by [Kairos](https://kairos.io/docs/architecture/cloud-init/), an open-source project. For more information, check out the [Cloud Init Stages](../clusters/edge/edge-configuration/cloud-init.md) reference. + +You can also use pack settings described in the [Palette eXtended Kubernetes](kubernetes.md) guide. + + +## Usage + +The Kubeadm configuration file is where you can do the following: + +- Manually configure a third-party OIDC IDP. For more information, check out [Configure Custom OIDC](kubernetes-edge.md#configure-custom-oidc). + +- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](frp.md) guide. + + +#### Configuration Changes + +The PXK-E Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. 
+ + +```yaml +pack: + palette: + config: + oidc: + identityProvider: +``` + + +#### Example Kubeadm Configuration File + +```yaml +cluster: + config: | + clusterConfiguration: + apiServer: + extraArgs: + advertise-address: "0.0.0.0" + anonymous-auth: "true" + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + authorization-mode: RBAC,Node + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + disable-admission-plugins: AlwaysAdmit + enable-admission-plugins: AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction + profiling: "false" + secure-port: "6443" + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /var/log/apiserver + mountPath: /var/log/apiserver + name: audit-log + pathType: DirectoryOrCreate + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + timeoutForControlPlane: 10m0s + controllerManager: + extraArgs: + feature-gates: RotateKubeletServerCertificate=true + pod-eviction-timeout: 1m0s + profiling: "false" + terminated-pod-gc-threshold: "25" + use-service-account-credentials: "true" + dns: {} + kubernetesVersion: v1.26.4 + etcd: + local: + dataDir: "/etc/kubernetes/etcd" + extraArgs: + listen-client-urls: "https://0.0.0.0:2379" + networking: + podSubnet: 192.168.0.0/16 + serviceSubnet: 192.169.0.0/16 + scheduler: + extraArgs: + profiling: "false" + initConfiguration: + localAPIEndpoint: {} + nodeRegistration: + kubeletExtraArgs: + event-qps: "0" + feature-gates: RotateKubeletServerCertificate=true + protect-kernel-defaults: "true" + read-only-port: "0" + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + discovery: {} + nodeRegistration: + kubeletExtraArgs: + event-qps: "0" + feature-gates: RotateKubeletServerCertificate=true + protect-kernel-defaults: "true" + read-only-port: "0" + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +stages: + initramfs: + - sysctl: + vm.overcommit_memory: 1 + kernel.panic: 10 + kernel.panic_on_oops: 1 + commands: + - ln -s /etc/kubernetes/admin.conf /run/kubeconfig + files: + - path: /etc/hosts + permission: "0644" + content: | + 127.0.0.1 localhost + - path: "/etc/kubernetes/audit-policy.yaml" + owner_string: "root" + permission: 0600 + content: | + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["system:unsecured"] + namespaces: ["kube-system"] + 
verbs: ["get"] + resources: + - group: "" # core + resources: ["configmaps"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + - level: None + users: ["cluster-autoscaler"] + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["configmaps", "endpoints"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" +pack: + palette: + config: + oidc: + identityProvider: palette +``` + + +### Configure OIDC Identity Provider + +The OIDC IDP feature offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured. In this scenario, you can leverage Palette as an IDP without having to configure a third-party IDP. We also support the ability to take advantage of other OIDC providers by making it possible for you to configure OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. + +When you add the PXK-E pack to a cluster profile, Palette displays the OIDC IDP options listed below. + +All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + +You can create a role binding that maps individual users or groups assigned within the OIDC provider's configuration to a role. To learn more, review [Use RBAC with OIDC](kubernetes-edge.md#use-rbac-with-oidc). You can also configure OIDC for virtual clusters. For guidance, refer to [Configure OIDC for a Virtual Cluster](../clusters/palette-virtual-clusters/configure-oidc-virtual-cluster.md). + + +- **None**: This setting does not require OIDC configuration for the cluster. It displays in the YAML file as `noauth`. + + +- **Custom**: This is the default setting and does not require OIDC configuration. However, if desired, it allows you to specify a third-party OIDC provider by configuring OIDC statements in the YAML file as described in [Configure Custom OIDC](kubernetes-edge.md#configure-custom-oidc). This setting displays in the YAML file as `none`. + + +- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. 
+ + +- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](../user-management/saml-sso/enable-saml.md) guide. + +:::info + +If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. + +::: + + +### Configure Custom OIDC + +Follow these steps to configure a third-party OIDC IDP. + + +1. Add the following OIDC parameters to the `apiServer.extraArgs` section of your Kubernetes YAML file when creating a cluster profile. + + +```yaml +cluster: + config: + clusterConfiguration: + apiServer: + extraArgs: + oidc-issuer-url: "provider URL" + oidc-client-id: "client-id" + oidc-groups-claim: "groups" + oidc-username-claim: "email" +``` + +2. Add the following `kubeadmconfig.clientConfig` section that contains OIDC parameters to your Kubernetes YAML file. + + +```yaml +kubeadmconfig: + clientConfig: + oidc-issuer-url: "" + oidc-client-id: "" + oidc-client-secret: "" + oidc-extra-scope: profile,email,openid +``` + +3. Provide third-party OIDC IDP details. + +### Use RBAC with OIDC + +You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + +Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. + +In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. + +![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) + + + + + + + + + +## Prerequisites + +- A minimum of 2 CPU and 4GB Memory. + + +## Parameters + +| Parameter | Description | +|----------------------------------------------------------|----------------------------------------------------| +| `cluster.config.clusterConfiguration.apiServer.extraArgs` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port.| +| `cluster.config.clusterConfiguration.apiServer.extraVolumes` | This parameter describes extra volumes for the Kubernetes API server, such as `audit-log` and `audit-policy`. | +| `cluster.config.clusterConfiguration.controllerManager.extraArgs` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | +| `cluster.config.clusterConfiguration.etcd.local.dataDir` | This parameter specifies the data directory for etcd, the distributed key-value store that Kubernetes uses to persist cluster state. 
| +| `cluster.config.clusterConfiguration.networking.podSubnet` | The IP subnet range to assign to pods. Default: 192.168.0.0/16 | +| `cluster.config.clusterConfiguration.networking.serviceSubnet` | The IP subnet range to assign to services. Default: 192.169.0.0/16 | +| `cluster.config.clusterConfiguration.scheduler.extraArgs` | This parameter contains extra arguments for the Kubernetes scheduler, such as disabling profiling. | +| `cluster.config.initConfiguration.nodeRegistration.kubeletExtraArgs` | This parameter contains extra arguments for kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | +| ``pack.palette.config.oidc.identityProvider`` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK-E pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](#configure-custom-oidc). | + +You can add cloud-init stages exposed by [Kairos](https://kairos.io/docs/architecture/cloud-init/), an open-source project. For more information, check out the [Cloud Init Stages](../clusters/edge/edge-configuration/cloud-init.md) reference. + +You can also use pack settings described in the [Palette eXtended Kubernetes](kubernetes.md) guide. + + +## Usage + +The Kubeadm configuration file is where you can do the following: + +- Manually configure a third-party OIDC IDP. For more information, check out [Configure Custom OIDC](kubernetes-edge.md#configure-custom-oidc). + +- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](frp.md) guide. + + +#### Configuration Changes + +The PXK-E Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. 
+ +```yaml +palette: + config: + oidc: + identityProvider: +``` + + +#### Example Kubeadm Configuration File + +```yaml +cluster: + config: | + clusterConfiguration: + apiServer: + extraArgs: + advertise-address: "0.0.0.0" + anonymous-auth: "true" + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + authorization-mode: RBAC,Node + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + disable-admission-plugins: AlwaysAdmit + enable-admission-plugins: AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction + profiling: "false" + secure-port: "6443" + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /var/log/apiserver + mountPath: /var/log/apiserver + name: audit-log + pathType: DirectoryOrCreate + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + timeoutForControlPlane: 10m0s + controllerManager: + extraArgs: + feature-gates: RotateKubeletServerCertificate=true + pod-eviction-timeout: 1m0s + profiling: "false" + terminated-pod-gc-threshold: "25" + use-service-account-credentials: "true" + dns: {} + kubernetesVersion: v1.25.2 + etcd: + local: + dataDir: "/etc/kubernetes/etcd" + extraArgs: + listen-client-urls: "https://0.0.0.0:2379" + networking: + podSubnet: 192.168.0.0/16 + serviceSubnet: 192.169.0.0/16 + scheduler: + extraArgs: + profiling: "false" + initConfiguration: + localAPIEndpoint: {} + nodeRegistration: + kubeletExtraArgs: + event-qps: "0" + feature-gates: RotateKubeletServerCertificate=true + protect-kernel-defaults: "true" + read-only-port: "0" + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + discovery: {} + nodeRegistration: + kubeletExtraArgs: + event-qps: "0" + feature-gates: RotateKubeletServerCertificate=true + protect-kernel-defaults: "true" + read-only-port: "0" + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + +stages: + initramfs: + - sysctl: + vm.overcommit_memory: 1 + kernel.panic: 10 + kernel.panic_on_oops: 1 + commands: + - "ln -s /etc/kubernetes/admin.conf /run/kubeconfig" + files: + - path: /etc/hosts + permission: "0644" + content: | + 127.0.0.1 localhost + - path: "/etc/kubernetes/audit-policy.yaml" + owner_string: "root" + permission: 0600 + content: | + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["system:unsecured"] + namespaces: ["kube-system"] + verbs: 
["get"] + resources: + - group: "" # core + resources: ["configmaps"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + - level: None + users: ["cluster-autoscaler"] + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["configmaps", "endpoints"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + pack: + palette: + config: + oidc: + identityProvider: palette + ``` + +### Configure OIDC Identity Provider + +The OIDC IDP feature offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured. In this scenario, you can leverage Palette as an IDP without having to configure a third-party IDP. We also support the ability to take advantage of other OIDC providers by making it possible for you to configure OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. + +When you add the PXK-E pack to a cluster profile, Palette displays the OIDC IDP options listed below. + +All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + +You can create a role binding that maps individual users or groups assigned within the OIDC provider's configuration to a role. To learn more, review [Use RBAC with OIDC](kubernetes-edge.md#use-rbac-with-oidc). You can also configure OIDC for virtual clusters. For guidance, refer to [Configure OIDC for a Virtual Cluster](../clusters/palette-virtual-clusters/configure-oidc-virtual-cluster.md). + + +- **None**: This is the default setting and there is nothing to configure. This setting displays in the YAML file as `noauth`. + + +- **Custom**: This setting allows you to specify a third-party OIDC provider by configuring OIDC statements in the Kubeadm configuration file as described in [Configure Custom OIDC](kubernetes-edge.md#configure-custom-oidc). This setting displays in the YAML file as `none`. + + +- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. 
+ + +- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](../user-management/saml-sso/enable-saml.md) guide. + +:::info + +If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. + +::: + +### Configure Custom OIDC + +Follow these steps to configure a third-party OIDC IDP. + +1. Add the following OIDC parameters to the `apiServer.extraArgs` section of your Kubernetes YAML file when creating a cluster profile. + + +```yaml +cluster: + config: + clusterConfiguration: + apiServer: + extraArgs: + oidc-issuer-url: "provider URL" + oidc-client-id: "client-id" + oidc-groups-claim: "groups" + oidc-username-claim: "email" +``` + +2. Add the following `kubeadmconfig.clientConfig` section that contains OIDC parameters to your Kubernetes YAML file. + + +```yaml +kubeadmconfig: + clientConfig: + oidc-issuer-url: "" + oidc-client-id: "" + oidc-client-secret: "" + oidc-extra-scope: profile,email,openid +``` + +3. Provide third-party OIDC IDP details. + +### Use RBAC with OIDC + +You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + +Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. + +In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. + +![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) + + + + + + + + +## Prerequisites + +- A minimum of 2 CPU and 4GB Memory. + + +## Parameters + +| Parameter | Description | +|----------------------------------------------------------|----------------------------------------------------| +| `cluster.config.clusterConfiguration.apiServer.extraArgs` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port.| +| `cluster.config.clusterConfiguration.apiServer.extraVolumes` | This parameter describes extra volumes for the Kubernetes API server, such as `audit-log` and `audit-policy`. | +| `cluster.config.clusterConfiguration.controllerManager.extraArgs` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | +| `cluster.config.clusterConfiguration.etcd.local.dataDir` | This parameter specifies the data directory for etcd, the distributed key-value store that Kubernetes uses to persist cluster state. 
| +| `cluster.config.clusterConfiguration.networking.podSubnet` | The IP subnet range to assign to pods. Default: 192.168.0.0/16 | +| `cluster.config.clusterConfiguration.networking.serviceSubnet` | The IP subnet range to assign to services. Default: 192.169.0.0/16 | +| `cluster.config.clusterConfiguration.scheduler.extraArgs` | This parameter contains extra arguments for the Kubernetes scheduler, such as disabling profiling. | +| `cluster.config.initConfiguration.nodeRegistration.kubeletExtraArgs` | This parameter contains extra arguments for kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | +| ``pack.palette.config.oidc.identityProvider`` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK-E pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](#configure-custom-oidc). | + +You can add cloud-init stages exposed by [Kairos](https://kairos.io/docs/architecture/cloud-init/), an open-source project. For more information, check out the [Cloud Init Stages](../clusters/edge/edge-configuration/cloud-init.md) reference. + +You can also use pack settings described in the [Palette eXtended Kubernetes](kubernetes.md) guide. + + +## Usage + +The Kubeadm configuration file is where you can do the following: + +- Manually configure a third-party OIDC IDP. For more information, check out [Configure Custom OIDC](kubernetes-edge.md#configure-custom-oidc). + +- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](frp.md) guide. + + +#### Configuration Changes + +The PXK-E Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. 
+ +```yaml +palette: + config: + dashboard: + identityProvider: +``` + + +#### Example Kubeadm Configuration File + +```yaml +cluster: + config: | + clusterConfiguration: + apiServer: + extraArgs: + advertise-address: "0.0.0.0" + anonymous-auth: "true" + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + authorization-mode: RBAC,Node + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + disable-admission-plugins: AlwaysAdmit + enable-admission-plugins: AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction + profiling: "false" + secure-port: "6443" + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /var/log/apiserver + mountPath: /var/log/apiserver + name: audit-log + pathType: DirectoryOrCreate + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + timeoutForControlPlane: 10m0s + controllerManager: + extraArgs: + feature-gates: RotateKubeletServerCertificate=true + pod-eviction-timeout: 1m0s + profiling: "false" + terminated-pod-gc-threshold: "25" + use-service-account-credentials: "true" + dns: {} + kubernetesVersion: v1.24.6 + etcd: + local: + dataDir: "/etc/kubernetes/etcd" + extraArgs: + listen-client-urls: "https://0.0.0.0:2379" + networking: + podSubnet: 192.168.0.0/16 + serviceSubnet: 192.169.0.0/16 + scheduler: + extraArgs: + profiling: "false" + initConfiguration: + localAPIEndpoint: {} + nodeRegistration: + kubeletExtraArgs: + event-qps: "0" + feature-gates: RotateKubeletServerCertificate=true + protect-kernel-defaults: "true" + read-only-port: "0" + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + discovery: {} + nodeRegistration: + kubeletExtraArgs: + event-qps: "0" + feature-gates: RotateKubeletServerCertificate=true + protect-kernel-defaults: "true" + read-only-port: "0" + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + +stages: + initramfs: + - sysctl: + vm.overcommit_memory: 1 + kernel.panic: 10 + kernel.panic_on_oops: 1 + commands: + - "ln -s /etc/kubernetes/admin.conf /run/kubeconfig" + files: + - path: /etc/hosts + permission: "0644" + content: | + 127.0.0.1 localhost + - path: "/etc/kubernetes/audit-policy.yaml" + owner_string: "root" + permission: 0600 + content: | + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["system:unsecured"] + namespaces: ["kube-system"] + verbs: 
["get"] + resources: + - group: "" # core + resources: ["configmaps"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + - level: None + users: ["cluster-autoscaler"] + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["configmaps", "endpoints"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + ``` + + +### Configure OIDC Identity Provider + +The OIDC IDP feature offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured. In this scenario, you can leverage Palette as an IDP without having to configure a third-party IDP. We also support the ability to take advantage of other OIDC providers by making it possible for you to configure OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. + +When you add the PXK-E pack to a cluster profile, Palette displays the OIDC IDP options listed below. + +All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + +You can create a role binding that maps individual users or groups assigned within the OIDC provider's configuration to a role. To learn more, review [Use RBAC with OIDC](kubernetes-edge.md#use-rbac-with-oidc). You can also configure OIDC for virtual clusters. For guidance, refer to [Configure OIDC for a Virtual Cluster](../clusters/palette-virtual-clusters/configure-oidc-virtual-cluster.md). + + +- **None**: This is the default setting and there is nothing to configure. This setting displays in the YAML file as `noauth`. + +- **Custom**: This setting allows you to specify a third-party OIDC provider by configuring OIDC statements in the Kubeadm configuration file as described in [Configure Custom OIDC](kubernetes-edge.md#configure-custom-oidc). This setting displays in the YAML file as `none`. + +- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. 
+ +- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](../user-management/saml-sso/enable-saml.md) guide. + +:::info + +If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. + +::: + + +### Configure Custom OIDC + +Follow these steps to configure a third-party OIDC IDP. + +1. Add the following OIDC parameters to the `apiServer.extraArgs` section of your Kubernetes YAML file when creating a cluster profile. + + +```yaml +cluster: + config: + clusterConfiguration: + apiServer: + extraArgs: + oidc-issuer-url: "provider URL" + oidc-client-id: "client-id" + oidc-groups-claim: "groups" + oidc-username-claim: "email" +``` + +2. Add the following `kubeadmconfig.clientConfig` section that contains OIDC parameters to your Kubernetes YAML file. + + +```yaml +kubeadmconfig: + clientConfig: + oidc-issuer-url: "" + oidc-client-id: "" + oidc-client-secret: "" + oidc-extra-scope: profile,email,openid +``` + +3. Provide third-party OIDC IDP details. + + +### Use RBAC with OIDC + +You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + +Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. + +In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. + +![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) + + + + + + + + +:::caution + +All versions less than v1.23.x are considered deprecated. Upgrade to a newer version to take advantage of new features. + +::: + + + + + + +## Terraform + +You can reference Kubernetes in Terraform with the following code snippet. + +
+ +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "edge-k8s" { + name = "edge-k8s" + version = "1.26.4" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + +## Resources + +- [Kubernetes](https://kubernetes.io/) + + + +- [Kubernetes Documentation](https://kubernetes.io/docs/concepts/overview/) + + + +- [Image Swap with Palette](../clusters/cluster-management/image-swap.md) diff --git a/docs/docs-content/integrations/kubernetes-generic.md b/docs/docs-content/integrations/kubernetes-generic.md new file mode 100644 index 0000000000..1550eb57b2 --- /dev/null +++ b/docs/docs-content/integrations/kubernetes-generic.md @@ -0,0 +1,985 @@ +--- +sidebar_label: "Kubernetes" +title: "Kubernetes" +description: "Learn about the Kubernetes pack and how you can use it with your host clusters." +hide_table_of_contents: true +type: "integration" +category: ["kubernetes", 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: "https://registry.spectrocloud.com/v1/k8s-dashboard/blobs/sha256:2de5d88b2573af42d4cc269dff75744c4174ce47cbbeed5445e51a2edd8b7429?type=image/png" +tags: ["packs", "kubernetes"] +--- + +The Kubernetes pack supports several cloud and data center infrastructure providers. This pack defines the default properties we use to deploy Kubernetes clusters and enables most of the Kubernetes hardening standards that the Center for Internet Security (CIS) recommends. + +We also support managed Kubernetes distributions for Elastic Kubernetes Service (EKS), Azure Kubernetes Service (AKS), Google Kubernetes Engine (GKE), and Tencent Kubernetes Engine (TKE). + +:::info +Review the [Maintenance Policy](maintenance-policy.md) to learn about pack update and deprecation schedules. +::: + +
+ +## Versions Supported + + + + + + + +## Prerequisites + +- A minimum of 4 CPU and 4GB Memory. + + +- Users or groups mapped to a Kubernetes RBAC role. + + +- Operating System (OS) dependencies as listed in the table. + +| OS Distribution | OS Version | Supports Kubernetes 1.27.x | +|---------------|------------|----------------------------| +| CentOS | 7.7 | ❌ | +| Ubuntu | 22.04 | ✅ | +| Ubuntu | 20.04 | ❌ | +| Ubuntu | 18.04 | ❌ | + + +## Parameters + +| Parameter | Description | +|-----------|-------------| +| `pack.palette.config.oidcidentityProvider`| OIDC identity provider configuration. | +| ``pack.podCIDR`` | The CIDR range for Pods in cluster. This should match the networking layer property. Default: `192.168.0.0/16`| +| ``pack.serviceClusterIpRange`` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| +| `pack.serviceDomain` | The cluster DNS service domain. Default: `cluster.local`. To change the default, you must add this parameter to the Kubernetes YAML file at cluster creation and specify the cluster DNS service domain to use. This value cannot be changed after cluster creation is complete. Refer to the [Change Cluster DNS Service Domain](kubernetes-generic.md?platform=AKS&versions=k8s_v1.27#change-cluster-dns-service-domain) section. | +| ``kubeadmconfig.apiServer.extraArgs`` | A list of additional apiServer flags you can set.| +| ``kubeadmconfig.apiServer.extraVolumes`` | A list of additional volumes to mount on apiServer.| +| ``kubeadmconfig.controllerManager.extraArgs`` | A list of additional ControllerManager flags to set.| +| ``kubeadmconfig.scheduler.extraArgs`` | A list of additional Kube scheduler flags to set.| +| ``kubeadmconfig.kubeletExtraArgs`` | A list of kubelet arguments to set and copy to the nodes.| +| ``kubeadmconfig.files`` | A list of additional files to copy to the nodes.| +| ``kubeadmconfig.preKubeadmCommands`` | A list of additional commands to invoke **before** running kubeadm commands.| +| ``kubeadmconfig.postKubeadmCommands`` | A list of additional commands to invoke **after** running kubeadm commands.| + + +## Usage + +The Kubeadm configuration file is where you can do the following: + +- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. + + As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](../clusters/clusters.md) guide and [Cluster Deployment Errors](../troubleshooting/cluster-deployment.md). + + +- Change the default cluster DNS service domain from `cluster.local` to a DNS domain that you specify. You can only change the DNS domain during cluster creation. For more information, refer to [Change Cluster DNS Service Domain](kubernetes-generic.md?platform=AKS&versions=k8s_v1.27#change-cluster-dns-service-domain). + +- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](frp.md) guide. + +### Change Cluster DNS Service Domain + +The `pack.serviceDomain` parameter with default value `cluster.local` is not visible in the Kubernetes YAML file, and its value can only be changed at cluster creation. 
To change the value, you must add `serviceDomain: "cluster.local"` to the Kubernetes YAML file when you create a cluster, and specify the service domain you want to use. + +```yaml hideClipboard +pack: + k8sHardening: True + podCIDR: "172.16.0.0/16" + serviceClusterIPRange: "10.96.0.0/12" + serviceDomain: "" +``` + +:::caution + +You can only specify the service domain at cluster creation. After cluster creation completes, you cannot update the value. Attempting to update it results in the error `serviceDomain update is forbidden for existing cluster`. + +::: + +For more information about networking configuration with DNS domains, refer to the Kubernetes [Networking](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/#kubeadm-k8s-io-v1beta3-Networking) API documentation. + +### Configuration Changes + +The Kubeadm config is updated with hardening improvements that do the following: + +- Meet CIS standards for operating systems (OS). + +- Enable a Kubernetes audit policy in the pack. The audit policy is hidden, and you cannot customize the default audit policy. If you want to apply your custom audit policy, refer to the [Enable Audit Logging](../audit-logs/kube-api-audit-logging.md) guide to learn how to create your custom audit policy by adjusting API server flags. + +- Replace a deprecated PodSecurityPolicy (PSP) with one that offers three built-in policy profiles for broad security coverage: + + - **Privileged**: An unrestricted policy that provides wide permission levels and allows for known privilege escalations. + + - **Baseline**: A policy that offers minimal restrictions and prevents known privilege escalations. As shown in the example below, you can override the default cluster-wide policy to set baseline enforcement by enabling the `PodSecurity` Admission plugin in the `enable-admission-plugins` section of the YAML file. You can then add a custom Admission configuration and set the `admission-control-config-file` flag to the custom Admission. + + ```yaml + kubeadmconfig: + apiServer: + extraArgs: + secure-port: "6443" + anonymous-auth: "true" + profiling: "false" + disable-admission-plugins: "AlwaysAdmit" + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" + admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + ``` + + - **Restricted**: A heavily restricted policy that follows Pod hardening best practices. This policy is set to warn and audit and identifies Pods that require privileged access. + + You can enforce these policies at the cluster level or the Namespace level. For workloads that require privileged access, you can relax `PodSecurity` enforcement by adding these labels in the Namespace: + + ```yaml + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/enforce-version: v1.26 + ``` + + +### Kubeadm Configuration File + +The default pack YAML contains minimal configurations offered by the managed provider. + + +### Configure OIDC Identity Provider + +You can configure an OpenID Connect (OIDC) identity provider to authenticate users and groups in your cluster. OIDC is an authentication layer on top of OAuth 2.0, an authorization framework that allows users to authenticate to a cluster without using a password. 
+ +OIDC requires a *RoleBinding* for the users or groups you want to provide cluster access. You must create a RoleBinding to a Kubernetes role that is available in the cluster. The Kubernetes role can be a custom role you created or a [default Kubernetes role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles), such as the `cluster-admin` role. To learn how to create a RoleBinding through Palette, refer to [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + + +#### Configure Custom OIDC + +The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory). + + + + + + +Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. + + +1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. Replace the `identityProvider` value with your OIDC provider name. + + ```yaml + pack: + palette: + config: + oidc: + identityProvider: yourIdentityProviderNameHere + ``` + +2. Add the following `kubeadmconfig` parameters. Replace the values with your OIDC provider values. + + ```yaml + kubeadmconfig: + apiServer: + extraArgs: + oidc-issuer-url: "provider URL" + oidc-client-id: "client-id" + oidc-groups-claim: "groups" + oidc-username-claim: "email" + ``` + +3. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. + + ```yaml + kubeadmconfig: + clientConfig: + oidc-issuer-url: "" + oidc-client-id: "" + oidc-client-secret: "" + oidc-extra-scope: profile,email,openid + ``` + + + + + + + +Follow these steps to configure OIDC for managed EKS clusters. + +
+ +1. In the Kubernetes pack, uncomment the lines in the `oidcIdentityProvider` parameter section of the Kubernetes pack, and enter your third-party provider details. + +```yaml +oidcIdentityProvider: + identityProviderConfigName: 'Spectro-docs' + issuerUrl: 'issuer-url' + clientId: 'user-client-id-from-Palette' + usernameClaim: "email" + usernamePrefix: "-" + groupsClaim: "groups" + groupsPrefix: "" + requiredClaims: +``` + +2. Under the `clientConfig` parameter section of Kubernetes pack, uncomment the `oidc-` configuration lines. + +```yaml +clientConfig: + oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" + oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" + oidc-client-secret: yourSecretKeyHere + oidc-extra-scope: profile,email +``` + +3. Provide third-party OIDC IDP details. + +
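+
+After the IDP details are in place, grant cluster access by binding the OIDC users or groups to a Kubernetes role. As a reference, the sketch below shows a minimal `ClusterRoleBinding` that maps a hypothetical OIDC group named `dev-east-2` to the built-in `cluster-admin` role. The binding and group names are placeholders; replace them with a group from your IDP and the role you want to grant. To create role bindings through Palette, refer to [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings).
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  # Placeholder binding name.
+  name: oidc-group-cluster-admin
+subjects:
+  # Group name as assigned in your OIDC provider's configuration.
+  - kind: Group
+    name: dev-east-2
+    apiGroup: rbac.authorization.k8s.io
+roleRef:
+  kind: ClusterRole
+  name: cluster-admin
+  apiGroup: rbac.authorization.k8s.io
+```
+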
+
+ + +
+ + + + +## Prerequisites + +- A minimum of 4 CPU and 4GB Memory. + + +- Users or groups mapped to a Kubernetes RBAC role. + + +- Operating System (OS) dependencies as listed in the table. + +| OS Distribution | OS Version | Supports Kubernetes 1.26.x | +|---------------|------------|----------------------------| +| CentOS | 7.7 | ✅ | +| Ubuntu | 22.04 | ✅ | +| Ubuntu | 20.04 | ❌ | +| Ubuntu | 18.04 | ❌ | + + +## Parameters + +| Parameter | Description | +|-----------|-------------| +| `pack.palette.config.oidcidentityProvider`| OIDC identity provider configuration. | +| ``pack.podCIDR`` | The CIDR range for Pods in cluster. This should match the networking layer property. Default: `192.168.0.0/16`| +| ``pack.serviceClusterIpRange`` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| +| `pack.serviceDomain` | The cluster DNS service domain. Default: `cluster.local`. To change the default, you must add this parameter to the Kubernetes YAML file at cluster creation and specify the cluster DNS service domain to use. This value cannot be changed after cluster creation is complete. Refer to the [Change Cluster DNS Service Domain](kubernetes-generic.md?platform=AKS&versions=k8s_v1.26#change-cluster-dns-service-domain-1) section. | +| ``kubeadmconfig.apiServer.extraArgs`` | A list of additional apiServer flags you can set.| +| ``kubeadmconfig.apiServer.extraVolumes`` | A list of additional volumes to mount on apiServer.| +| ``kubeadmconfig.controllerManager.extraArgs`` | A list of additional ControllerManager flags to set.| +| ``kubeadmconfig.scheduler.extraArgs`` | A list of additional Kube scheduler flags to set.| +| ``kubeadmconfig.kubeletExtraArgs`` | A list of kubelet arguments to set and copy to the nodes.| +| ``kubeadmconfig.files`` | A list of additional files to copy to the nodes.| +| ``kubeadmconfig.preKubeadmCommands`` | A list of additional commands to invoke **before** running kubeadm commands.| +| ``kubeadmconfig.postKubeadmCommands`` | A list of additional commands to invoke **after** running kubeadm commands.| + + +## Usage + +The Kubeadm configuration file is where you can do the following: + +- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. + + As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](../clusters/clusters.md) guide and [Cluster Deployment Errors](../troubleshooting/cluster-deployment.md). + +- Change the default cluster DNS service domain from `cluster.local` to a DNS domain that you specify. You can only change the DNS domain during cluster creation. For more information, refer to [Change Cluster DNS Service Domain](kubernetes-generic.md?platform=AKS&versions=k8s_v1.26#change-cluster-dns-service-domain-1). + +- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](frp.md) guide. + +### Change Cluster DNS Service Domain + +The `pack.serviceDomain` parameter with default value `cluster.local` is not visible in the Kubernetes YAML file, and its value can only be changed at cluster creation. 
To change the value, you must add `serviceDomain: "cluster.local"` to the Kubernetes YAML file when you create a cluster, and specify the service domain you want to use. + +```yaml +pack: + k8sHardening: True + podCIDR: "172.16.0.0/16" + serviceClusterIPRange: "10.96.0.0/12" + serviceDomain: "" +``` + +:::caution + +You can only specify the service domain at cluster creation. After cluster creation completes, you cannot update the value. Attempting to update it results in the error `serviceDomain update is forbidden for existing cluster`. + +::: + +For more information about networking configuration with DNS domains, refer to the Kubernetes [Networking](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/#kubeadm-k8s-io-v1beta3-Networking) API documentation. + + +### Configuration Changes + +The Kubeadm config is updated with hardening improvements that do the following: + +- Meet CIS standards for operating systems (OS). + +- Enable a Kubernetes audit policy in the pack. The audit policy is hidden, and you cannot customize the default audit policy. If you want to apply your custom audit policy, refer to the [Enable Audit Logging](../audit-logs/kube-api-audit-logging.md) guide to learn how to create your custom audit policy by adjusting API server flags. + +- Replace a deprecated PodSecurityPolicy (PSP) with one that offers three built-in policy profiles for broad security coverage: + + - **Privileged**: An unrestricted policy that provides wide permission levels and allows for known privilege escalations. + + - **Baseline**: A policy that offers minimal restrictions and prevents known privilege escalations. As shown in the example below, you can override the default cluster-wide policy to set baseline enforcement by enabling the `PodSecurity` Admission plugin in the `enable-admission-plugins` section of the YAML file. You can then add a custom Admission configuration and set the `admission-control-config-file` flag to the custom Admission. + + ```yaml + kubeadmconfig: + apiServer: + extraArgs: + secure-port: "6443" + anonymous-auth: "true" + profiling: "false" + disable-admission-plugins: "AlwaysAdmit" + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" + admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + ``` + + - **Restricted**: A heavily restricted policy that follows Pod hardening best practices. This policy is set to warn and audit and identifies Pods that require privileged access. + + You can enforce these policies at the cluster level or the Namespace level. For workloads that require privileged access, you can relax `PodSecurity` enforcement by adding these labels in the Namespace: + + ```yaml + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/enforce-version: v1.26 + ``` + +### Kubeadm Configuration File + +The default pack YAML contains minimal configurations offered by the managed provider. + + +### Configure OIDC Identity Provider + +You can configure an OpenID Connect (OIDC) identity provider to authenticate users and groups in your cluster. OIDC is an authentication layer on top of OAuth 2.0, an authorization framework that allows users to authenticate to a cluster without using a password. 
+ +OIDC requires a *RoleBinding* for the users or groups you want to provide cluster access. You must create a RoleBinding to a Kubernetes role that is available in the cluster. The Kubernetes role can be a custom role you created or a [default Kubernetes role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles), such as the `cluster-admin` role. To learn how to create a RoleBinding through Palette, refer to [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + +#### Configure Custom OIDC + +The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory). + + + + + + +Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. + +1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. Replace the `identityProvider` value with your OIDC provider name. + +
+ + ```yaml + pack: + palette: + config: + oidc: + identityProvider: yourIdentityProviderNameHere + ``` + +2. Add the following `kubeadmconfig` parameters. Replace the values with your OIDC provider values. + + ```yaml + kubeadmconfig: + apiServer: + extraArgs: + oidc-issuer-url: "provider URL" + oidc-client-id: "client-id" + oidc-groups-claim: "groups" + oidc-username-claim: "email" + ``` + +3. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. + + ```yaml + kubeadmconfig: + clientConfig: + oidc-issuer-url: "" + oidc-client-id: "" + oidc-client-secret: "" + oidc-extra-scope: profile,email,openid + ``` + + +
+ + + + +Follow these steps to configure OIDC for managed EKS clusters. + +1. In the Kubernetes pack, uncomment the lines in the `oidcIdentityProvider` parameter section of the Kubernetes pack, and enter your third-party provider details. + +```yaml +oidcIdentityProvider: + identityProviderConfigName: 'Spectro-docs' + issuerUrl: 'issuer-url' + clientId: 'user-client-id-from-Palette' + usernameClaim: "email" + usernamePrefix: "-" + groupsClaim: "groups" + groupsPrefix: "" + requiredClaims: +``` + +2. Under the `clientConfig` parameter section of Kubernetes pack, uncomment the `oidc-` configuration lines. + +```yaml +clientConfig: + oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" + oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" + oidc-client-secret: yourSecretKeyHere + oidc-extra-scope: profile,email +``` + +3. Provide third-party OIDC IDP details. + + +
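+
+After the IDP details are in place, grant cluster access by binding the OIDC users or groups to a Kubernetes role. As a reference, the sketch below shows a minimal `ClusterRoleBinding` that maps a hypothetical OIDC group named `dev-east-2` to the built-in `cluster-admin` role. The binding and group names are placeholders; replace them with a group from your IDP and the role you want to grant. To create role bindings through Palette, refer to [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings).
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  # Placeholder binding name.
+  name: oidc-group-cluster-admin
+subjects:
+  # Group name as assigned in your OIDC provider's configuration.
+  - kind: Group
+    name: dev-east-2
+    apiGroup: rbac.authorization.k8s.io
+roleRef:
+  kind: ClusterRole
+  name: cluster-admin
+  apiGroup: rbac.authorization.k8s.io
+```
+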
+ + +
+ + + + + +## Prerequisites + +- A minimum of 4 CPU and 4GB Memory. + +- Operating System (OS) dependencies as listed in the table. + +| OS Distribution | OS Version | Supports Kubernetes 1.25.x | +|---------------|------------|----------------------------| +| CentOS | 7.7 | ✅ | +| Ubuntu | 22.04 | ✅ | +| Ubuntu | 20.04 | ❌ | +| Ubuntu | 18.04 | ❌ | + + +## Parameters + +| Parameter | Description | +|-----------|-------------| +| `pack.palette.config.oidcidentityProvider`| OIDC identity provider configuration. | +| ``pack.podCIDR`` | The CIDR range for Pods in cluster. This should match the networking layer property. Default: `192.168.0.0/16`| +| ``pack.serviceClusterIpRange`` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| +| `pack.serviceDomain` | The cluster DNS service domain. Default: `cluster.local`. To change the default, you must add this parameter to the Kubernetes YAML file at cluster creation and specify the cluster DNS service domain to use. This value cannot be changed after cluster creation is complete. Refer to the [Change Cluster DNS Service Domain](kubernetes-generic.md?versions=k8s_v1.25#change-cluster-dns-service-domain-2) section. | +| ``kubeadmconfig.apiServer.extraArgs`` | A list of additional apiServer flags you can set.| +| ``kubeadmconfig.apiServer.extraVolumes`` | A list of additional volumes to mount on apiServer.| +| ``kubeadmconfig.controllerManager.extraArgs`` | A list of additional ControllerManager flags to set.| +| ``kubeadmconfig.scheduler.extraArgs`` | A list of additional Kube scheduler flags to set.| +| ``kubeadmconfig.kubeletExtraArgs`` | A list of kubelet arguments to set and copy to the nodes.| +| ``kubeadmconfig.files`` | A list of additional files to copy to the nodes.| +| ``kubeadmconfig.preKubeadmCommands`` | A list of additional commands to invoke **before** running kubeadm commands.| +| ``kubeadmconfig.postKubeadmCommands`` | A list of additional commands to invoke **after** running kubeadm commands.| + +## Usage + +The Kubeadm configuration file is where you can do the following: + +- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. + + As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](../clusters/clusters.md) guide and [Cluster Deployment Errors](../troubleshooting/cluster-deployment.md). + +- Change the default cluster DNS service domain from `cluster.local` to a DNS domain that you specify. You can only change the DNS domain during cluster creation. For more information, refer to [Change Cluster DNS Service Domain](kubernetes-generic.md?versions=k8s_v1.25#change-cluster-dns-service-domain-2). + +- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](frp.md) guide. + +### Change Cluster DNS Service Domain + +The `pack.serviceDomain` parameter with default value `cluster.local` is not visible in the Kubernetes YAML file, and its value can only be changed at cluster creation. 
To change the value, you must add `serviceDomain: "cluster.local"` to the Kubernetes YAML file when you create a cluster, and specify the service domain you want to use. + +```yaml +pack: + k8sHardening: True + podCIDR: "172.16.0.0/16" + serviceClusterIPRange: "10.96.0.0/12" + serviceDomain: "" +``` + +:::caution + +You can only specify the service domain at cluster creation. After cluster creation completes, you cannot update the value. Attempting to update it results in the error `serviceDomain update is forbidden for existing cluster`. + +::: + +For more information about networking configuration with DNS domains, refer to the Kubernetes [Networking](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/#kubeadm-k8s-io-v1beta3-Networking) API documentation. + +### Configuration Changes + +The Kubeadm config is updated with hardening improvements that do the following: + +- Meet CIS standards for operating systems (OS). + + +- Enable a Kubernetes audit policy in the pack that you can customize by adjusting API server flags. + + +- Replace a deprecated PodSecurityPolicy (PSP) with one that offers three built-in policy profiles for broad security coverage: + + - **Privileged**: An unrestricted policy that provides wide permission levels and allows for known privilege escalations. + + - **Baseline**: A policy that offers minimal restrictions and prevents known privilege escalations. As shown in the example below, you can override the default cluster-wide policy to set baseline enforcement by enabling the `PodSecurity` Admission plugin in the `enable-admission-plugins` section of the YAML file. You can then add a custom Admission configuration and set the `admission-control-config-file` flag to the custom Admission. + + + ```yaml + kubeadmconfig: + apiServer: + extraArgs: + secure-port: "6443" + anonymous-auth: "true" + profiling: "false" + disable-admission-plugins: "AlwaysAdmit" + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" + admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + ``` + + - **Restricted**: A heavily restricted policy that follows Pod hardening best practices. This policy is set to warn and audit and identifies Pods that require privileged access. + + You can enforce these policies at the cluster level or the Namespace level. For workloads that require privileged access, you can relax `PodSecurity` enforcement by adding these labels in the Namespace: + + ```yaml + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/enforce-version: v1.25 + ``` + +### Kubeadm Configuration File + +The default pack YAML contains minimal configurations offered by the managed provider. + + +### Configure OIDC Identity Provider + +You can configure an OpenID Connect (OIDC) identity provider to authenticate users and groups in your cluster. OIDC is an authentication layer on top of OAuth 2.0, an authorization framework that allows users to authenticate to a cluster without using a password. + +OIDC requires a *RoleBinding* for the users or groups you want to provide cluster access. You must create a RoleBinding to a Kubernetes role that is available in the cluster. 
The Kubernetes role can be a custom role you created or a [default Kubernetes role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles), such as the `cluster-admin` role. To learn how to create a RoleBinding through Palette, refer to [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + + +#### Configure Custom OIDC + +The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory). + + + + + + +Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. + +1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. Replace the `identityProvider` value with your OIDC provider name. + + ```yaml + pack: + palette: + config: + oidc: + identityProvider: palette + ``` + + +2. Add the following `kubeadmconfig` parameters. Replace the values with your OIDC provider values. + + ```yaml + kubeadmconfig: + apiServer: + extraArgs: + oidc-issuer-url: "provider URL" + oidc-client-id: "client-id" + oidc-groups-claim: "groups" + oidc-username-claim: "email" + ``` + +3. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. + + ```yaml + kubeadmconfig: + clientConfig: + oidc-issuer-url: "" + oidc-client-id: "" + oidc-client-secret: "" + oidc-extra-scope: profile,email,openid + ``` + + + + + + + +Follow these steps to configure OIDC for managed EKS clusters. + +
+
+1. In the Kubernetes pack, uncomment the lines in the `oidcIdentityProvider` parameter section and enter your third-party provider details.
+
+ + ```yaml + oidcIdentityProvider: + identityProviderConfigName: 'Spectro-docs' + issuerUrl: 'issuer-url' + clientId: 'user-client-id-from-Palette' + usernameClaim: "email" + usernamePrefix: "-" + groupsClaim: "groups" + groupsPrefix: "" + requiredClaims: + ``` + +2. Under the `clientConfig` parameter section of Kubernetes pack, uncomment the `oidc-` configuration lines. + +
+ + ```yaml + clientConfig: + oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" + oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" + oidc-client-secret: yourSecretKeyHere + oidc-extra-scope: profile,email + ``` + +3. Provide third-party OIDC IDP details. + +
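+
+After the IDP details are in place, grant cluster access by binding the OIDC users or groups to a Kubernetes role. As a reference, the sketch below shows a minimal `ClusterRoleBinding` that maps a hypothetical OIDC group named `dev-east-2` to the built-in `cluster-admin` role. The binding and group names are placeholders; replace them with a group from your IDP and the role you want to grant. To create role bindings through Palette, refer to [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings).
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  # Placeholder binding name.
+  name: oidc-group-cluster-admin
+subjects:
+  # Group name as assigned in your OIDC provider's configuration.
+  - kind: Group
+    name: dev-east-2
+    apiGroup: rbac.authorization.k8s.io
+roleRef:
+  kind: ClusterRole
+  name: cluster-admin
+  apiGroup: rbac.authorization.k8s.io
+```
+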
+
+ +
+ + + + + +## Prerequisites + +- A minimum of 4 CPU and 4GB Memory. + + +- Operating System (OS) dependencies as listed in the table. + +| OS Distribution | OS Version | Supports Kubernetes 1.24.x | +|---------------|------------|----------------------------| +| CentOS | 7.7 | ✅ | +| Ubuntu | 22.04 | ❌ | +| Ubuntu | 20.04 | ✅ | +| Ubuntu | 18.04 | ❌ | + + +## Parameters + +| Parameter | Description | +|-----------|-------------| +| `pack.palette.config.oidcidentityProvider`| OIDC identity provider configuration. | +| `pack.podCIDR` | The CIDR range for Pods in cluster. This should match the networking layer property. Default: `192.168.0.0/16`| +| `pack.serviceClusterIpRange` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| +| `pack.serviceDomain` | The cluster DNS service domain. Default: `cluster.local`. To change the default, you must add this parameter to the Kubernetes YAML file at cluster creation and specify the cluster DNS service domain to use. This value cannot be changed after cluster creation is complete. Refer to the [Change Cluster DNS Service Domain](kubernetes-generic.md?versions=k8s_v1.24#change-cluster-dns-service-domain-3) section. | +| `kubeadmconfig.apiServer.extraArgs` | A list of additional apiServer flags you can set.| +| `kubeadmconfig.apiServer.extraVolumes` | A list of additional volumes to mount on apiServer.| +| `kubeadmconfig.controllerManager.extraArgs` | A list of additional ControllerManager flags to set.| +| `kubeadmconfig.scheduler.extraArgs` | A list of additional Kube scheduler flags to set.| +| `kubeadmconfig.kubeletExtraArgs` | A list of kubelet arguments to set and copy to the nodes.| +| `kubeadmconfig.files` | A list of additional files to copy to the nodes. | +| `kubeadmconfig.preKubeadmCommands` | A list of additional commands to invoke **before** running kubeadm commands.| +| `kubeadmconfig.postKubeadmCommands` | A list of additional commands to invoke **after** running kubeadm commands.| + +## Usage + +The Kubeadm configuration file is where you can do the following: + +- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. + + As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](../clusters/clusters.md) guide and [Cluster Deployment Errors](../troubleshooting/cluster-deployment.md). + +- Change the default cluster DNS service domain from `cluster.local` to a DNS domain that you specify. You can only change the DNS domain during cluster creation. For more information, refer to [Change Cluster DNS Service Domain](kubernetes-generic.md?versions=k8s_v1.24#change-cluster-dns-service-domain-3). + +- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](frp.md) guide. + +### Change Cluster DNS Service Domain + +The `pack.serviceDomain` parameter with default value `cluster.local` is not visible in the Kubernetes YAML file, and its value can only be changed at cluster creation. 
To change the value, you must add `serviceDomain: "cluster.local"` to the Kubernetes YAML file when you create a cluster, and specify the service domain you want to use. + +```yaml +pack: + k8sHardening: True + podCIDR: "172.16.0.0/16" + serviceClusterIPRange: "10.96.0.0/12" + serviceDomain: "" +``` + +:::caution + +You can only specify the service domain at cluster creation. After cluster creation completes, you cannot update the value. Attempting to update it results in the error `serviceDomain update is forbidden for existing cluster`. + +::: + +For more information about networking configuration with DNS domains, refer to the Kubernetes [Networking](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/#kubeadm-k8s-io-v1beta3-Networking) API documentation. + +
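+
+For example, if you wanted the cluster DNS service domain to be `cluster.internal` — an illustrative value only — the pack values at cluster creation would look similar to the following sketch.
+
+```yaml
+pack:
+  k8sHardening: True
+  podCIDR: "172.16.0.0/16"
+  serviceClusterIPRange: "10.96.0.0/12"
+  # Illustrative custom DNS service domain. This value can only be set at cluster creation.
+  serviceDomain: "cluster.internal"
+```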
+ +### Configuration Changes + +The Kubeadm config is updated with hardening improvements that do the following: + +- Meet CIS standards for operating systems (OS). + + +- Enable a Kubernetes audit policy in the pack that you can customize by adjusting API server flags. + + +- Replace a deprecated PodSecurityPolicy (PSP) with one that offers three built-in policy profiles for broad security coverage: + + - **Privileged**: An unrestricted policy that provides wide permission levels and allows for known privilege escalations. + + - **Baseline**: A policy that offers minimal restrictions and prevents known privilege escalations. As shown in the example below, you can override the default cluster-wide policy to set baseline enforcement by enabling the `PodSecurity` Admission plugin in the `enable-admission-plugins` section of the YAML file. You can then add a custom Admission configuration and set the `admission-control-config-file` flag to the custom Admission. + + ```yaml + kubeadmconfig: + apiServer: + extraArgs: + secure-port: "6443" + anonymous-auth: "true" + profiling: "false" + disable-admission-plugins: "AlwaysAdmit" + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" + admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + ``` + + - **Restricted**: A heavily restricted policy that follows Pod hardening best practices. This policy is set to warn and audit and identifies Pods that require privileged access. + + You can enforce these policies at the cluster level or the Namespace level. For workloads that require privileged access, you can relax `PodSecurity` enforcement by adding these labels in the Namespace: + + ```yaml + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/enforce-version: v1.24 + ``` + + +### Kubeadm Configuration File + +The default pack YAML contains minimal configurations offered by the managed provider. + + +### Configure OIDC Identity Provider + +You can configure an OpenID Connect (OIDC) identity provider to authenticate users and groups in your cluster. OIDC is an authentication layer on top of OAuth 2.0, an authorization framework that allows users to authenticate to a cluster without using a password. + +OIDC requires a *RoleBinding* for the users or groups you want to provide cluster access. You must create a RoleBinding to a Kubernetes role that is available in the cluster. The Kubernetes role can be a custom role you created or a [default Kubernetes role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles), such as the `cluster-admin` role. To learn how to create a RoleBinding through Palette, refer to [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + + +#### Configure Custom OIDC + +The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory). + + + + + + +Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. 
AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. + + +1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. Replace the `identityProvider` value with your OIDC provider name. + + ```yaml + pack: + palette: + config: + oidc: + identityProvider: palette + ``` + + +2. Add the following `kubeadmconfig` parameters. Replace the values with your OIDC provider values. + + ```yaml + kubeadmconfig: + apiServer: + extraArgs: + oidc-issuer-url: "provider URL" + oidc-client-id: "client-id" + oidc-groups-claim: "groups" + oidc-username-claim: "email" + ``` + +3. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. + + ```yaml + kubeadmconfig: + clientConfig: + oidc-issuer-url: "" + oidc-client-id: "" + oidc-client-secret: "" + oidc-extra-scope: profile,email,openid + ``` + + + + + + + +Follow these steps to configure OIDC for managed EKS clusters. + +
+
+1. In the Kubernetes pack, uncomment the lines in the `oidcIdentityProvider` parameter section and enter your third-party provider details.
+
+ + ```yaml + oidcIdentityProvider: + identityProviderConfigName: 'Spectro-docs' + issuerUrl: 'issuer-url' + clientId: 'user-client-id-from-Palette' + usernameClaim: "email" + usernamePrefix: "-" + groupsClaim: "groups" + groupsPrefix: "" + requiredClaims: + ``` + +2. Under the `clientConfig` parameter section of Kubernetes pack, uncomment the `oidc-` configuration lines. + +
+ + ```yaml + clientConfig: + oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" + oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" + oidc-client-secret: yourSecretKeyHere + oidc-extra-scope: profile,email + ``` + +3. Provide third-party OIDC IDP details. + +
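+
+The `clientConfig` flags are used to add OIDC authentication settings to the kubeconfig file. As a rough illustration only — the exact stanza Palette generates may differ, and every value below is a placeholder — an OIDC user entry in a kubeconfig that uses the legacy `oidc` auth provider has the following shape.
+
+```yaml
+users:
+  - name: oidc-user
+    user:
+      auth-provider:
+        name: oidc
+        config:
+          # Placeholder issuer, client ID, and client secret.
+          idp-issuer-url: https://idp.example.com
+          client-id: example-client-id
+          client-secret: example-client-secret
+```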
+
+ + +
+
+
+
+:::caution
+
+All Kubernetes versions earlier than v1.23.x are deprecated. Upgrade to a newer version to take advantage of new features.
+
+:::
+
+
+ +
+
+ + +## Terraform + + +You can reference Kubernetes in Terraform with the following code snippet. + + + + + + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "k8s" { + name = "kubernetes-aks" + version = "1.26" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + + + + + + + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "k8s" { + name = "kubernetes-eks" + version = "1.24" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + + + + + + + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "k8s" { + name = "kubernetes-gke" + version = "1.25.8" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + + + + + + + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "k8s" { + name = "kubernetes-tke" + version = "1.24.4" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + + + + +## Resources + +- [Kubernetes](https://kubernetes.io/) + + + +- [Kubernetes Documentation](https://kubernetes.io/docs/concepts/overview/) + + + +- [Image Swap with Palette](../clusters/cluster-management/image-swap.md) diff --git a/docs/docs-content/integrations/kubernetes.md b/docs/docs-content/integrations/kubernetes.md new file mode 100644 index 0000000000..ca022fbec8 --- /dev/null +++ b/docs/docs-content/integrations/kubernetes.md @@ -0,0 +1,1287 @@ +--- +sidebar_label: "Palette eXtended Kubernetes" +title: "Palette eXtended Kubernetes" +description: "Learn about the Palette eXtended Kubernetes pack and how you can use it with your host clusters." +hide_table_of_contents: true +type: "integration" +category: ["kubernetes", 'amd64', 'fips'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: "https://registry.spectrocloud.com/v1/k8s-dashboard/blobs/sha256:2de5d88b2573af42d4cc269dff75744c4174ce47cbbeed5445e51a2edd8b7429?type=image/png" +tags: ["packs", "kubernetes", "pxk"] +--- + +The Palette eXtended Kubernetes (PXK) pack supports several [cloud and data center infrastructure providers](/clusters). This pack defines the default properties we use to deploy Kubernetes clusters and enables most of the Kubernetes hardening standards that the Center for Internet Security (CIS) recommends. + +We also support managed Kubernetes distributions for Elastic Kubernetes Service (EKS), Azure Kubernetes Service (AKS), Google Kubernetes Engine (GKE), and Tencent Kubernetes Engine (TKE). + +We offer PXK as a core pack in Palette. + +Review our [Maintenance Policy](maintenance-policy.md) to learn about pack update and deprecation schedules. + +## What is PXK? + +Palette eXtended Kubernetes (PXK) is a customized version of the open-source Cloud Native Computing Foundation (CNCF) distribution of Kubernetes. This Kubernetes version can be deployed through Palette to all major infrastructure providers, public cloud providers, and private data center providers. This is the default distribution when deploying a Kubernetes cluster through Palette. You have the option to choose other Kubernetes distributions, such as MicroK8s, Konvoy, and more, should you want to consume a different Kubernetes distribution. 
+ +PXK is different from the upstream open-source Kubernetes version primarily because of the carefully reviewed and applied hardening of the operating system (OS) and Kubernetes. The hardening ranges from removing unused kernel modules to using an OS configuration that follows industry best practices. Our custom Kubernetes configuration addresses common Kubernetes deployment security pitfalls and implements industry best practices. + +A benefit of Palette when used with PXK is the ability to apply different flavors of container storage interface (CSI) plugins and container network interface (CNI) plugins. Other open-source Kubernetes distributions, such as MicroK8s, RKE2, and K3s, come with a default CSI and CNI. Additional complexity and overhead are required from you to enable different interfaces. PXK supports the ability to select other interface plugins out of the box without any additional overhead or complexity needed from your side. + +There are no changes to the Kubernetes source code and we also follow the same versioning schema as the upstream open-source Kubernetes distribution. + +:::info + +We also offer Palette eXtended Kubernetes Edge (PXK-E) for Edge deployments. Refer to the [PXK-E glossary definition](../glossary-all.md#palette-extended-kubernetes-edge-pxk-e) to learn more about PXK-E. + +::: + +## Versions Supported + + + + + + +## Prerequisites + +- A minimum of 4 CPU and 4GB Memory. + +- Operating System (OS) dependencies as listed in the table. + +| OS Distribution | OS Version | Supports Kubernetes 1.27.x | +|---------------|------------|----------------------------| +| CentOS | 7.7 | ❌ | +| Ubuntu | 22.04 | ✅ | +| Ubuntu | 20.04 | ❌ | +| Ubuntu | 18.04 | ❌ | + + +## Parameters + +| Parameter | Description | +|-----------|-------------| +| `pack.podCIDR` | The CIDR range for Pods in the cluster. This should match the networking layer property. Default: `192.168.0.0/16`| +| `pack.serviceClusterIpRange` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| +| `pack.serviceDomain` | The cluster DNS service domain. Default: `cluster.local`. To change the default, you must add this parameter to the Kubernetes YAML file at cluster creation and specify the cluster DNS service domain to use. This value cannot be changed after cluster creation is complete. Refer to the [Change Cluster DNS Service Domain](kubernetes.md?versions=k8s_v1.27#change-cluster-dns-service-domain) section. | +| `pack.palette.config.dashboard.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](kubernetes.md#configure-oidc-identity-provider). 
| +| `kubeadmconfig.apiServer.extraArgs` | A list of additional apiServer flags you can set.| +| `kubeadmconfig.apiServer.extraVolumes` | A list of additional volumes to mount on the apiServer.| +| `kubeadmconfig.controllerManager.extraArgs` | A list of additional ControllerManager flags to set.| +| `kubeadmconfig.scheduler.extraArgs` | A list of additional Kube scheduler flags to set.| +| `kubeadmconfig.kubeletExtraArgs` | A list of kubelet arguments to set and copy to the nodes.| +| `kubeadmconfig.files` | A list of additional files to copy to the nodes.| +| `kubeadmconfig.preKubeadmCommands` | A list of additional commands to invoke **before** running kubeadm commands.| +| `kubeadmconfig.postKubeadmCommands` | A list of additional commands to invoke **after** running kubeadm commands.| +| `kubeadmconfig.clientConfig` | Settings to manually configure OIDC-based authentication when you choose a third-party (Custom) IDP. Refer to [Configure Custom OIDC](#configure-custom-oidc). | +## Usage + +The Kubeadm configuration file is where you can do the following: + +- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. + + As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](../clusters/clusters.md) guide and [Cluster Deployment Errors](../troubleshooting/cluster-deployment.md). + +- Change the default cluster DNS service domain from `cluster.local` to a DNS domain that you specify. You can only change the DNS domain during cluster creation. For more information, refer to [Change Cluster DNS Service Domain](kubernetes.md?versions=k8s_v1.27#change-cluster-dns-service-domain). + +- Manually configure a third-party OpenID Connect (OIDC) Identity Provider (IDP). For more information, check out [Configure Custom OIDC](#configure-custom-oidc). + +- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](frp.md) guide. + +### Change Cluster DNS Service Domain + +The `pack.serviceDomain` parameter with default value `cluster.local` is not visible in the Kubernetes YAML file, and its value can only be changed at cluster creation. To change the value, you must add `serviceDomain: "cluster.local"` to the Kubernetes YAML file when you create a cluster, and specify the service domain you want to use. + +```yaml hideClipboard +pack: + k8sHardening: True + podCIDR: "172.16.0.0/16" + serviceClusterIPRange: "10.96.0.0/12" + serviceDomain: "" +``` + +:::caution + +You can only specify the service domain at cluster creation. After cluster creation completes, you cannot update the value. Attempting to update it results in the error `serviceDomain update is forbidden for existing cluster`. + +::: + +For more information about networking configuration with DNS domains, refer to the Kubernetes [Networking](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/#kubeadm-k8s-io-v1beta3-Networking) API documentation. + +### Configuration Changes + +The PXK Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. 
+ +```yaml +palette: + config: + dashboard: + identityProvider: +``` + + +### Example Kubeadm Configuration File + +```yaml hideClipboard +pack: + k8sHardening: True + podCIDR: "192.168.0.0/16" + serviceClusterIpRange: "10.96.0.0/12" + palette: + config: + dashboard: + identityProvider: palette +kubeadmconfig: + apiServer: + extraArgs: + secure-port: "6443" + anonymous-auth: "true" + profiling: "false" + disable-admission-plugins: "AlwaysAdmit" + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" + admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + authorization-mode: RBAC,Node + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extraVolumes: + - name: audit-log + hostPath: /var/log/apiserver + mountPath: /var/log/apiserver + pathType: DirectoryOrCreate + - name: audit-policy + hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + readOnly: true + pathType: File + - name: pod-security-standard + hostPath: /etc/kubernetes/pod-security-standard.yaml + mountPath: /etc/kubernetes/pod-security-standard.yaml + readOnly: true + pathType: File + controllerManager: + extraArgs: + profiling: "false" + terminated-pod-gc-threshold: "25" + pod-eviction-timeout: "1m0s" + use-service-account-credentials: "true" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extraArgs: + profiling: "false" + kubeletExtraArgs: + read-only-port: "0" + event-qps: "0" + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + files: + - path: hardening/audit-policy.yaml + targetPath: /etc/kubernetes/audit-policy.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/90-kubelet.conf + targetPath: /etc/sysctl.d/90-kubelet.conf + targetOwner: "root:root" + targetPermissions: "0600" + - targetPath: /etc/kubernetes/pod-security-standard.yaml + targetOwner: "root:root" + targetPermissions: "0600" + content: | + apiVersion: apiserver.config.k8s.io/v1 + kind: AdmissionConfiguration + plugins: + - name: PodSecurity + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1 + kind: PodSecurityConfiguration + defaults: + enforce: "baseline" + enforce-version: "v1.26" + audit: "baseline" + audit-version: "v1.26" + warn: "restricted" + warn-version: "v1.26" + audit: "restricted" + audit-version: "v1.26" + exemptions: + # Array of authenticated usernames to exempt. + usernames: [] + # Array of runtime class names to exempt. + runtimeClasses: [] + # Array of namespaces to exempt. 
+ namespaces: [kube-system] + + preKubeadmCommands: + - 'echo "====> Applying kernel parameters for Kubelet"' + - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' + postKubeadmCommands: + - 'echo "List of post kubeadm commands to be executed"' + + # Client configuration to add OIDC based authentication flags in kubeconfig + #clientConfig: + #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}" + #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}" + #oidc-client-secret: yourSecretClientSecretGoesHere + #oidc-extra-scope: profile,email +``` + + +### Configure OIDC Identity Provider + +Platforms that use PXK can use the OIDC IDP feature, which offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured. In this scenario, you can leverage Palette as an IDP without having to configure a third-party IDP. We also support the ability to take advantage of other OIDC providers by making it possible for you to configure OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. + +When you add the PXK pack to a cluster profile, Palette displays the OIDC IDP options listed below. + +All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](/clusters/cluster-management/cluster-rbac#create-role-bindings). You can also configure OIDC for virtual clusters. For guidance, refer to [Configure OIDC for a Virtual Cluster](../clusters/palette-virtual-clusters/configure-oidc-virtual-cluster.md). + +- **None**: This setting does not require OIDC configuration for the cluster. It displays in the YAML file as `noauth`. + + :::caution + + We do not recommend choosing **None** in a production environment, as it may disable authentication for add-ons that rely on OIDC. + + ::: + + +- **Custom**: This is the default setting and does not require OIDC configuration. However, if desired, it allows you to specify a third-party OIDC provider by configuring OIDC statements in the YAML file as described in [Configure Custom OIDC](#configurecustomoidc). This setting displays in the YAML file as `none`. + +- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. + +- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](../user-management/saml-sso/saml-sso.md) guide. + + :::info + + If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. 
+ + ::: + + +### Configure Custom OIDC + +The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory). + + + + + + +Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. + +1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. + + ```yaml + kubeadmconfig: + apiServer: + extraArgs: + oidc-issuer-url: "provider URL" + oidc-client-id: "client-id" + oidc-groups-claim: "groups" + oidc-username-claim: "email" + ``` + +2. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. + + ```yaml + kubeadmconfig: + clientConfig: + oidc-issuer-url: "" + oidc-client-id: "" + oidc-client-secret: "" + oidc-extra-scope: profile,email,openid + ``` + + + + + + + +Follow these steps to configure OIDC for managed EKS clusters. + +1. In the Kubernetes pack, uncomment the lines in the `oidcIdentityProvider` parameter section of the Kubernetes pack, and enter your third-party provider details. + +```yaml +oidcIdentityProvider: + identityProviderConfigName: 'Spectro-docs' + issuerUrl: 'issuer-url' + clientId: 'user-client-id-from-Palette' + usernameClaim: "email" + usernamePrefix: "-" + groupsClaim: "groups" + groupsPrefix: "" + requiredClaims: +``` + +2. Under the `clientConfig` parameter section of Kubernetes pack, uncomment the `oidc-` configuration lines. + +```yaml +clientConfig: + oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" + oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" + oidc-client-secret: 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv + oidc-extra-scope: profile,email +``` + +3. Provide third-party OIDC IDP details. + + + + + + +### Use RBAC with OIDC + +You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + +Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. + +In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. + +![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) + + + + + + +## Prerequisites + +- A minimum of 4 CPU and 4GB Memory. + +- Operating System (OS) dependencies as listed in the table. 
+ +| OS Distribution | OS Version | Supports Kubernetes 1.26.x | +|---------------|------------|----------------------------| +| CentOS | 7.7 | ✅ | +| Ubuntu | 22.04 | ✅ | +| Ubuntu | 20.04 | ❌ | +| Ubuntu | 18.04 | ❌ | + + +## Parameters + +| Parameter | Description | +|-----------|-------------| +| `pack.podCIDR` | The CIDR range for Pods in the cluster. This should match the networking layer property. Default: `192.168.0.0/16`| +| `pack.serviceClusterIpRange` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| +| `pack.serviceDomain` | The cluster DNS service domain. Default: `cluster.local`. To change the default, you must add this parameter to the Kubernetes YAML file at cluster creation and specify the cluster DNS service domain to use. This value cannot be changed after cluster creation is complete. Refer to the [Change Cluster DNS Service Domain](kubernetes.md?versions=k8s_v1.26#change-cluster-dns-service-domain-1) section. | +| `pack.palette.config.dashboard.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](kubernetes.md#configure-oidc-identity-provider). | +| `kubeadmconfig.apiServer.extraArgs` | A list of additional apiServer flags you can set.| +| `kubeadmconfig.apiServer.extraVolumes` | A list of additional volumes to mount on the apiServer.| +| `kubeadmconfig.controllerManager.extraArgs` | A list of additional ControllerManager flags to set.| +| `kubeadmconfig.scheduler.extraArgs` | A list of additional Kube scheduler flags to set.| +| `kubeadmconfig.kubeletExtraArgs` | A list of kubelet arguments to set and copy to the nodes.| +| `kubeadmconfig.files` | A list of additional files to copy to the nodes.| +| `kubeadmconfig.preKubeadmCommands` | A list of additional commands to invoke **before** running kubeadm commands.| +| `kubeadmconfig.postKubeadmCommands` | A list of additional commands to invoke **after** running kubeadm commands.| +| `kubeadmconfig.clientConfig` | Settings to manually configure OIDC-based authentication when you choose a third-party (Custom) IDP. Refer to [Configure Custom OIDC](#configurecustomoidc). | + +## Usage + +The Kubeadm configuration file is where you can do the following: + +- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. + + As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](../clusters/clusters.md) guide and [Cluster Deployment Errors](../troubleshooting/cluster-deployment.md). + +- Manually configure a third-party OpenID Connect (OIDC) Identity Provider (IDP). For more information, check out [Configure Custom OIDC](#configurecustomoidc). + +- Change the default cluster DNS service domain from `cluster.local` to a DNS domain that you specify. You can only change the DNS domain during cluster creation. For more information, refer to [Change Cluster DNS Service Domain](kubernetes.md?versions=k8s_v1.26#change-cluster-dns-service-domain-1). 
+ +- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](frp.md) guide. + +### Change Cluster DNS Service Domain + +The `pack.serviceDomain` parameter with default value `cluster.local` is not visible in the Kubernetes YAML file, and its value can only be changed at cluster creation. To change the value, you must add `serviceDomain: "cluster.local"` to the Kubernetes YAML file when you create a cluster, and specify the service domain you want to use. + +```yaml +pack: + k8sHardening: True + podCIDR: "172.16.0.0/16" + serviceClusterIPRange: "10.96.0.0/12" + serviceDomain: "" +``` + +:::caution + +You can only specify the service domain at cluster creation. After cluster creation completes, you cannot update the value. Attempting to update it results in the error `serviceDomain update is forbidden for existing cluster`. + +::: + +For more information about networking configuration with DNS domains, refer to the Kubernetes [Networking](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/#kubeadm-k8s-io-v1beta3-Networking) API documentation. + + +### Configuration Changes + +The PXK Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. + + +```yaml +palette: + config: + dashboard: + identityProvider: +``` + +
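+
+As a quick reference — based on the option descriptions in the Configure OIDC Identity Provider section below — the following sketch shows the value Palette writes for each selection.
+
+```yaml
+palette:
+  config:
+    dashboard:
+      # None -> noauth, Custom -> none, Palette -> palette, Inherit from Tenant -> tenant
+      identityProvider: palette
+```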
+ +### Example Kubeadm Configuration File + +```yaml +pack: + k8sHardening: True + podCIDR: "192.168.0.0/16" + serviceClusterIpRange: "10.96.0.0/12" + palette: + config: + dashboard: + identityProvider: palette +kubeadmconfig: + apiServer: + extraArgs: + secure-port: "6443" + anonymous-auth: "true" + profiling: "false" + disable-admission-plugins: "AlwaysAdmit" + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" + admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + authorization-mode: RBAC,Node + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extraVolumes: + - name: audit-log + hostPath: /var/log/apiserver + mountPath: /var/log/apiserver + pathType: DirectoryOrCreate + - name: audit-policy + hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + readOnly: true + pathType: File + - name: pod-security-standard + hostPath: /etc/kubernetes/pod-security-standard.yaml + mountPath: /etc/kubernetes/pod-security-standard.yaml + readOnly: true + pathType: File + controllerManager: + extraArgs: + profiling: "false" + terminated-pod-gc-threshold: "25" + pod-eviction-timeout: "1m0s" + use-service-account-credentials: "true" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extraArgs: + profiling: "false" + kubeletExtraArgs: + read-only-port: "0" + event-qps: "0" + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + files: + - path: hardening/audit-policy.yaml + targetPath: /etc/kubernetes/audit-policy.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/90-kubelet.conf + targetPath: /etc/sysctl.d/90-kubelet.conf + targetOwner: "root:root" + targetPermissions: "0600" + - targetPath: /etc/kubernetes/pod-security-standard.yaml + targetOwner: "root:root" + targetPermissions: "0600" + content: | + apiVersion: apiserver.config.k8s.io/v1 + kind: AdmissionConfiguration + plugins: + - name: PodSecurity + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1 + kind: PodSecurityConfiguration + defaults: + enforce: "baseline" + enforce-version: "v1.26" + audit: "baseline" + audit-version: "v1.26" + warn: "restricted" + warn-version: "v1.26" + audit: "restricted" + audit-version: "v1.26" + exemptions: + # Array of authenticated usernames to exempt. + usernames: [] + # Array of runtime class names to exempt. + runtimeClasses: [] + # Array of namespaces to exempt. 
+ namespaces: [kube-system] + + preKubeadmCommands: + - 'echo "====> Applying kernel parameters for Kubelet"' + - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' + postKubeadmCommands: + - 'echo "List of post kubeadm commands to be executed"' + + # Client configuration to add OIDC based authentication flags in kubeconfig + #clientConfig: + #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}" + #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}" + #oidc-client-secret: yourSecretClientSecretGoesHere + #oidc-extra-scope: profile,email +``` + + +### Configure OIDC Identity Provider + +Platforms that use PXK can use the OIDC IDP feature, which offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured. In this scenario, you can leverage Palette as an IDP without having to configure a third-party IDP. We also support the ability to take advantage of other OIDC providers by making it possible for you to configure OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. + +When you add the PXK pack to a cluster profile, Palette displays the OIDC IDP options listed below. + +All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). You can also configure OIDC for virtual clusters. For guidance, refer to [Configure OIDC for a Virtual Cluster](../clusters/palette-virtual-clusters/configure-oidc-virtual-cluster.md). + + + +- **None**: This setting does not require OIDC configuration for the cluster. It displays in the YAML file as `noauth`. + + :::caution + + We do not recommend choosing **None** in a production environment, as it may disable authentication for add-ons that rely on OIDC. + + ::: + + +- **Custom**: This is the default setting and does not require OIDC configuration. However, if desired, it allows you to specify a third-party OIDC provider by configuring OIDC statements in the YAML file as described in [Configure Custom OIDC](#configurecustomoidc). This setting displays in the YAML file as `none`. + +- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. + +- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](../user-management/saml-sso/saml-sso.md) guide. + + :::info + + If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. 
+ + ::: + + +### Configure Custom OIDC + +The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory). + + + + + + +Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. + +1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. + + ```yaml + kubeadmconfig: + apiServer: + extraArgs: + oidc-issuer-url: "provider URL" + oidc-client-id: "client-id" + oidc-groups-claim: "groups" + oidc-username-claim: "email" + ``` + +2. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. + + ```yaml + ubeadmconfig: + clientConfig: + oidc-issuer-url: "" + oidc-client-id: "" + oidc-client-secret: "" + oidc-extra-scope: profile,email,openid + ``` + + + + + + + +Follow these steps to configure OIDC for managed EKS clusters. + +1. In the Kubernetes pack, uncomment the lines in the `oidcIdentityProvider` parameter section of the Kubernetes pack, and enter your third-party provider details. + + ```yaml hideClipboard + oidcIdentityProvider: + identityProviderConfigName: 'Spectro-docs' + issuerUrl: 'issuer-url' + clientId: 'user-client-id-from-Palette' + usernameClaim: "email" + usernamePrefix: "-" + groupsClaim: "groups" + groupsPrefix: "" + requiredClaims: + ``` + +2. Under the `clientConfig` parameter section of Kubernetes pack, uncomment the `oidc-` configuration lines. + + ```yaml hideClipboard + clientConfig: + oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" + oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" + oidc-client-secret: yourSecretClientSecretGoesHere + oidc-extra-scope: profile,email + ``` + +3. Provide third-party OIDC IDP details. + + + + + + +### Use RBAC with OIDC + +You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + +Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. + +In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. + +![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) + + +
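+
+For reference, the group binding described above corresponds roughly to a manifest like the following sketch. The binding name is arbitrary, and `dev-east-2` is the example group from this section; in practice, you would typically create the binding through Palette's [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings) workflow.
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: dev-east-2-cluster-admin
+subjects:
+  # Group name as assigned in the OIDC provider's configuration.
+  - kind: Group
+    name: dev-east-2
+    apiGroup: rbac.authorization.k8s.io
+roleRef:
+  kind: ClusterRole
+  name: cluster-admin
+  apiGroup: rbac.authorization.k8s.io
+```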
+ + + + + +## Prerequisites + +- A minimum of 4 CPU and 4GB Memory. + +- Operating System (OS) dependencies as listed in the table. + +| OS Distribution | OS Version | Supports Kubernetes 1.25.x | +|---------------|------------|----------------------------| +| CentOS | 7.7 | ✅ | +| Ubuntu | 22.04 | ✅ | +| Ubuntu | 20.04 | ❌ | +| Ubuntu | 18.04 | ❌ | + + +## Parameters + +| Parameter | Description | +|-----------|-------------| +| `pack.podCIDR` | The CIDR range for Pods in the cluster. This should match the networking layer property. Default: `192.168.0.0/16`| +| `pack.serviceClusterIpRange` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| +| `pack.serviceDomain` | The cluster DNS service domain. Default: `cluster.local`. To change the default, you must add this parameter to the Kubernetes YAML file at cluster creation and specify the cluster DNS service domain to use. This value cannot be changed after cluster creation is complete. Refer to the [Change Cluster DNS Service Domain](kubernetes.md?versions=k8s_v1.25#change-cluster-dns-service-domain) section. | +| `pack.palette.config.dashboard.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](kubernetes.md#configure-oidc-identity-provider). | +| `kubeadmconfig.apiServer.extraArgs` | A list of additional apiServer flags you can set.| +| `kubeadmconfig.apiServer.extraVolumes` | A list of additional volumes to mount on the apiServer.| +| `kubeadmconfig.controllerManager.extraArgs` | A list of additional ControllerManager flags to set.| +| `kubeadmconfig.scheduler.extraArgs` | A list of additional Kube scheduler flags to set.| +| `kubeadmconfig.kubeletExtraArgs` | A list of kubelet arguments to set and copy to the nodes.| +| `kubeadmconfig.files` | A list of additional files to copy to the nodes.| +| `kubeadmconfig.preKubeadmCommands` | A list of additional commands to invoke **before** running kubeadm commands.| +| `kubeadmconfig.postKubeadmCommands` | A list of additional commands to invoke **after** running kubeadm commands.| +| `kubeadmconfig.clientConfig` | Settings to manually configure OIDC-based authentication when you choose a third-party (Custom) IDP. Refer to [Configure Custom OIDC](#configurecustomoidc). | + +## Usage + +The Kubeadm configuration file is where you can do the following: + +- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. + + As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](../clusters/clusters.md) guide and [Cluster Deployment Errors](../troubleshooting/cluster-deployment.md). + +- Manually configure a third-party OpenID Connect (OIDC) Identity Provider (IDP). For more information, check out [Configure Custom OIDC](#configurecustomoidc). + +- Change the default cluster DNS service domain from `cluster.local` to a DNS domain that you specify. You can only change the DNS domain during cluster creation. 
For more information, refer to [Change Cluster DNS Service Domain](kubernetes.md?versions=k8s_v1.25#change-cluster-dns-service-domain). + +- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](frp.md) guide. + +### Change Cluster DNS Service Domain + +The `pack.serviceDomain` parameter with default value `cluster.local` is not visible in the Kubernetes YAML file, and its value can only be changed at cluster creation. To change the value, you must add `serviceDomain: "cluster.local"` to the Kubernetes YAML file when you create a cluster, and specify the service domain you want to use. + +```yaml +pack: + k8sHardening: True + podCIDR: "172.16.0.0/16" + serviceClusterIPRange: "10.96.0.0/12" + serviceDomain: "" +``` + +:::caution + +You can only specify the service domain at cluster creation. After cluster creation completes, you cannot update the value. Attempting to update it results in the error `serviceDomain update is forbidden for existing cluster`. + +::: + +For more information about networking configuration with DNS domains, refer to the Kubernetes [Networking](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/#kubeadm-k8s-io-v1beta3-Networking) API documentation. + + +### Configuration Changes + +The PXK Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. + + +```yaml hideClipboard +palette: + config: + dashboard: + identityProvider: +``` + + +### Example Kubeadm Configuration File + +```yaml hideClipboard +pack: + k8sHardening: True + podCIDR: "192.168.0.0/16" + serviceClusterIpRange: "10.96.0.0/12" + palette: + config: + dashboard: + identityProvider: palette +kubeadmconfig: + apiServer: + extraArgs: + secure-port: "6443" + anonymous-auth: "true" + profiling: "false" + disable-admission-plugins: "AlwaysAdmit" + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" + admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + authorization-mode: RBAC,Node + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extraVolumes: + - name: audit-log + hostPath: /var/log/apiserver + mountPath: /var/log/apiserver + pathType: DirectoryOrCreate + - name: audit-policy + hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + readOnly: true + pathType: File + - name: pod-security-standard + hostPath: /etc/kubernetes/pod-security-standard.yaml + mountPath: /etc/kubernetes/pod-security-standard.yaml + readOnly: true + pathType: File + controllerManager: + extraArgs: + profiling: "false" + terminated-pod-gc-threshold: "25" + pod-eviction-timeout: "1m0s" + use-service-account-credentials: "true" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extraArgs: + profiling: "false" + kubeletExtraArgs: + read-only-port: "0" + event-qps: "0" + 
feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + files: + - path: hardening/audit-policy.yaml + targetPath: /etc/kubernetes/audit-policy.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/90-kubelet.conf + targetPath: /etc/sysctl.d/90-kubelet.conf + targetOwner: "root:root" + targetPermissions: "0600" + - targetPath: /etc/kubernetes/pod-security-standard.yaml + targetOwner: "root:root" + targetPermissions: "0600" + content: | + apiVersion: apiserver.config.k8s.io/v1 + kind: AdmissionConfiguration + plugins: + - name: PodSecurity + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1 + kind: PodSecurityConfiguration + defaults: + enforce: "baseline" + enforce-version: "v1.25" + audit: "baseline" + audit-version: "v1.25" + warn: "restricted" + warn-version: "v1.25" + audit: "restricted" + audit-version: "v1.25" + exemptions: + # Array of authenticated usernames to exempt. + usernames: [] + # Array of runtime class names to exempt. + runtimeClasses: [] + # Array of namespaces to exempt. + namespaces: [kube-system] + + preKubeadmCommands: + - 'echo "====> Applying kernel parameters for Kubelet"' + - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' + + # Client configuration to add OIDC based authentication flags in kubeconfig + #clientConfig: + #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}" + #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}" + #oidc-client-secret: yourSecretClientSecretGoesHere + #oidc-extra-scope: profile,email + ``` + + +### Configure OIDC Identity Provider + +Platforms that use PXK can use the OIDC IDP feature, which offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured. In this scenario, you can leverage Palette as an IDP without having to configure a third-party IDP. We also support the ability to take advantage of other OIDC providers by making it possible for you to configure OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. + +When you add the PXK pack to a cluster profile, Palette displays the OIDC IDP options listed below. + +All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). You can also configure OIDC for virtual clusters. For guidance, refer to [Configure OIDC for a Virtual Cluster](../clusters/palette-virtual-clusters/configure-oidc-virtual-cluster.md). + + +- **None**: This is the default setting and there is nothing to configure. This setting displays in the YAML file as `noauth`. + + :::caution + + We do not recommend choosing **None** in a production environment, as it may disable authentication for add-ons that rely on OIDC. 
+ + ::: + + +- **Custom**: This setting allows you to specify a third-party OIDC provider by configuring OIDC statements in the Kubeadm configuration file as described in [Configure Custom OIDC](#configurecustomoidc). This setting displays in the YAML file as `none`. + +- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. + +- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](../user-management/saml-sso/saml-sso.md) guide. + + :::info + + If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. + + ::: + + +### Configure Custom OIDC + +The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory). + + + + + + +Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. + +1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. + + ```yaml hideClipboard + kubeadmconfig: + apiServer: + extraArgs: + oidc-issuer-url: "provider URL" + oidc-client-id: "client-id" + oidc-groups-claim: "groups" + oidc-username-claim: "email" + ``` + +2. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. + + ```yaml hideClipboard + kubeadmconfig: + clientConfig: + oidc-issuer-url: "" + oidc-client-id: "" + oidc-client-secret: "" + oidc-extra-scope: profile,email,openid + ``` + +3. Provide third-party OIDC IDP details. Refer to the [SAML & SSO Setup](/user-management/saml-sso) for guidance on configuring a third party IDP with Palette. + + + + + + +Follow these steps to configure OIDC for managed EKS clusters. + +1. In the Kubernetes pack, uncomment the lines in the `oidcIdentityProvider` parameter section of the Kubernetes pack, and enter your third-party provider details. + +```yaml + oidcIdentityProvider: hideClipboard + identityProviderConfigName: 'Spectro-docs' + issuerUrl: 'issuer-url' + clientId: 'user-client-id-from-Palette' + usernameClaim: "email" + usernamePrefix: "-" + groupsClaim: "groups" + groupsPrefix: "" + requiredClaims: + ``` + +2. Under the `clientConfig` parameter section of Kubernetes pack, uncomment the `oidc-` configuration lines. 
+ +```yaml +clientConfig: + oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" + oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" + oidc-client-secret: yourSecretClientSecretGoesHere + oidc-extra-scope: profile,email +``` + + + + + +### Use RBAC with OIDC + +You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + +Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. + +In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. + +![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) + + + + + + + + +## Prerequisites + +- A minimum of 4 CPU and 4GB Memory. + +- Operating System (OS) dependencies as listed in the table. + +| OS Distribution | OS Version | Supports Kubernetes 1.24.x | +|---------------|------------|----------------------------| +| CentOS | 7.7 | ✅ | +| Ubuntu | 22.04 | ❌ | +| Ubuntu | 20.04 | ✅ | +| Ubuntu | 18.04 | ❌ | + + +## Parameters + +| Parameter | Description | +|-----------|-------------| +| ``pack.podCIDR`` | The CIDR range for Pods in cluster. This should match the networking layer property. Default: `192.168.0.0/16`| +| ``pack.serviceClusterIpRange`` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12`| +| `pack.serviceDomain` | The cluster DNS service domain. Default: `cluster.local`. To change the default, you must add this parameter to the Kubernetes YAML file at cluster creation and specify the cluster DNS service domain to use. This value cannot be changed after cluster creation is complete. Refer to the [Change Cluster DNS Service Domain](kubernetes.md?versions=k8s_v1.24#change-cluster-dns-service-domain) section. | +| ``kubeadmconfig.apiServer.extraArgs`` | A list of additional apiServer flags you can set.| +| ``kubeadmconfig.apiServer.extraVolumes`` | A list of additional volumes to mount on apiServer.| +| ``kubeadmconfig.controllerManager.extraArgs`` | A list of additional ControllerManager flags to set.| +| ``kubeadmconfig.scheduler.extraArgs`` | A list of additional Kube scheduler flags to set.| +| ``kubeadmconfig.kubeletExtraArgs`` | A list of kubelet arguments to set and copy to the nodes.| +| ``kubeadmconfig.files`` | A list of additional files to copy to the nodes. | +| ``kubeadmconfig.preKubeadmCommands`` | A list of additional commands to invoke **before** running kubeadm commands.| +| ``kubeadmconfig.postKubeadmCommands`` | A list of additional commands to invoke **after** running kubeadm commands.| + +## Usage + +The Kubeadm configuration file is where you can do the following: + +- Change the default ``podCIDR`` and ``serviceClusterIpRange`` values. CIDR IPs specified in the configuration file take precedence over other defined CIDR IPs in your environment. 
+ + As you build your cluster, check that the ``podCIDR`` value does not overlap with any hosts or with the service network and the ``serviceClusterIpRange`` value does not overlap with any IP ranges assigned to nodes or pods. For more information, refer to the [Clusters](../clusters/clusters.md) guide and [Cluster Deployment Errors](../troubleshooting/cluster-deployment.md). + +- Manually configure a third-party OpenID Connect (OIDC) Identity Provider (IDP). For more information, check out [Configure Custom OIDC](#configurecustomoidc). + +- Change the default cluster DNS service domain from `cluster.local` to a DNS domain that you specify. You can only change the DNS domain during cluster creation. For more information, refer to [Change Cluster DNS Service Domain](kubernetes.md?versions=k8s_v1.24#change-cluster-dns-service-domain). + +- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more information, refer to the [Spectro Proxy](frp.md) guide. + +### Change Cluster DNS Service Domain + +The `pack.serviceDomain` parameter with default value `cluster.local` is not visible in the Kubernetes YAML file, and its value can only be changed at cluster creation. To change the value, you must add `serviceDomain: "cluster.local"` to the Kubernetes YAML file when you create a cluster, and specify the service domain you want to use. + +```yaml +pack: + k8sHardening: True + podCIDR: "172.16.0.0/16" + serviceClusterIPRange: "10.96.0.0/12" + serviceDomain: "" +``` + +:::caution + +You can only specify the service domain at cluster creation. After cluster creation completes, you cannot update the value. Attempting to update it results in the error `serviceDomain update is forbidden for existing cluster`. + +::: + +For more information about networking configuration with DNS domains, refer to the Kubernetes [Networking](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/#kubeadm-k8s-io-v1beta3-Networking) API documentation. + + +### Configuration Changes + +The PXK Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the ``identityProvider`` parameter. 
+ +```yaml +palette: + config: + dashboard: + identityProvider: +``` + + +### Example Kubeadm Configuration File + +```yaml +pack: + k8sHardening: True + podCIDR: "192.168.0.0/16" + serviceClusterIpRange: "10.96.0.0/12" + palette: + config: + dashboard: + identityProvider: noauth +kubeadmconfig: + apiServer: + extraArgs: + secure-port: "6443" + anonymous-auth: "true" + profiling: "false" + disable-admission-plugins: "AlwaysAdmit" + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurityPolicy" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + authorization-mode: RBAC,Node + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extraVolumes: + - name: audit-log + hostPath: /var/log/apiserver + mountPath: /var/log/apiserver + pathType: DirectoryOrCreate + - name: audit-policy + hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + readOnly: true + pathType: File + controllerManager: + extraArgs: + profiling: "false" + terminated-pod-gc-threshold: "25" + pod-eviction-timeout: "1m0s" + use-service-account-credentials: "true" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extraArgs: + profiling: "false" + kubeletExtraArgs: + read-only-port: "0" + event-qps: "0" + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + files: + - path: hardening/audit-policy.yaml + targetPath: /etc/kubernetes/audit-policy.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/privileged-psp.yaml + targetPath: /etc/kubernetes/hardening/privileged-psp.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/90-kubelet.conf + targetPath: /etc/sysctl.d/90-kubelet.conf + targetOwner: "root:root" + targetPermissions: "0600" + preKubeadmCommands: + - 'echo "====> Applying kernel parameters for Kubelet"' + - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' + postKubeadmCommands: + - 'export KUBECONFIG=/etc/kubernetes/admin.conf && [ -f "$KUBECONFIG" ] && { echo " ====> Applying PodSecurityPolicy" ; until $(kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null ); do echo "Failed to apply PodSecurityPolicies, will retry in 5s" ; sleep 5 ; done ; } || echo "Skipping PodSecurityPolicy for worker nodes"' + # Client configuration to add OIDC based authentication flags in kubeconfig + #clientConfig: + #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}" + #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}" + #oidc-client-secret: yourSecretClientSecretGoesHere + #oidc-extra-scope: profile,email + ``` + +### Configure OIDC Identity 
Provider + +Platforms that use PXK can use the OIDC IDP feature, which offers the convenience of managing OIDC at the Kubernetes layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured. In this scenario, you can leverage Palette as an IDP without having to configure a third-party IDP. We also support the ability to take advantage of other OIDC providers by making it possible for you to configure OIDC at the tenant level. For additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a different IDP by adding the OIDC configuration to your cluster profile. + +When you add the PXK pack to a cluster profile, Palette displays the OIDC IDP options listed below. + +All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes role to users and groups, refer to [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). You can also configure OIDC for virtual clusters. For guidance, refer to [Configure OIDC for a Virtual Cluster](../clusters/palette-virtual-clusters/configure-oidc-virtual-cluster.md). + +- **None**: This is the default setting and there is nothing to configure. This setting displays in the YAML file as `noauth`. + + :::caution + + We do not recommend choosing **None** in a production environment, as it may disable authentication for add-ons that rely on OIDC. + + ::: + +- **Custom**: This setting allows you to specify a third-party OIDC provider by configuring OIDC statements in the Kubeadm configuration file as described in [Configure Custom OIDC](#configurecustomoidc). This setting displays in the YAML file as `none`. + +- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting displays in the YAML file as `palette`. + +- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more information, check out the [SSO Setup](../user-management/saml-sso/saml-sso.md) guide. + + :::info + + If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC authentication and not SAML authentication. + + ::: + + +### Configure Custom OIDC + +The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon Elastic Kubernetes Service (EKS) and [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory). + + + + + + +Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to use Azure Active Directory (AAD) to enable OIDC integration. Refer to [Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory) to learn more. Click the **Amazon EKS** tab for steps to configure OIDC for EKS clusters. + +1. 
Add the following parameters to your Kubernetes YAML file when creating a cluster profile. + + ```yaml + kubeadmconfig: + apiServer: + extraArgs: + oidc-issuer-url: "provider URL" + oidc-client-id: "client-id" + oidc-groups-claim: "groups" + oidc-username-claim: "email" + ``` + +2. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. + + ```yaml + kubeadmconfig: + clientConfig: + oidc-issuer-url: "" + oidc-client-id: "" + oidc-client-secret: "" + oidc-extra-scope: profile,email,openid + ``` + +3. Provide third-party OIDC IDP details. + + + + + + +Follow these steps to configure OIDC for managed EKS clusters. + +1. In the Kubernetes pack, uncomment the lines in the `oidcIdentityProvider` parameter section of the Kubernetes pack, and enter your third-party provider details. + + ```yaml + oidcIdentityProvider: + identityProviderConfigName: 'Spectro-docs' + issuerUrl: 'issuer-url' + clientId: 'user-client-id-from-Palette' + usernameClaim: "email" + usernamePrefix: "-" + groupsClaim: "groups" + groupsPrefix: "" + requiredClaims: + ``` + +2. Under the `clientConfig` parameter section of Kubernetes pack, uncomment the `oidc-` configuration lines. + + ```yaml + clientConfig: + oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" + oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" + oidc-client-secret: yourSecretClientSecretGoesHere + oidc-extra-scope: profile,email + ``` + + + + + +### Use RBAC with OIDC + +You can create a role binding that uses individual users as the subject or specify a group name as the subject to map many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To learn more, review [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + +Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. + +In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. + +![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) + + + + + + + + +:::caution + +All versions less than v1.23.x are considered deprecated. Upgrade to a newer version to take advantage of new features. + +::: + + + +
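+
+As a minimal sketch of the role binding described in the **Use RBAC with OIDC** sections above, the following binds the example OIDC group `dev-east-2` to the built-in `cluster-admin` ClusterRole. The binding name is illustrative; adjust the group and role to match your own OIDC configuration.
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  # Illustrative binding name.
+  name: dev-east-2-cluster-admin
+subjects:
+  # The group name must match the group assigned in the OIDC provider's configuration.
+  - kind: Group
+    name: dev-east-2
+    apiGroup: rbac.authorization.k8s.io
+roleRef:
+  kind: ClusterRole
+  name: cluster-admin
+  apiGroup: rbac.authorization.k8s.io
+```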
+ + +## Terraform + +You can reference Kubernetes in Terraform with the following code snippet. + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "k8s" { + name = "kubernetes" + version = "1.26.4" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + +## Resources + +- [Kubernetes](https://kubernetes.io/) + + + +- [Kubernetes Documentation](https://kubernetes.io/docs/concepts/overview/) + + + +- [Image Swap with Palette](../clusters/cluster-management/image-swap.md) diff --git a/docs/docs-content/integrations/kubevious.md b/docs/docs-content/integrations/kubevious.md new file mode 100644 index 0000000000..7b661e8830 --- /dev/null +++ b/docs/docs-content/integrations/kubevious.md @@ -0,0 +1,64 @@ +--- +sidebar_label: 'Kubevious' +title: 'Kubevious' +description: 'Kubevious Monitoring pack in Spectro Cloud' +type: "integration" +hide_table_of_contents: true +category: ['monitoring', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/kubevious/blobs/sha256:5e33d7b51b1317a834b4552d96fc1cc8463000a7eedbcb4b784ea07236f3d7f7?type=image/png' +tags: ['packs', 'kubevious', 'monitoring'] +--- + +Kubevious integration provides a graphical interface that renders easy to understand, application-centric Kubernetes configurations. + +## Versions Supported + + + + +* **1.0.10** + + + + +* **0.8.15** + + + + + * **0.5.9** + + + + +## Components + +This integration deploys the following components: + +* Deployment +* MySql DB +* UI +* Parser + +## Ingress + +Follow the steps below to configure Ingress on Kubevious, according to the corresponding version + +1. Within the manifest, find the kubevious section **user** > **interface** > **service** > **type** and confirm/change, according to the Kubevious version as listed in the table below. + + | **Versions** | **Parameters** | **Action** | + | ------------ | -------------------------------- | -------------------------------------------------------------------- | + | **1.0.10** | ui: service: type: **ClusterIP** | Confirm that it states **ClusterIP**. | + | **0.8.15** | ui: service: type: **ClusterIP** | Confirm that it states **ClusterIP**. | + | **0.5.9** | ui: svcType: **LoadBalancer** | Change kubevious.ui.svcType from **LoadBalancer** to **Cluster IP**. | + +2. Configure Ingress + * Enable Ingress; change enabled from *false* to **true**. + * Set Ingress rules like annotations, path, hosts, etc. + +With these configuration changes, you can access the Kubevious service on the Ingress Controller LoadBalancer hostname/IP. + +## References + +- [Kubevious GitHub](https://github.com/kubevious/kubevious) diff --git a/docs/docs-content/integrations/kubevirt.md b/docs/docs-content/integrations/kubevirt.md new file mode 100644 index 0000000000..7a69001684 --- /dev/null +++ b/docs/docs-content/integrations/kubevirt.md @@ -0,0 +1,63 @@ +--- +sidebar_label: 'KubeVirt' +title: 'KubeVirt' +description: 'Choosing KubeVirt within the Palette console' +hide_table_of_contents: true +type: "integration" +category: ['system app', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/kubevirt/blobs/sha256:185e7a7658c05ab478f2822b080a7e21da9113b4a8bf5fb7fb3338d9a5796eed?type=image/png' +tags: ['packs', 'kubevirt', 'system app'] +--- + +KubeVirt is a virtual machine management add-on for Kubernetes clusters. 
Create predefined virtual machines using KubeVirt, and Palette will provision KubeVirt as an Add-on Pack to manage the VM resources within the orchestrator. + +
+ +## Versions Supported + + + + +**0.51.0** + + + + +**0.55.0** + + + + +
+
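+
+As a hedged illustration of the predefined virtual machines mentioned in the introduction, the following is a minimal `VirtualMachine` manifest. The VM name, labels, and the demo container disk image are illustrative; size the resources and disks for your own workloads.
+
+```yaml
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+  name: example-vm
+spec:
+  # Start the virtual machine as soon as it is created.
+  running: true
+  template:
+    metadata:
+      labels:
+        kubevirt.io/vm: example-vm
+    spec:
+      domain:
+        devices:
+          disks:
+            - name: containerdisk
+              disk:
+                bus: virtio
+        resources:
+          requests:
+            memory: 1Gi
+      volumes:
+        - name: containerdisk
+          containerDisk:
+            # Illustrative demo image; replace it with your own disk image.
+            image: quay.io/kubevirt/cirros-container-disk-demo
+```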
+ +## Notable Parameters + +```yaml +manifests: + KubeVirt-operator: + # Enable Emulation (when no nested virtualization enabled) + useEmulation: true + KubeVirt-cr: + contents: | + apiVersion: KubeVirt.io/v1 + kind: KubeVirt + metadata: + name: KubeVirt + namespace: KubeVirt + spec: + certificateRotateStrategy: {} + configuration: + developerConfiguration: + featureGates: [] + customizeComponents: {} + imagePullPolicy: IfNotPresent + workloadUpdateStrategy: {} +``` + +## References + +- [Installing KubeVirt on Kubernetes](https://KubeVirt.io/user-guide/operations/installation/#installing-KubeVirt-on-kubernetes) + +- [GitHub KubeVirt](https://github.com/KubeVirt/KubeVirt/releases/tag/v0.51.0) diff --git a/docs/docs-content/integrations/kubewatch.md b/docs/docs-content/integrations/kubewatch.md new file mode 100644 index 0000000000..afd1ba8adf --- /dev/null +++ b/docs/docs-content/integrations/kubewatch.md @@ -0,0 +1,41 @@ +--- +sidebar_label: 'kube-watch' +title: 'kube-watch' +description: 'kube-watch monitoring pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['monitoring', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/kubewatch/blobs/sha256:a277fb90357df9cbffe98eea1ed100fba1b17970b8fc056d210c4f7bfe4f17a3?type=image/png' +tags: ['packs', 'kube-watch', 'monitoring'] +--- + + +Kubewatch is a Kubernetes watcher that currently publishes notification to available collaboration hubs/notification channels. It is run in the k8s cluster for monitoring resource changes and event notifications are obtained through webhooks. The supported webhooks are: + - slack + - hipchat + - mattermost + - flock + - webhook + - smtp + +## Usage: + + kubewatch [flags] + kubewatch [command] + + +## Versions Supported + + + + + +**1.0.7** + + + + +## References + +- [kube-watch GitHub](https://github.com/robusta-dev/kubewatch) diff --git a/docs/docs-content/integrations/longhorn.md b/docs/docs-content/integrations/longhorn.md new file mode 100644 index 0000000000..b59c456be7 --- /dev/null +++ b/docs/docs-content/integrations/longhorn.md @@ -0,0 +1,112 @@ +--- +sidebar_label: 'Longhorn' +title: 'Longhorn' +description: 'Longhorn pack in Palette' +hide_table_of_contents: true +type: "integration" +category: ["storage", 'amd64', 'fips'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/csi-longhorn/blobs/sha256:8257bd6697941139cea8ace907e25b3859cb8de48f965a5b6011d518cad0a2db?type=image/png' +tags: ['packs', 'longhorn', 'storage'] +--- + +Longhorn is a lightweight distributed block storage system for cloud native storage Kubernetes that allows you to replicate storage to Kubernetes clusters. Once Longhorn is installed, it adds persistent volume support to the Kubernetes cluster using containers and microservices. + +Longhorn creates a dedicated storage controller for each block device volume and replicates the volume across multiple nodes. + +## Version Supported + + + + + + +## Prerequisites + +- Kubernetes cluster is 1.21 or higher. + +## Parameters + +The table lists commonly used parameters you can configure when adding this pack. + +| Parameter | Description | Default | +|-------------------------|--------------------------------------------------------|---------------------------------------------| +| defaultClass | The volume type to be used. | `true` | +| defaultFsType | The default file system. 
| `ext4` | +| defaultClassReplicaCount| The default number of copies of data store in your cluster. | `3` | +| defaultDataLocality | The default location where data computation will occur. | `disabled` Best effort | +| reclaimPolicy | This means that a dynamically provisioned volume will be automatically deleted when deletes when corresponding PersistentVolumeClaim is deleted. For important data, it is more appropriate to use the "Retain" policy | `Delete` | +| migratable | The ability to transfer data to another data storage systems | `false` | +| recurringJobSelector:enable | The management of recurring jobs. You can enable this feature and type a comma-separated list of jobs to run: `recurringJobSelector:enable:jobList [ ]` | `false` | + +## Usage + +Longhorn provides these features: + +- Enterprise-grade distributed storage with no single point of failure. +- Incremental snapshots of block storage. +- Backup to secondary storage (NFSv4 or S3-compatible object storage) built on change block detection. +- Recurring snapshot and backup. + +For more information, check out Longhorn guide on [How to Create Volumes](https://longhorn.io/docs/1.4.0/volumes-and-nodes/create-volumes/). + + + + + + +## Prerequisites + +- Kubernetes cluster is at least version 1.22 and not higher than 1.24. + +## Parameters + +The table lists commonly used parameters you can configure when adding this pack. + +| Parameter | Description | Default | +|-------------------------|--------------------------------------------------------|---------------------------------------------| +| defaultClass | The volume type to be used. | `true` | +| defaultFsType | The default file system. | `ext4` | +| defaultClassReplicaCount| The default number of copies of data store in your cluster. | `3` | +| defaultDataLocality | The default location where data computation will occur. | `disabled` Best effort | +| reclaimPolicy | This means that a dynamically provisioned volume will be automatically deleted when deletes when corresponding PersistentVolumeClaim is deleted. For important data, it is more appropriate to use the "Retain" policy | `Delete` | +| migratable | The ability to transfer data to another data storage systems | `false` | +| recurringJobSelector:enable | The management of recurring jobs. You can enable this feature and type a comma-separated list of jobs to run: `recurringJobSelector:enable:jobList [ ]` | `false` | + +## Usage + +Longhorn provides these features: + +- Enterprise-grade distributed storage with no single point of failure. +- Incremental snapshots of block storage. +- Backup to secondary storage (NFSv4 or S3-compatible object storage) built on change block detection. +- Recurring snapshot and backup. + +For more information, check out Longhorn guide on [How to Create Volumes](https://longhorn.io/docs/1.4.0/volumes-and-nodes/create-volumes/). + + + + +## Terraform + +When using this Pack as a base layer, you need the following terraform code. 
+ +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "csi-longhorn" { + name = "longhorn" + version = "1.3.1" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + +## References + +- [Longhorn Home](https://longhorn.io/) + +- [Longhorn Documentation](https://longhorn.io/docs) diff --git a/docs/docs-content/integrations/maintenance-policy.md b/docs/docs-content/integrations/maintenance-policy.md new file mode 100644 index 0000000000..d4bdff173f --- /dev/null +++ b/docs/docs-content/integrations/maintenance-policy.md @@ -0,0 +1,174 @@ +--- +sidebar_label: "Maintenance Policy" +title: "Packs Maintenance Policy" +description: "Learn about Palette pack update and deprecation schedules." +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["packs", "deprecation"] +--- + +Palette supports two pack categories: *Infrastructure* and *Add-on*. Infrastructure packs are often sourced from third parties and are infrastructure-related or support critical container-based environments. Packs are used to create layers in cluster profiles. Infrastructure packs are grouped as follows: + +- Kubernetes + +- Operating System (OS) + +- Container Network Interface (CNI) + +- Container Storage Interface (CSI) + + +Add-on packs provide additional functionality that you can add to your cluster profile and are grouped as follows: + +- Load Balancer + +- Ingress + +- Logging + +- Monitoring + +- Security + +- Authenticaiton + +- System Apps + + +Check out the [Packs List](integrations.mdx) document, where you can use the filter buttons to display a list of Palette packs in each category and learn about the individual packs. + +
+ +## Pack Updates + +Packs undergo rigorous vulnerability scans and penetration testing before they become available in Palette. The following sections describe our update schedule for each infrastructure pack category. + +
+ +### Kubernetes Packs + +We provide Cloud Native Computing Foundation (CNCF) Kubernetes updates as follows: + +
+ +- **Major versions**: Assessed based on the extent of changes. + + +- **Minor versions**: Provided within eight weeks of a new Kubernetes release. + + +- **Patch versions**: Provided within four weeks of a new Kubernetes release. + + + +### OS Packs + +We provide Ubuntu LTS and CentOS updates for IaaS clusters as follows: + +
+ +- **Major versions**: Added within eight weeks of release. + + +- **Patch and Minor versions**: Updated at runtime using Palette’s on-demand or scheduled OS upgrades and patch-on-boot capabilities. + + +### CNI Packs + +We provide CNI pack updates as follows: + +
+ +- **Major versions**: Assessed based on the extent of changes. + + +- **Minor versions**: Added within six weeks of release. + + +- **Patch versions**: Added within four weeks of release. + + +### CSI Packs + +We provide CSI pack updates as follows: + +
+ +- **Major versions**: Assessed based on the extent of changes. + + +- **Minor versions**: Added within six weeks of release. + + +- **Patch versions**: Added within four weeks of release. + + +### Add-on Packs + +We provide add-on pack updates as follows: + +
+ +- **Major versions**: Assessed based on the extent of changes. + + +- **Minor versions**: Added within six weeks of release. + + +- **Patch versions**: Added within four weeks of release. + + +## Pack Deprecations + + +We deprecate and remove packs when a more stable version of the pack is available or when the underlying technology becomes obsolete. When a pack is deprecated, you will still be able to create new cluster profiles using the pack and deploy clusters that use profiles containing the pack. + +Palette displays the deprecation stage when you click the information icon next to the pack name during profile creation. + +![Screenshot showing how Palette indicates a pack's stage of deprecation.](/integrations_deprecation-stage.png) + +An information icon in the profile stack also displays a message that indicates the required pack versions. + +![Screenshot showing message in profile stack that tells you the required pack version to use.](/integrations_deprecation-profile-stack-msg.png) + + +:::info + +You can review deprecated packs in the [Deprecated Packs](deprecated-packs.md) resource. +::: + + +We adhere to the following stages of deprecation: + +
+ +- **Deprecated**: When a pack or a pack version is deprecated, this indicates it will be removed in the future. You will still be able to create new cluster profiles using the pack and launch clusters using existing profiles that contain the pack. + + The pack remains in *Deprecated* state for three months before it moves to *Disabled* state. + +
+ +- **Disabled**: When a pack is disabled, it is no longer available for selection in Palette. When creating new profiles, you must use a newer version of the pack. You can still launch clusters using existing profiles that contain the disabled pack. + + The pack remains in *Disabled* state for three months before it is deleted. + +
+ +- **Deleted**: When a pack is deleted, it is removed from Palette. An active cluster that contains the deleted pack will continue to operate. However, you will not be able to deploy a new cluster profile that contains the deleted pack. + +:::info + +For important guidelines on updating pack versions, review [Update the Pack Version](../cluster-profiles/task-update-profile.md#update-the-pack-version). + +::: + +### Kubernetes Packs + +A minor Kubernetes version is deprecated in Palette when the Kubernetes community announces the version is entering End of Life (EOL). + +
+ +### CNI / CSI / Add-on Packs + +Palette supports a minor version of CNI, CSI, and add-on packs until two newer versions are available. At that time, packs in these categories are deprecated. diff --git a/docs/docs-content/integrations/metallb.md b/docs/docs-content/integrations/metallb.md new file mode 100644 index 0000000000..aa65e046a6 --- /dev/null +++ b/docs/docs-content/integrations/metallb.md @@ -0,0 +1,253 @@ +--- +sidebar_label: 'MetalLB' +title: 'MetalLB' +description: 'MetalLB Load Balancer pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['load balancers', 'amd64', 'arm64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/lb-metallb/blobs/sha256:3d09a1eab856a03d5b821062dcd1da624256e8f1e2ede404d88cb088d3adb945?type=image/png' +tags: ['packs', 'metallb', 'network'] +--- + +MetalLB is a load-balancer implementation for bare metal [Kubernetes](https://kubernetes.io/) clusters that uses standard routing protocols. This integration is recommended for self-hosted clouds and helps external services obtain an IP address when the service type is set to LoadBalancer. + +MetalLB deploys a controller and a speaker. The speaker is deployed as a DaemonSet on all nodes. + + +## Versions Supported + + + + + +## Prerequisites + +- A Kubernetes cluster with Kubernetes version 1.13.0 or later that does not already have network load-balancing functionality. + + +- A cluster network configuration that does not conflict with MetalLB. For more information, refer to the official Kubernetes [Cluster Networking](https://kubernetes.io/docs/concepts/cluster-administration/networking) documentation + + +- Ensure sufficient IPv4 addresses for MetalLB are available for the number of services in each cluster. + + +- When using the Border Gateway Protocol (BGP), one or more BGP-capable routers are required. + + +- When using the Layer 2 (L2) operating mode, Transmission Control Protocol (TCP) and User Datagram Protocol (UDP) traffic on port 7946 must be allowed between nodes, as required by the [HashiCorp memberlist](https://github.com/hashicorp/memberlist). You can configure other port as needed. + + +## Parameters + +The `addresses` parameter applies to the manifest-based MetalLB pack. You can provide multiple entries, but usually only one is needed. + +| **Parameter** | **Description** | +|---------------|-----------------| +| `addresses`| This can be a range of addresses or a CIDR address. Examples:
192.168.250.48-192.168.250.55
192.168.250.0/24 | + +## Usage + +The *lb-metallb* manifest-based pack supports direct configuration of an L2 IP address set. The *lb-metallb-helm* Helm-based pack provides an L2 address pool. + +
+ +### Manifest + +Manifest-based MetalLB supports direct configuration of an L2 IP address set. You can set either a range of addresses or use CIDR format, such as `192.168.250.48/29`. A more advanced MetalLB configuration, such as Border Gateway Protocol (BGP) routing, requires you to write your own manifests and add them to the Palette cluster profile. + +The example shows the syntax used to set an address range. + +
+ +```yaml +manifests: + metallb: + images: + controller: "" + speaker: "" + namespace: "metallb-system" + avoidBuggyIps: true + addresses: + - 192.168.250.48-192.168.250.55 +``` + +
+ +### Helm Chart + +Helm-based MetalLB gives you an L2 address pool by default. It has two sections, `charts:metallb-full:configuration` and `charts:metallb-full:metallb`, as shown in the following examples. + +Use the `charts:metallb-full:configuration` parameter section to set resource types that MetalLB supports. The pack default gives you an L2 address pool. To set up a more advanced scenario, you can use the other resource types provided in the pack. The pack includes a commented-out example for each resource. + +
+ + +```yaml +charts: + metallb-full: + configuration: + ipaddresspools: + first-pool: + spec: + addresses: + - 192.168.10.0/24 + avoidBuggyIPs: true + autoAssign: true + + l2advertisements: + default: + spec: + ipAddressPools: + - first-pool + + bgpadvertisements: {} + bgppeers: {} + communities: {} + bfdprofiles: {} +``` +
+ +The `charts:metallb-full:metallb` parameter section provides access to all the options of the base chart that installs MetalLB. You don’t need to change anything unless you want to install MetalLB in Free Range Routing (FRR) mode. To use FRR mode, set the `speaker.frr.enabled` option to `true`, as the example shows. + +
+ +```yaml +charts: + metallb-full: + metallb: + speaker: + frr: + enabled: true +``` + + + + + + +
+ + + + +## Prerequisites + +- A Kubernetes cluster with Kubernetes version 1.13.0 or later that does not already have network load-balancing functionality. + + + +- A cluster network configuration that does not conflict with MetalLB. For more information, refer to the official Kubernetes [Cluster Networking](https://kubernetes.io/docs/concepts/cluster-administration/networking) documentation. + + +- Ensure sufficient IPv4 addresses for MetalLB are available for the number of services in each cluster. + + +- When using the Border Gateway Protocol (BGP), one or more BGP-capable routers are required. + + +- When using the L2 operating mode, Transmission Control Protocol (TCP) and User Datagram Protocol (UDP) traffic on port 7946 must be allowed between nodes, as required by the [HashiCorp memberlist](https://github.com/hashicorp/memberlist). You can configure other port as needed. + + +## Parameters + +The `addresses` parameter applies to the manifest-based MetalLB pack. You can provide multiple entries but only one is typically needed. + +| **Parameter** | **Description** | +|---------------|-----------------| +| `addresses`| This can be a range of addresses or a CIDR address. Examples:
192.168.250.48-192.168.250.55
192.168.250.0/24 | + +## Usage + +The *lb-metallb* manifest-based pack supports direct configuration of an L2 IP address set. You can set either a range of addresses or use CIDR format, such as `192.168.250.48/29`. A more advanced MetalLB configuration, such as Border Gateway Protocol (BGP) routing, requires you to write your own manifests and add them to the Palette cluster profile. + +The example shows the syntax used to set an address range. + +
+ +```yaml +manifests: + metallb: + images: + controller: "" + speaker: "" + namespace: "metallb-system" + avoidBuggyIps: true + addresses: + - 192.168.250.48-192.168.250.55 +``` + + + + + +
+ + + +:::caution + +All versions of the manifest-based pack less than v0.9.x are considered deprecated. Upgrade to a newer version to take advantage of new features. + +::: + + + +
+ +
+ +## Troubleshooting + +If the controller and speaker pods are not assigning the new IP addresses that you provided in the MetalLB pack, it is likely that pods in existing deployments do not have the latest configMap. + +IP addresses you specify in MetalLB pack values go into a configMap called `config` in the `metallb-system` namespace. The MetalLB controller and speakers use the configMap as a volume mount. + +Any changed IP addresses will get updated in the configMap. You can confirm this by issuing the following command. + +
+ +```bash +kubectl describe cm config --namespace metallb-system +``` + +
+ +Since the controller and speaker pods are using a previous copy of the configMap, existing deployments are unaware of the changes made to the configMap. + +
+
+ +To ensure updated addresses are reflected in the configMap, you need to restart the controller and speaker pods so they fetch the new configMap and start assigning new addresses correctly. Issue the commands below to restart the controller and speaker. + +
+ + +```bash +kubectl rollout restart deploy controller --namespace metallb-system +kubectl rollout restart ds speaker --namespace metallb-system +``` + +
+ +## Terraform + +```hcl +data "spectrocloud_pack" "MetalLB" { + name = "lb-metallb" + version = "0.13.5" +} +data "spectrocloud_pack" "MetalLB-Helm" { + name = "lb-metallb-helm" + version = "0.13.7" +} +``` + +
+ +## References + +- [MetalLB](https://metallb.universe.tf/) + + +- [MetalLB GitHub ](https://github.com/metallb/metallb) \ No newline at end of file diff --git a/docs/docs-content/integrations/microk8s.md b/docs/docs-content/integrations/microk8s.md new file mode 100644 index 0000000000..9231fbaeca --- /dev/null +++ b/docs/docs-content/integrations/microk8s.md @@ -0,0 +1,90 @@ +--- +sidebar_label: 'MicroK8s' +title: 'MicroK8s' +description: 'MicroK8s pack in Palette' +hide_table_of_contents: true +type: "integration" +category: ["kubernetes", 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/kubernetes-microk8s/blobs/sha256:b971b64f62e2e67b0a166316f96e6f4211aacea6e28459bb89275e8882ade985?type=image/png' +tags: ['packs', 'microk8s', 'kubernetes'] +--- + +MicroK8s is a Cloud Native Computing Foundation (CNCF) certified upstream Kubernetes deployment that runs entirely on your workstation or edge device. It runs all Kubernetes services natively without virtual machines and packs all the required libraries and binaries. + +## Prerequisites + +- One of the following Ubuntu environments to run commands: + - 22.04 LTS + - 20.04 LTS + - 18.04 LTS + - 16.04 LTS + + Or another operating system that supports snapd. + + +- At least 20 GB of disk space and 4 GB of memory. +- An internet connection. + +:::info + +If your environment doesn't meet these requirements, there are alternative ways to install MicroK8s, including additional OS support and an offline deployment. + +::: + + +## Versions Supported + + + + + +* **1.25.0** + + + + + +* **1.24.0** + + + + +MicroK8s installs a minimal, lightweight Kubernetes you can run and use on almost any machine. When installing MicroK8s you can specify a channel made up of two components: + +- **Track**: denotes the upstream Kubernetes version. +- **Risk level**: indicates the maturity level of the release, such as stable and edge. + +MicroK8s comes with its own packaged version of the ``kubectl`` command for operating Kubernetes. This avoids interfering with any version that may already be on the host machine. You can run it in a terminal like this: +
+ +```bash +microk8s kubectl +``` + +If you are using or want to use a different kubectl command, you can configure it for your Linux, Mac, or Windows operating system. + +
+ + + +:::caution + +When you deploy AWS EBS pack with MicroK8s, you need to change EBS CSI pack `node.kubelet`` values from `/var/lib/kubelet` to `/var/snap/microk8s/common/var/lib/kubelet`. + +```yaml + node: + env: [] + kubeletPath: /var/lib/kubelet +``` + +```yaml + node: + env: [] + kubeletPath: /var/snap/microk8s/common/var/lib/kubelet +``` +::: + +## References + +- [MicroK8s ](https://microk8s.io/docs) diff --git a/docs/docs-content/integrations/multus-cni.md b/docs/docs-content/integrations/multus-cni.md new file mode 100644 index 0000000000..a6136d3f8d --- /dev/null +++ b/docs/docs-content/integrations/multus-cni.md @@ -0,0 +1,56 @@ +--- +sidebar_label: 'Multus CNI' +title: 'Multus' +description: 'Choosing Multus CNI within the Palette console' +type: "integration" +category: ['network', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +hide_table_of_contents: true +logoUrl: 'https://registry.spectrocloud.com/v1/cni-multus/blobs/sha256:3727499ea41784a17c818b7269c27918b8664766c40d1b1f3cd90c34d5154676?type=image/png' +tags: ['packs', 'multus', 'network'] +--- + +The Multus Container Network Interface (CNI) plugin enables multiple, network interfaces to attach to pods within Kubernetes. Palette provisions the CNI-Multus 3.8.0 Add-on pack, so you can create a multi-homed pod for Kubernetes right within the orchestrator. + +
+ +## Version Supported + + + + +**cni-multus 3.8.0** + + + + +
+
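+
+As a hedged sketch of how a multi-homed pod is typically defined with Multus, the following `NetworkAttachmentDefinition` describes a secondary macvlan interface. The definition name, `master` interface, and address range are illustrative and depend on your environment.
+
+```yaml
+apiVersion: k8s.cni.cncf.io/v1
+kind: NetworkAttachmentDefinition
+metadata:
+  # Illustrative name that pods reference in the k8s.v1.cni.cncf.io/networks annotation.
+  name: macvlan-conf
+spec:
+  config: |
+    {
+      "cniVersion": "0.3.1",
+      "type": "macvlan",
+      "master": "eth0",
+      "mode": "bridge",
+      "ipam": {
+        "type": "host-local",
+        "subnet": "192.168.1.0/24",
+        "rangeStart": "192.168.1.200",
+        "rangeEnd": "192.168.1.216"
+      }
+    }
+```
+
+A pod then requests the additional interface by adding the annotation `k8s.v1.cni.cncf.io/networks: macvlan-conf` to its metadata.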
+ +## Notable Parameters + +| **Parameters** | **Values** | **Required/Optional** | **Description** | +| ---------------------- | ------- | ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| **name** | string | required | Enter the name of the network. | +| **type** | string | required | "multus" | +| **confDir** | string | optional | The directory for the CNI config file that Multus reads. Defaults to /etc/cni/multus/net.d. | +| **cniDir** | string | optional | Multus CNI data directory. | +| **binDir** | string | optional | Additional directory for CNI plugins that Multus calls. | +| **kubeconfig** | string | optional | Kubeconfig file for the out-of-cluster communication with kube-apiserver. | +| **logToStderr** | boolean | optional | Enable or disable logging to STDERR. Defaults to true. | +| **logFile** | string | optional | File path for the log file to which Multus adds logs. | +| **logLevel** | string | optional | Logging level. | +| **logOptions** | object | optional | Logging options. | +| **namespaceIsolation** | boolean | optional | Enables a security feature where pods are only allowed to access
NetworkAttachmentDefinitions in the namespace where the pod resides. Defaults to *false*. | +| **capabilities** | list | optional | Capabilities supported by at least one of the delegates. | +| **readinessindicatorfile** | string | | The path to a file whose existence denotes that the default network is ready. | + + +## References + +- [Multus-CNI](https://github.com/k8snetworkplumbingwg/multi-net-spec) + +- [Multus-CNI Quickstart Guide](https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/quickstart.md) + +- [Mutltus Configuration](https://github.com/k8snetworkplumbingwg/multus-cni/blob/master/docs/configuration.md) + diff --git a/docs/docs-content/integrations/nfs-subdir-external.md b/docs/docs-content/integrations/nfs-subdir-external.md new file mode 100644 index 0000000000..e8546668ef --- /dev/null +++ b/docs/docs-content/integrations/nfs-subdir-external.md @@ -0,0 +1,38 @@ +--- +sidebar_label: 'nfs-subdir-External' +title: 'Kubernetes NFS Subdir External Provisionerl' +description: 'NFS-Subdir-External Provisioner pack in Spectro Cloud' +type: "integration" +category: ['storage', 'amd64'] +hide_table_of_contents: true +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-nfs-subdir-external/blobs/sha256:4b40eb85382d04dc4dcfc174b5e288b963b6201f6915e14b07bd8a5c4323b51b?type=image/png' +tags: ['packs', 'nfs-subdir-external', 'storage'] +--- + + +NFS Subdir External Provisioner is an automatic provisioner for Kubernetes that uses the already configured NFS server, automatically creating Persistent storage volumes. It installs the storage classes and NFS client provisioner into the workload cluster + +## Prerequisites + +Kubernetes >=1.9 + + +## Versions Supported + + + + + +**1.0** + + + + + + +## References + +- [Kubernetes NFS Subdir External Provisioner GitHub](https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner) + +- [Kubernetes NFS Subdir External Provisioner Documentation](https://artifacthub.io/docs) diff --git a/docs/docs-content/integrations/nginx.md b/docs/docs-content/integrations/nginx.md new file mode 100644 index 0000000000..26d5d2db1b --- /dev/null +++ b/docs/docs-content/integrations/nginx.md @@ -0,0 +1,111 @@ +--- +sidebar_label: 'Nginx' +title: 'Nginx' +description: 'Nginx Ingress pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['ingress', 'amd64', 'arm64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/nginx/blobs/sha256:a36bf7e8023f018298ddbf0c82a49c38e872db4b0e480a39c285ae002916a83f?type=image/png' +tags: ['packs', 'nginx', 'network'] +--- + + +Ingress resource(s) in Kubernetes helps provide Service(s) externally-reachable URLs, load balance traffic, terminate SSL / TLS, and offer name-based virtual hosting. NGINX integration is an [Ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers) responsible for fulfilling the Ingress, usually with a load balancer, though it may also configure your edge router or additional frontends to help handle the traffic. + +## Versions Supported + + + + + +* **1.4.0** + + + + + +* **1.3.0** + + + + + + +* **1.2.5** + +
+ + **1.2.4** + +
+ + **1.2.3** + +
+ + **1.2.1** (deprecated) + +
+ + **1.2.0** (deprecated) + +
+ + + +* **1.0.4** + + + + + +* **0.26.1** + + + + + + * **0.43.0** + + + +
+ + +## Components + +Integration creates the following components: + +* Ingress Controller. +* Default Backend. + +## Default SSL Certificate + +NGINX Ingress controller provides an option to set a default SSL certificate to be used for requests that do not match any of the configured server names. The default certificate will also be used for ingress tls: sections that do not have a secretName option. +Below steps will come in handy to set the default certificate. + +1. Create a secret with key and certificate + ```bash + kubectl -n kube-system create secret tls ingress-tls --cert server.crt --key server.key + ``` +2. Edit Nginx ingress pack values to include extraArgs.default-ssl-certificate section which will reference the secret created above + ```bash + charts: + nginx-ingress: + fullnameOverride: "nginx-ingress" + controller: + ... + ... + extraArgs: + default-ssl-certificate: "kube-system/ingress-tls" + ``` + +## Troubleshooting + +For basic troubleshooting, refer the below troubleshooting guide: +https://github.com/kubernetes/ingress-nginx/blob/master/docs/troubleshooting.md + +## References + +- [Nginx Ingress Controller](https://www.nginx.com/products/nginx-ingress-controller/) diff --git a/docs/docs-content/integrations/opa-gatekeeper.md b/docs/docs-content/integrations/opa-gatekeeper.md new file mode 100644 index 0000000000..d514106447 --- /dev/null +++ b/docs/docs-content/integrations/opa-gatekeeper.md @@ -0,0 +1,70 @@ +--- +sidebar_label: 'OpenPolicyAgent' +title: 'Open Policy Agent' +description: 'OpenPolicyAgent security pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['security', 'amd64', 'arm64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/open-policy-agent/blobs/sha256:fcbad202dc9ca5e7a756562d8f9fc180ee77474034447dabc302d8a5a2bbe148?type=image/png" alt="OpenPolicyAgent logo' +tags: ['packs', 'open-policy-agent', 'security'] +--- + + +Palette users can leverage the **Open Policy Agent (OPA) Gatekeeper** to strengthen the security administration of Kubernetes environment. The major motivation behind the deployment is admission customization via configurations without code. Gatekeeper provides an admission control system based on policies or rules implemented through parameterized and admin configurable constraints. Palette supports **Gatekeeper v3.0**. + +The major features of OPA are: + +* **Validating Admission Control** +* **Policies and Constraints** + * **Sample Policies**: + * All namespaces must have a label that lists a point-of-contact. + * All pods must have an upper bound for resource usage. + * All images must be from an approved repository. + * Services must all have globally unique selectors. + * **Constraint Properties** + * AND-ed together + * Schema validation + * Selection semantics +* **Audit**: The periodical evaluation of resources against constraints. +* **Data Replication**: Constraints to be compared against other objects in the cluster. 
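+
+As a hedged illustration of the sample policies listed above, the following constraint requires every namespace to carry a point-of-contact label. It is a minimal sketch that assumes a `K8sRequiredLabels` ConstraintTemplate, such as the one in the Gatekeeper documentation examples, is already installed; the constraint name, label key, and exact `parameters` shape are illustrative and depend on that template.
+
+```yaml
+apiVersion: constraints.gatekeeper.sh/v1beta1
+kind: K8sRequiredLabels
+metadata:
+  # Illustrative constraint name.
+  name: ns-must-have-point-of-contact
+spec:
+  match:
+    kinds:
+      # Apply the constraint to namespaces only.
+      - apiGroups: [""]
+        kinds: ["Namespace"]
+  parameters:
+    # Label that every namespace must define.
+    labels: ["point-of-contact"]
+```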
+ +## Versions Supported + + + + + +* **3.11.0** +* **3.9.0** + + + + + + +**3.7.0** + + + + + +**3.6.0** + + + + + +**3.5.1** + + + + + +## References + +- [Open Policy Agent Documentation](https://open-policy-agent.github.io/gatekeeper/website/docs) + +- [Open Policy Agent GitHub](https://github.com/open-policy-agent/gatekeeper) + + diff --git a/docs/docs-content/integrations/openstack-cinder.md b/docs/docs-content/integrations/openstack-cinder.md new file mode 100644 index 0000000000..85a530f56b --- /dev/null +++ b/docs/docs-content/integrations/openstack-cinder.md @@ -0,0 +1,62 @@ +--- +sidebar_label: 'OpenStackCinder' +title: 'Open Stack Cinder' +description: 'OpenStackCinder storage pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['storage', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-openstack-cinder/blobs/sha256:ebb9650566d2cdfe9b0fc7d474a1cdcd562a9020807e49f891df199379ab8961?type=image/png' +tags: ['packs', 'openstack-cinder', 'storage'] +--- + + +Unlike the traditional storage drivers of Kubernetes and the implementation of the Container Storage Interface (CSI), we can deliver storage plug-ins using a standard interface without ever having to change the core Kubernetes code. Open Stack Cinder provides OpenShift Container Platform users with storage options, such as volume snapshots that are not possible with in-tree volume plug-ins. + +## Versions Supported + + + + + +**1.23** + + + + + +**1.22** + + + + + +**1.21** + + + + + +**1.20** + + + + + +**1.19** + + + + + +**1.18** + + + + + +## References + +- [OpenStack Cinder CSI Driver Operator](https://docs.openshift.com/container-platform/4.7/storage/container_storage_interface/persistent-storage-csi-cinder.html#csi-about_persistent-storage-csi-cinder) + +- [CSI Cinder driver](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md/) diff --git a/docs/docs-content/integrations/permission-manager.md b/docs/docs-content/integrations/permission-manager.md new file mode 100644 index 0000000000..c8a8632210 --- /dev/null +++ b/docs/docs-content/integrations/permission-manager.md @@ -0,0 +1,51 @@ +--- +sidebar_label: 'Permission Manager' +title: 'Permission Manager' +description: 'Permission Manager Authentication pack in Spectro Cloud' +type: "integration" +hide_table_of_contents: true +category: ['authentication','amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/permission-manager/blobs/sha256:15d08b02d78823c12616b72d1b5adb0520940016b89bae1f758e6f1a105597ff?type=image/png' +tags: ['packs', 'permission-manager', 'security'] +--- + + +This integration provides a graphical user interface for RBAC management in Kubernetes. You can create users, assign namespaces/permissions, and distribute Kubeconfig YAML files quickly. + + +## Versions Supported + + + + +* **1.0.0** + + + + +## Configuration + +| Name | Supported Value | Description | +| --- | --- | --- | +| namespace| Any valid namespace string | The namespace under which this integration should be deployed onto| +| authPassword | | Login password for the web interface | + +## Customizing the permission templates + +Create a ClusterRole starting with `template-namespaced-resources___` or `template-cluster-resources___` and apply it to the cluster. Permission manager will honor any custom resources with this naming convention and will populate on the user interface. 
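+
+As a hedged sketch of the naming convention described above, the following ClusterRole would appear in the Permission Manager interface as a reusable template. The `developer` suffix and the rule set are illustrative; define the API groups, resources, and verbs your teams actually need.
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  # The template-namespaced-resources___ prefix is required; the suffix identifies the template.
+  name: template-namespaced-resources___developer
+rules:
+  - apiGroups: ["", "apps", "batch"]
+    resources: ["pods", "deployments", "jobs", "services", "configmaps"]
+    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+```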
+ +## Ingress + +Follow below steps to configure Ingress on Permission Manager + +1. Change serviceType from "LoadBalancer" to "ClusterIP" (line #10) +2. Ingress (line #13) + * Enable Ingress; Change enabled from false to "true" + * Set Ingress rules like annotations, path, hosts, etc. + +With these config changes, you can access Permission manager service on the Ingress Controller LoadBalancer hostname / IP + +## References + +- [Permission Manager GitHub](https://github.com/sighupio/permission-manager) diff --git a/docs/docs-content/integrations/portworx.md b/docs/docs-content/integrations/portworx.md new file mode 100644 index 0000000000..0d81424f38 --- /dev/null +++ b/docs/docs-content/integrations/portworx.md @@ -0,0 +1,551 @@ +--- +sidebar_label: 'Portworx' +title: 'Portworx' +description: 'Portworx storage integration for on-prem installations' +hide_table_of_contents: true +type: "integration" +category: ['storage', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/csi-portworx/blobs/sha256:e27bc9aaf22835194ca38062061c29b5921734eed922e57d693d15818ade7486?type=image/png' +tags: ['packs', 'portworx', 'storage'] +--- + +[Portworx](https://portworx.com/) is a software-defined persistent storage solution designed and purpose-built for applications deployed as containers, via container orchestrators such as Kubernetes. You can use Palette to install Portworx on the cloud or on-premises. + +## Versions Supported + +
+ + + + + +* **2.11.2** + + + + + + +* **2.10.0** + + + + + + +* **2.9.0** + + + + +* **2.8.0** + + + + +* **2.6.1** + + + + +## Prerequisites + +For deploying Portworx for Kubernetes, make sure to configure the properties in the pack: + + +* Have at least three nodes with the proper [hardware, software, and network requirements](https://docs.portworx.com/install-portworx/prerequisites). + + +* Ensure you are using a supported Kubernetes version. + + +* Identify and set up the storageType. + + +
+ +## Contents + +The default installation of Portworx will deploy the following components in the Kubernetes cluster. + + +* Portworx + + +* CSI Provisioner + + +* [Lighthouse](https://portworx.com/blog/manage-portworx-clusters-using-lighthouse/) + + +* [Stork](https://github.com/libopenstorage/stork) and [Stork on Portworx](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/stork/) + + +* Storage class making use of portworx-volume provisioner. + +## Parameters + +### Manifests - Portworx + +```yaml + +manifests: + portworx: + + # The namespace to install Portworx resources + namespace: "portworx" + + # Portworx storage type and size + storageType: "type=zeroedthick,size=150" + + # Max storgae nodes per zone + maxStorageNodesPerZone: 3 + + # Node recovery timeout in seconds + nodeRecoveryTimeout: 1500 + + # Portworx storage class config + storageClass: + enabled: true + isDefaultStorageClass: true + allowVolumeExpansion: true + reclaimPolicy: Retain + volumeBindingMode: Immediate + parameters: + repl: "3" + priority_io: "high" + #sharedv4: true + + k8sVersion: '{{.spectro.system.kubernetes.version}}' + + templateVersion: "v4" + + # List of additional container args to be passed + args: + ociMonitor: + #- "-dedicated_cache" + #- "-a" + storkDeployment: + #- "--app-initializer=true" + storkScheduler: + #- "--scheduler-name=xyz" + autoPilot: + csiProvisioner: + csiSnapshotter: + csiSnapshotController: + csiResizer: + + # The private registry from where images will be pulled from. When left empty, images will be pulled from the public registry + # Example, imageRegistry: "harbor.company.com/portworx" + imageRegistry: "" + +``` +# Integration With External etcd + +Starting Portworx v2.6.1, you can use the presets feature to toggle between the available ETCD options. + +By default, Portworx is set to use internal KVDB. However, you can integrate Portworx to an external etcd server by following the steps below. + +1. Enable `useExternalKvdb` flag by setting it to *true*. + + +2. Configure the external etcd endpoints in `externalKvdb.endpoints`. + + +If the external etcd server is configured to authenticate via certificates, additionally you may want to set up the following: + +1. Enable `externalKvdb.useCertsForSSL` flag by setting it to *true*. + + +2. Setup certificate related configuration in `externalKvdb.cacert`, `externalKvdb.cert`, and `externalKvdb.key`. + + +:::caution +Make sure to follow the correct indentation style; otherwise, certs will not be imported correctly and will result in Portworx deployment failure. +::: + + +## Etcd Presets + +These are the three types of Presets that can be selected and modified. + +
+ + + + +## Use Internal KVDB + +```yaml +# ECTD selection + useExternalKvdb: false + + # External kvdb related config + externalKvdb: + + useCertsForSSL: false + +vsphere-cloud-controller-manager: + k8sVersion: '{{.spectro.system.kubernetes.version}}' +``` + + + + +## Use Non-Secure KVDB Endpoints + +```yaml +# External kvdb related config + externalKvdb: + # List of External KVDB endpoints to use with Portworx. Used only when useExternalKvdb is true + endpoints: + - etcd:http://100.26.199.167:2379 + - etcd:http://100.26.199.168:2379 + - etcd:http://100.26.199.169:2379 + useCertsForSSL: false + useExternalKvdb: true + vsphere-cloud-controller-manager: + k8sVersion: '{{.spectro.system.kubernetes.version}}' +``` + + + + + +## Use Certs Secured KVDB Endpoints + +```yaml + +# External KVDB Related Configuration + externalKvdb: + # List of External KVDB endpoints to use with Portworx. Used only when useExternalKvdb is true + endpoints: + - etcd:https://100.26.199.167:2379 + - etcd:https://100.26.199.168:2379 + - etcd:https://100.26.199.169:2379 + useCertsForSSL: true + # The CA cert to use for etcd authentication. Make sure to follow the same indentation style as given in the example below + cacert: |- + -----BEGIN CERTIFICATE----- + MIIC3DCCAcQCCQCr1j968rOV3zANBgkqhkiG9w0BAQsFADAwMQswCQYDVQQGEwJV + UzELMAkGA1UECAwCQ0ExFDASBgNVBAcMC1NhbnRhIENsYXJhMB4XDTIwMDkwNDA1 + MzcyNFoXDTI1MDkwMzA1MzcyNFowMDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNB + MRQwEgYDVQQHDAtTYW50YSBDbGFyYTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC + AQoCggEBALt2CykKKwncWNQqB6Jg0QXd58qeDk40OF4Ti8DewZiZgpQOgA/+GYO7 + bx2/oQyAwjvhpTYjmMN5zORJpE3p9A+o57An1+B9D8gm1W1uABVEmwiKZhXpa+3H + Zlon58GR+kAJPbMIpvWbjMZb4fxZM0BPo0PHzzITccoaTV4+HY4YoDNAVjfZ1cEn + Hu2PUyN8M4RM+HdE4MOQVwqFDq/Fr6mLBMV0PdiwML0tjZ7GSGSjv1hme3mOLvKP + qSWx4hCd5oTegEfneUKKnVhH3JLpSU1NaC6jU3vhyowRNOShi77/uJCnkx3mp9JG + c4YruKrGc997wmUMsIv0owt49Y3dAi8CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEA + kEXPdtpOURiZIi01aNJkzLvm55CAhCg57ZVeyZat4/LOHdvo+eXeZ2LHRvEpbakU + 4h1TQJqeNTd3txI0eIx8WxpwbJNxesuTecCWSIeaN2AApIWzHev/N7ZYJsZ0EM2f + +rYVcX8mcOkLeyKDInCKySxIPok8kU4qQLTWytJbeRYhxh7mSMuZXu7mtSh0HdP1 + C84Ml+Ib9uY2lbr1+15MhfSKdpvmLVOibRIrdqQirNhl8uU9I1/ExDxXyR2NBMLW + tzGgsz5dfFDZ4oMqAc8Nqm9LuvmIZYMCunMZedI2h7jGH3LVQXdM81iZCgJdTgKf + i9CNyx+CcwUCkWQzhrHBQA== + -----END CERTIFICATE----- + # The cert to use for etcd authentication. 
Make sure to follow the same indentation style as given in the example below + cert: |- + -----BEGIN CERTIFICATE----- + MIIDaTCCAlGgAwIBAgIJAPLC+6M3EezhMA0GCSqGSIb3DQEBCwUAMDAxCzAJBgNV + BAYTAlVTMQswCQYDVQQIDAJDQTEUMBIGA1UEBwwLU2FudGEgQ2xhcmEwHhcNMjAw + OTA0MDUzODIyWhcNMjIxMjA4MDUzODIyWjA4MQswCQYDVQQGEwJVUzETMBEGA1UE + CAwKQ2FsaWZvcm5pYTEUMBIGA1UEBwwLU2FudGEgQ2xhcmEwggEiMA0GCSqGSIb3 + DQEBAQUAA4IBDwAwggEKAoIBAQCycmCHPrX0YNk75cu3H5SQv/D1qND2+2rGvv0Z + x28A98KR/Bdchk1QaE+UHYPWejsRWUtEB0Q0KreyxpwH1B4EHNKpP+jV9YqCo5fW + 3QRipWONKgvrSKkjVp/4U/NAAWCHfruB1d9u/qR4utY7sEKHE9AxmbyG+K19mOB2 + FJc7NOsTwN8d6uA5ZfFKmv3VtZzl0+Vq1qFSyIZT9zXYM22YjBAqXk9FVoI0FoQt + zpymQrsajfS+hNX7lSUVKKv3IplpNqSOyTHRF7TWo5NOH+YRWJHLAgZoq2w/yaEi + 5IdjLdb1JXmVUyBgq590WcJZDakwD9SPOHrM9K1vTl9I41q7AgMBAAGjfjB8MEoG + A1UdIwRDMEGhNKQyMDAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEUMBIGA1UE + BwwLU2FudGEgQ2xhcmGCCQCr1j968rOV3zAJBgNVHRMEAjAAMAsGA1UdDwQEAwIE + 8DAWBgNVHREEDzANggtleGFtcGxlLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAUOBn + YdTif6WlRpQOj+3quGrafJSNL8TqHkpmgaInSpMVFwDsmPF/HoAVVpX+H3oMY8p7 + Ll4I1Q7szpGRnKpJuzMZp5+gNpmwAz2MdAr7Ae9wH/+o8c2avbfpaHFWVTJZJ6X1 + Q6m6jmXcU0QSS4zj+lyxDNKnXfwVL8hVp0mXRFfPpb4l5ZCBoj4IA2UgyeU7F/nn + nvR5rmg781zc0lUL6X7HaSfQjtPDTSZYFqwE93vSe42JP7NWM96lZHy2IlfE88Wp + jUvOOJjaFVuluaJ78uCydMGEkJmipxH+1YXicH47RQ30tD5QyXxGBi+8jw5z0RiR + ptWD/oDFCiCjlffyzg== + -----END CERTIFICATE----- + # The key to use for etcd authentication. Make sure to follow the same indentation style as given in the example below + key: |- + -----BEGIN RSA PRIVATE KEY----- + MIIEogIBAAKCAQEAsnJghz619GDZO+XLtx+UkL/w9ajQ9vtqxr79GcdvAPfCkfwX + XIZNUGhPlB2D1no7EVlLRAdENCq3ssacB9QeBBzSqT/o1fWKgqOX1t0EYqVjjSoL + 60ipI1af+FPzQAFgh367gdXfbv6keLrWO7BChxPQMZm8hvitfZjgdhSXOzTrE8Df + HergOWXxSpr91bWc5dPlatahUsiGU/c12DNtmIwQKl5PRVaCNBaELc6cpkK7Go30 + voTV+5UlFSir9yKZaTakjskx0Re01qOTTh/mEViRywIGaKtsP8mhIuSHYy3W9SV5 + lVMgYKufdFnCWQ2pMA/Ujzh6zPStb05fSONauwIDAQABAoIBAGHELIKspv/m993L + Pttrn/fWUWwmO6a1hICzLvQqwfRjyeQ1m48DveQp4j+iFBM0EJymsYfp+0IhjVeT + XPUlD/Ts3bYA384pouOEQbJkkPyC5JH40WLtAk3sLeTeCc2tc3eIxa6SwMGNHgtP + QgSdwzVCc7RZKGNCZ7sCQSgwi9LRdyjHU0z0KW3lHqsMkK+yEg8zuH2DpIgvFej8 + KxjwF9ZEsnYDcERdd4TOu2NTEIl5N7F8E6di/CLP/wkfHazjX+qGcuBXjeGhPgdb + fKCcrFxhbavaJRMGLqnOD99l/zvySnA+LUSZ35KB/2ZfLMv71Z9oABTlyiR+76GW + 0lcQjmECgYEA2Jrq2qe7IUZ8CURWJ6rDKgD83LGRCHAWZ+dYvFmdsyfAGMV4+p4V + zKSidiTWAgl7ppiZdaEPu/2cH8uohDkdx2CTSUKPUM6+PBhE4hwSA42RlnIpGWbf + YEqcZ/qeo1IFb1A1YslwdslCVLc3INEbWairBEGis8aAxUaoEiTiPTMCgYEA0ubQ + 05BijLK6XH6YfASDLxwRg6jxn3mBqh+pAwE4tVVJVI9yXnNzN4/WKJJM+mdSGfpv + UcJy86ZcmHNzanZUPWh80U2pyRoVXvVQpY8hdMQ3neya60mc6+Nneba2LflkBVmd + cdoNGO0zAcGb0FKDCF2H3fizDxcoOyUjeKlLnFkCgYABU0lWlyok9PpzUBC642eY + TTM+4nNBuvXYIuk/FclKPFcHj8XCus7lVqiL0oPgtVAlX8+okZi4DMA0zZk1XegZ + vTSJgTfBRdKSKY/aVlOh4+7dHcu0lRWO0EYOuNDZrPnNiY8aEKN4hpi6TfivYbgq + H0cUmpY1RWSqUFlc6w7bUwKBgEMINctoksohbHZFjnWsgX2RsEdmhRWo6vuFgJSB + 6OJJrzr/NNysWSyJvQm8JldYS5ISNRuJcDvc3oVd/IsT/QZflXx48MQIVE6QLgfR + DFMuonbBYyPxi7y11Ies+Q53u8CvkQlEwvDvQ00Fml6GOzuHbs2wZEkhlRnnXfTV + 6kBRAoGAP9NUZox5ZrwkOx7iH/zEx3X3qzFoN/zSI2iUi2XRWaglGbNAxqX5/ug8 + xJIi1Z9xbsZ/3cPEdPif2VMdvIy9ZSsBwIEuzRf8YNw6ZGphsO95FKrgmoqA44mm + WsqUCBt5+DnOaDyvMkokP+T5tj/2LXemuIi4Q5nrOmw/WwVGGGs= + -----END RSA PRIVATE KEY----- + useExternalKvdb: true +vsphere-cloud-controller-manager: + k8sVersion: '{{.spectro.system.kubernetes.version}}' + +``` + + + + +# Environments + +
+
+
+
+
+## vSphere Environment
+
+To deploy Portworx storage in a vSphere environment, make sure to configure the following properties in the pack:
+
+* vSphere Configuration file
+
+
+* Storage Type
+
+
+* Kubernetes Version
+
+### vSphere Manifest
+
+The additional manifest parameters are as follows:
+
+ +```yaml + +# VSphere cloud configurations +vsphereConfig: + insecure: "true" + host: "" + port: "443" + datastorePrefix: "datastore" + installMode: "shared" + userName: "" + password: "" + # Enter the name of the secret which has vsphere user credentials (Use keys VSPHERE_USER, VSPHERE_PASSWORD) + userCredsSecret: "" +``` +
+
+## Using Secrets for vSphere User Credentials
+
+The Portworx pack values allow you to configure vSphere user credentials in two ways:
+
+
+1. Username and password - `portworx.vsphereConfig.userName` and `portworx.vsphereConfig.password`.
+
+
+2. Secret - `portworx.vsphereConfig.userCredsSecret`, available with v2.6.1 and above.
+
+
+If you choose the latter, make sure to create the secret in the target cluster manually or by bringing your own (BYO) manifest Add-on pack.
+
+
+:::caution
+Until the secret is created in the cluster, Portworx deployments might fail to run. Once the secret is configured, reconciliation should recover Portworx.
+:::
+
+The secret can be created using the spec below:
+
+
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: px-vsphere-secret
+  namespace: kube-system
+type: Opaque
+data:
+  VSPHERE_USER: "b64 encoded admin username"
+  VSPHERE_PASSWORD: "b64 encoded admin password"
+```
+
+This secret can then be referenced in the Portworx pack values as shown below:
+
+
+```yaml
+manifests:
+  portworx:
+    vsphereConfig:
+      userCredsSecret: "px-vsphere-secret"
+```
+
+Make sure to follow the correct indentation style; otherwise, certificates will not be imported correctly, resulting in a Portworx deployment failure.
+
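+
+If you prefer not to base64 encode the credentials by hand, you can create an equivalent secret directly with kubectl, which encodes literal values for you. This is a convenience sketch; the username and password shown are placeholders, not values from this documentation:
+
+```shell
+kubectl create secret generic px-vsphere-secret \
+  --namespace kube-system \
+  --from-literal=VSPHERE_USER='administrator@vsphere.local' \
+  --from-literal=VSPHERE_PASSWORD='example-password'
+```
+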
+
+
+
+## AWS Environment
+
+Palette can provision Portworx in an AWS environment. The following packs are supported:
+
+ +### Packs Supported + + + + +**portworx-aws-2.9** + + + + + +**portworx-aws-2.10** + + + + + +
+
+### Prerequisites
+
+To deploy Portworx in an AWS environment, ensure the following prerequisites are in place.
+
+
+* Ensure TCP ports **9001-9022** are open on the Portworx nodes.
+
+
+* Ensure UDP port **9002** is open on the Portworx nodes.
+
+
+* Apply the following policy to the **User** in AWS. An example CLI workflow follows the policy.
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Sid": "",
+            "Effect": "Allow",
+            "Action": [
+                "ec2:AttachVolume",
+                "ec2:ModifyVolume",
+                "ec2:DetachVolume",
+                "ec2:CreateTags",
+                "ec2:CreateVolume",
+                "ec2:DeleteTags",
+                "ec2:DeleteVolume",
+                "ec2:DescribeTags",
+                "ec2:DescribeVolumeAttribute",
+                "ec2:DescribeVolumesModifications",
+                "ec2:DescribeVolumeStatus",
+                "ec2:DescribeVolumes",
+                "ec2:DescribeInstances",
+                "autoscaling:DescribeAutoScalingGroups"
+            ],
+            "Resource": [
+                "*"
+            ]
+        }
+    ]
+}
+```
+
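+
+If you manage IAM from the command line, the following AWS CLI sketch creates the policy and attaches it to a user. The policy file name, policy name, user name, and account ID are illustrative placeholders and not part of the pack:
+
+```shell
+# Create the policy from a local file that contains the JSON document above (hypothetical file name).
+aws iam create-policy \
+  --policy-name portworx-node-policy \
+  --policy-document file://portworx-policy.json
+
+# Attach the policy to the AWS user that Portworx will use (hypothetical user name and account ID).
+aws iam attach-user-policy \
+  --user-name portworx-user \
+  --policy-arn arn:aws:iam::012345678901:policy/portworx-node-policy
+```
+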
+
+## AWS Manifest
+
+```yaml
+manifests:
+  portworx:
+
+    # The namespace to install Portworx resources
+    namespace: "portworx"
+
+    # Portworx storage type and size
+    storageType: "type=gp3,size=150"
+
+    # Max storage nodes per zone
+    maxStorageNodesPerZone: 3
+
+    # Node recovery timeout in seconds
+    nodeRecoveryTimeout: 1500
+
+    # Portworx storage class config
+    storageClass:
+      enabled: true
+      isDefaultStorageClass: true
+      allowVolumeExpansion: true
+      reclaimPolicy: Retain
+      volumeBindingMode: Immediate
+      parameters:
+        repl: "3"
+        priority_io: "high"
+        #sharedv4: true
+
+    # Kubernetes version.
+    k8sVersion: '{{.spectro.system.kubernetes.version}}'
+
+    templateVersion: "v4"
+
+    # List of additional container args to be passed
+    args:
+      ociMonitor:
+      #- "-dedicated_cache"
+      #- "-a"
+      storkDeployment:
+      #- "--app-initializer=true"
+      storkScheduler:
+      #- "--scheduler-name=xyz"
+      autoPilot:
+      csiProvisioner:
+      csiSnapshotter:
+      csiSnapshotController:
+      csiResizer:
+
+    # The private registry from which images will be pulled. When left empty, images will be pulled from the public registry
+    # Example, imageRegistry: "harbor.company.com/portworx"
+    imageRegistry: ""
+
+    # Etcd selection
+    useExternalKvdb: false
+
+    # External kvdb related config
+    externalKvdb:
+
+      useCertsForSSL: false
+```
+
+ + +
+ +
+ +
+ +## References + +- [Portworx Install with Kubernetes](https://docs.portworx.com/portworx-install-with-kubernetes/) + +- [Lighthouse](https://legacy-docs.portworx.com/enterprise/lighthouse-new.html) + +- [Installation Prerequisites](https://docs.portworx.com/install-portworx/prerequisites/) + +- [Install Portworx on AWS ASG using the DaemonSet](https://docs.portworx.com/install-portworx/cloud/aws/aws-asg/daemonset/) diff --git a/docs/docs-content/integrations/portworx_operator.md b/docs/docs-content/integrations/portworx_operator.md new file mode 100644 index 0000000000..e0fdf44f41 --- /dev/null +++ b/docs/docs-content/integrations/portworx_operator.md @@ -0,0 +1,621 @@ +--- +sidebar_label: 'Portworx /w Operator' +title: 'Portworx Operator' +description: 'Portworx storage CSI for all use cases' +hide_table_of_contents: true +type: "integration" +category: ['storage', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/csi-portworx/blobs/sha256:e27bc9aaf22835194ca38062061c29b5921734eed922e57d693d15818ade7486?type=image/png' +tags: ['packs', 'portworx', 'storage'] +--- + +[Portworx](https://portworx.com/) is a software-defined persistent storage solution designed and purpose-built for applications deployed as containers via container orchestrators such as Kubernetes. You can use Palette to install Portworx on a cloud platform, on-premises, or at the edge. + +## Versions Supported + + + + + +* **2.11.x** + + + + + +## Prerequisites + +For deploying Portworx with Operator for Kubernetes, make sure to configure the properties in the pack: +
+
+* Have at least three nodes with the proper [hardware, software, and network requirements](https://docs.portworx.com/install-portworx/prerequisites).
+
+* Ensure you use a supported Kubernetes version (1.19 or above). A quick way to verify the node count and version is shown after this list.
+
+* Identify and set up the `storageType`.
+
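+
+As a quick sanity check before installing the pack, you can confirm the node count and Kubernetes version from the command line. This is only a convenience sketch and assumes you have kubectl access to the target cluster:
+
+```shell
+# Lists every node with its Kubernetes version; confirm at least three nodes on a supported version (1.19 or above).
+kubectl get nodes -o wide
+```
+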
+
+## Contents
+
+The default installation of Portworx with the Operator deploys the following components in the Kubernetes cluster:
+
+
+* Portworx Operator
+
+* A `StorageCluster` resource that tells the Operator how to deploy and configure Portworx
+
+* A `StorageClass` resource for dynamic provisioning of PersistentVolumes using the portworx-volume provisioner
+
+* [Stork](https://github.com/libopenstorage/stork) and [Stork on Portworx](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/stork/)
+
+
+Optionally, you can enable [Lighthouse](https://legacy-docs.portworx.com/enterprise/lighthouse-new) for essential monitoring of the Portworx cluster.
+
+ +## Parameters + +### Charts - Portworx: + +```yaml +charts: + portworx-generic: + + license: + # Valid options for "type" are: essentials, saas, enterprise + # If you want to deploy the PX Enterprise Trial version, or need manual offline activation, + # select the "enterprise" type and set "activateLicense" to false. + type: essentials + # The next block only gets used if the type is set to "essentials" + essentials: + # Base64-decoded value of the px-essen-user-id value in the px-essential secret + # Find your Essentials Entitlement ID at https://central.portworx.com/profile + userId: 1234abcd-12ab-12ab-12ab-123456abcdef + # Base64-decoded value of the px-osb-endpoint value in the px-essential secret + # Leave at the default value unless there are special circumstances + endpoint: https://pxessentials.portworx.com/osb/billing/v1/register + # The next block only gets used if the type is set to "saas" + saas: + key: + # The next block only gets used if the type is set to "enterprise" + enterprise: + activateLicense: true + activationId: + # customLicenseServer: + # url: http://hostname:7070/fne/bin/capability + # importUnknownCa: true + # licenseBorrowInterval: 1w15m + # addFeatures: + # - feature1 + # - feature2 + + storageCluster: + # When autoGenerateName is true, a name of type "px-cluster-1234abcd-12ab-12ab-12ab-123456abcdef" is generated and the "name" field is ignored + autoGenerateName: false + name: "px-{{.spectro.system.cluster.name}}" + # annotations: + # If you need additional annotations, specify them here + spec: {} + # Use the Portworx Spec Builder at https://central.portworx.com/landing/login to define custom configurations, then paste the spec section here + + storageClass: + name: spectro-storage-class + isDefaultStorageClass: true + # annotations: + # If you need additional annotations, specify them here + allowVolumeExpansion: true + # Delete or Retain + reclaimPolicy: Delete + # WaitForFirstConsumer or Immediate + volumeBindingMode: WaitForFirstConsumer + parameters: + repl: "3" + priority_io: "high" + # sharedv4: true + # Add additional parameters as needed (https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/create-pvcs/dynamic-provisioning/) +``` +# License Model + +This pack can install Portworx in three different licensing modes: + +* **Essentials**: a free Portworx license with limited functionality that allows you to run small production or proof-of-concept workloads. Essentials limits capacity and advanced features, but otherwise functions the same way as the fully-featured Portworx Enterprise version of Portworx. + + +* **Enterprise**: the fully featured version of Portworx. If you install this model without a valid key, Portworx will automatically enter a 30-day trial mode. + + +* **Enterprise SaaS PAYG**: the fully featured version of Portworx but using a SaaS license key that allows unlimited use and in-arrears billing. If you install this model without a valid key, Portworx will automatically enter a 30-day trial mode. + + +Use the presets in the pack user interface to select which license model you want to use, then update the `charts.portworx-generic.license` section for your chosen license model. + +
+
+
+
+
+```yaml
+  license:
+    type: essentials
+    essentials:
+      # Base64-decoded value of the px-essen-user-id value in the px-essential secret
+      # Find your Essentials Entitlement ID at https://central.portworx.com/profile
+      userId: 1234abcd-12ab-12ab-12ab-123456abcdef
+      # Base64-decoded value of the px-osb-endpoint value in the px-essential secret
+      # Leave at the default value unless there are special circumstances
+      endpoint: https://pxessentials.portworx.com/osb/billing/v1/register
+```
+
+
+
+
+```yaml
+  license:
+    type: saas
+    saas:
+      key:
+```
+
+
+
+
+
+```yaml
+  license:
+    type: enterprise
+    enterprise:
+      activateLicense: true
+      activationId:
+      # customLicenseServer:
+      #   url: http://hostname:7070/fne/bin/capability
+      #   importUnknownCa: true
+      #   licenseBorrowInterval: 1w15m
+      # addFeatures:
+      # - feature1
+      # - feature2
+```
+
+
+
+
+
+## Storage Specification
+
+This pack can install Portworx in a variety of storage environments:
+
+* **Using existing disks (generic)**: This mode does not integrate with any particular storage solution; it simply uses existing disks available on the nodes.
+
+
+* **AWS Cloud Storage**: This mode integrates with Amazon EBS block volumes and allows EKS and EC2 Kubernetes clusters to dynamically attach EBS volumes to worker nodes for Portworx.
+
+
+* **Azure Cloud Storage**: This mode integrates with Azure block storage and allows AKS and regular Azure Kubernetes clusters to dynamically attach Azure block storage to worker nodes for Portworx.
+
+
+* **Google Cloud Storage**: This mode integrates with Google persistent disks and allows GKE and regular Google Kubernetes clusters to dynamically attach persistent disks to worker nodes for Portworx.
+
+
+* **VMware vSphere Datastores**: This mode integrates with VMware vSphere storage and allows Kubernetes clusters on vSphere to dynamically attach vSAN and regular Datastore disks to worker nodes for Portworx.
+
+
+* **Pure Storage Flash Array**: This mode integrates with Pure Storage Flash Arrays and allows Kubernetes clusters to dynamically attach Flash Array disks over iSCSI to worker nodes for Portworx.
+
+
+Use the presets in the pack user interface to select which storage specification you want to use, then update the `charts.portworx-generic.storageCluster` section to your specific needs.
+
+ + + + +```yaml + storageCluster: + spec: + # Use the Portworx Spec Builder at https://central.portworx.com/landing/login to define custom configurations, then paste the spec section here + image: portworx/oci-monitor:2.11.2 + imagePullPolicy: Always + kvdb: + internal: true + # endpoints: + # - etcd:https://etcd.company.domain:2379 + # authSecret: px-kvdb-auth + storage: + useAll: true + journalDevice: auto + secretsProvider: k8s + stork: + enabled: true + args: + webhook-controller: "true" + autopilot: + enabled: true + csi: + enabled: true + monitoring: + prometheus: + enabled: false + exportMetrics: false +``` + + + + +```yaml + storageCluster: + annotations: + portworx.io/is-eks: "true" + spec: + # Use the Portworx Spec Builder at https://central.portworx.com/landing/login to define custom configurations, then paste the spec section here + image: portworx/oci-monitor:2.11.2 + imagePullPolicy: Always + kvdb: + internal: true + # endpoints: + # - etcd:https://etcd.company.domain:2379 + # authSecret: px-kvdb-auth + cloudStorage: + deviceSpecs: + - type=gp2,size=150 + kvdbDeviceSpec: type=gp2,size=150 + secretsProvider: k8s + stork: + enabled: true + args: + webhook-controller: "true" + autopilot: + enabled: true + csi: + enabled: true + monitoring: + prometheus: + enabled: false + exportMetrics: false +``` +### Prerequisites + +To deploy Portworx in an AWS environment, ensure the following IAM Policy is created in AWS and attached to the correct IAM Role: +
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "ec2:AttachVolume",
+                "ec2:ModifyVolume",
+                "ec2:DetachVolume",
+                "ec2:CreateTags",
+                "ec2:CreateVolume",
+                "ec2:DeleteTags",
+                "ec2:DeleteVolume",
+                "ec2:DescribeTags",
+                "ec2:DescribeVolumeAttribute",
+                "ec2:DescribeVolumesModifications",
+                "ec2:DescribeVolumeStatus",
+                "ec2:DescribeVolumes",
+                "ec2:DescribeInstances",
+                "autoscaling:DescribeAutoScalingGroups"
+            ],
+            "Resource": [
+                "*"
+            ]
+        }
+    ]
+}
+```
+
+* When deploying a regular Kubernetes cluster on AWS EC2 using Palette, attach the policy to the `nodes.cluster-api-provider-aws.sigs.k8s.io` IAM Role (an example CLI command is shown after this list). Alternatively, edit the AWS cloud account in Palette, enable the `Add IAM Policies` option, and select the Portworx IAM Policy described above. This will automatically attach the IAM Policy to the correct IAM Role.
+
+* When deploying an EKS cluster, use the `managedMachinePool.roleAdditionalPolicies` option in the `kubernetes-eks` pack to automatically attach the Portworx IAM Policy to the EKS worker pool IAM role that Palette will manage for you. For example:
+
+```yaml
+managedMachinePool:
+  roleAdditionalPolicies:
+  - "arn:aws:iam::012345678901:policy/my-portworx-policy"
+```
+
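+
+If you prefer to attach the policy to the node IAM role yourself, the following AWS CLI sketch shows one way to do it. The role name matches the one referenced above, and the policy ARN reuses the placeholder account ID from the example:
+
+```shell
+# Attach the Portworx IAM policy to the IAM role used by the cluster's worker nodes.
+aws iam attach-role-policy \
+  --role-name nodes.cluster-api-provider-aws.sigs.k8s.io \
+  --policy-arn arn:aws:iam::012345678901:policy/my-portworx-policy
+```
+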
+ + +
+ + +```yaml + storageCluster: + annotations: + portworx.io/is-aks: "true" + spec: + # Use the Portworx Spec Builder at https://central.portworx.com/landing/login to define custom configurations, then paste the spec section here + image: portworx/oci-monitor:2.11.2 + imagePullPolicy: Always + kvdb: + internal: true + # endpoints: + # - etcd:https://etcd.company.domain:2379 + # authSecret: px-kvdb-auth + cloudStorage: + deviceSpecs: + - type=Premium_LRS,size=150 + kvdbDeviceSpec: type=Premium_LRS,size=150 + secretsProvider: k8s + stork: + enabled: true + args: + webhook-controller: "true" + autopilot: + enabled: true + csi: + enabled: true + monitoring: + prometheus: + enabled: false + exportMetrics: false + env: + - name: AZURE_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: px-azure + key: AZURE_CLIENT_SECRET + - name: AZURE_CLIENT_ID + valueFrom: + secretKeyRef: + name: px-azure + key: AZURE_CLIENT_ID + - name: AZURE_TENANT_ID + valueFrom: + secretKeyRef: + name: px-azure + key: AZURE_TENANT_ID + azureSecret: + tenantId: "your_azure_tenant_id" + clientId: "your_azure_client_id" + clientSecret: "your_client_secret" +``` + + + + +```yaml + storageCluster: + annotations: + portworx.io/is-gke: "true" + spec: + # Use the Portworx Spec Builder at https://central.portworx.com/landing/login to define custom configurations, then paste the spec section here + image: portworx/oci-monitor:2.11.2 + imagePullPolicy: Always + kvdb: + internal: true + # endpoints: + # - etcd:https://etcd.company.domain:2379 + # authSecret: px-kvdb-auth + cloudStorage: + deviceSpecs: + - type=pd-standard,size=150 + kvdbDeviceSpec: type=pd-standard,size=150 + secretsProvider: k8s + stork: + enabled: true + args: + webhook-controller: "true" + autopilot: + enabled: true + csi: + enabled: true + monitoring: + prometheus: + enabled: false + exportMetrics: false +``` + + + + +```yaml + storageCluster: + spec: + # Use the Portworx Spec Builder at https://central.portworx.com/landing/login to define custom configurations, then paste the spec section here + image: portworx/oci-monitor:2.11.2 + imagePullPolicy: Always + kvdb: + internal: true + # endpoints: + # - etcd:https://etcd.company.domain:2379 + # authSecret: px-kvdb-auth + cloudStorage: + deviceSpecs: + - type=lazyzeroedthick,size=150 + kvdbDeviceSpec: type=lazyzeroedthick,size=32 + secretsProvider: k8s + stork: + enabled: true + args: + webhook-controller: "true" + autopilot: + enabled: true + csi: + enabled: true + monitoring: + prometheus: + enabled: false + exportMetrics: false + env: + - name: VSPHERE_INSECURE + value: "true" + - name: VSPHERE_USER + valueFrom: + secretKeyRef: + name: px-vsphere-secret + key: VSPHERE_USER + - name: VSPHERE_PASSWORD + valueFrom: + secretKeyRef: + name: px-vsphere-secret + key: VSPHERE_PASSWORD + - name: VSPHERE_VCENTER + value: "my-vcenter.company.local" + - name: VSPHERE_VCENTER_PORT + value: "443" + - name: VSPHERE_DATASTORE_PREFIX + value: "datastore" + - name: VSPHERE_INSTALL_MODE + value: "shared" + vsphereSecret: + user: "username_for_vCenter_here" + password: "your_password" +``` + + + + +```yaml + storageCluster: + spec: + # Use the Portworx Spec Builder at https://central.portworx.com/landing/login to define custom configurations, then paste the spec section here + image: portworx/oci-monitor:2.11.2 + imagePullPolicy: Always + kvdb: + internal: true + # endpoints: + # - etcd:https://etcd.company.domain:2379 + # authSecret: px-kvdb-auth + cloudStorage: + deviceSpecs: + - size=150 + kvdbDeviceSpec: size=32 + secretsProvider: 
k8s + stork: + enabled: true + args: + webhook-controller: "true" + autopilot: + enabled: true + csi: + enabled: true + monitoring: + prometheus: + enabled: false + exportMetrics: false + env: + - name: PURE_FLASHARRAY_SAN_TYPE + value: "ISCSI" +``` + +To activate the Pure Flash Array integration, you will need to create a `secret` on your cluster named `px-pure-secret` that contains your Flash Array license. You can do this by running the below kubectl command: + +``` +kubectl create secret generic px-pure-secret --namespace kube-system --from-file=pure.json= +``` + + + +
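+
+For illustration, a complete invocation could look like the following, assuming the Flash Array license JSON was saved locally as `pure.json`. The file name and path are hypothetical:
+
+```shell
+# Create the px-pure-secret from a local license file (hypothetical path).
+kubectl create secret generic px-pure-secret \
+  --namespace kube-system \
+  --from-file=pure.json=./pure.json
+```
+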
+
+## Integration With External Etcd
+
+Portworx Enterprise supports multiple Etcd scenarios.
+
+By default, Portworx uses its internal key-value store (KVDB). However, you can integrate Portworx with an external Etcd server by following the steps below.
+
+
+1. Select the `Use External Kvdb over HTTP` or `Use External Kvdb over SSL` preset in the pack user interface. If your external Etcd server requires certificate authentication, you need the `Use External Kvdb over SSL` preset.
+
+
+2. Configure the external Etcd endpoint(s) in `charts.portworx-generic.storageCluster.spec.kvdb.endpoints`.
+
+
+3. When using the `Use External Kvdb over SSL` preset, leave the `charts.portworx-generic.storageCluster.spec.kvdb.authSecret` option at its default of `px-kvdb-auth`, since that is the name of the secret this pack creates.
+
+
+When using the `Use External Kvdb over SSL` preset, you additionally need to configure the `charts.portworx-generic.externalKvdb` section:
+
+
+1. Set `charts.portworx-generic.externalKvdb.useCertsForSSL` to `true` to enable certificate authentication.
+
+
+2. Enter your SSL certificates in the `cacert`, `cert`, and `key` sections of `charts.portworx-generic.externalKvdb`. The preset provides cropped example values that you can overwrite with your actual PEM certificates.
+
+
+:::caution
+Make sure to follow the provided indentation style; otherwise, the certificates will not be imported correctly, resulting in a Portworx deployment failure.
+:::
+
+
+## Kvdb and Etcd Presets
+
+The following are the three types of presets that can be selected and modified. The pack defaults to the `Use Internal Kvdb` option. Change to a different preset if you need to connect to an external Etcd server.
+
+ + + + +```yaml + storageCluster: + spec: + kvdb: + internal: true +``` + + + + +```yaml + storageCluster: + spec: + kvdb: + endpoints: + - etcd:http://etcd.company.domain:2379 +``` + + + + + +```yaml + storageCluster: + spec: + kvdb: + endpoints: + - etcd:http://etcd.company.domain:2379 + authSecret: px-kvdb-auth + + # External kvdb related config, only used if storageCluster.spec.kvdb.internal != true + externalKvdb: + useCertsForSSL: true + # The CA cert to use for etcd authentication. Make sure to follow the same indentation style as given in the example below + cacert: |- + -----BEGIN CERTIFICATE----- + MIIC3DCCAcQCCQCr1j968rOV3zANBgkqhkiG9w0BAQsFADAwMQswCQYDVQQGEwJV + < .. > + i9CNyx+CcwUCkWQzhrHBQA== + -----END CERTIFICATE----- + # The cert to use for etcd authentication. Make sure to follow the same indentation style as given in the example below + cert: |- + -----BEGIN CERTIFICATE----- + MIIDaTCCAlGgAwIBAgIJAPLC+6M3EezhMA0GCSqGSIb3DQEBCwUAMDAxCzAJBgNV + < .. > + ptWD/oDFCiCjlffyzg== + -----END CERTIFICATE----- + # The key to use for etcd authentication. Make sure to follow the same indentation style as given in the example below + key: |- + -----BEGIN RSA PRIVATE KEY----- + MIIEogIBAAKCAQEAsnJghz619GDZO+XLtx+UkL/w9ajQ9vtqxr79GcdvAPfCkfwX + < .. > + WsqUCBt5+DnOaDyvMkokP+T5tj/2LXemuIi4Q5nrOmw/WwVGGGs= + -----END RSA PRIVATE KEY----- +``` + + + + +
+ +## References + +- [Portworx Install with Kubernetes](https://docs.portworx.com/portworx-install-with-kubernetes/) +- [Lighthouse](https://docs.portworx.com/reference/lighthouse/) +- [Installation Prerequisites](https://docs.portworx.com/install-portworx/prerequisites/) diff --git a/docs/docs-content/integrations/prismacloud.md b/docs/docs-content/integrations/prismacloud.md new file mode 100644 index 0000000000..a670679251 --- /dev/null +++ b/docs/docs-content/integrations/prismacloud.md @@ -0,0 +1,27 @@ +--- +sidebar_label: 'prisma-cloud-compute' +title: 'Prisma Cloud Compute' +description: 'prism-cloud-compute Security pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['security', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/prismacloud/blobs/sha256:9ddb035af0e9f299e5df178ebb3153e90383a5e42ded2c1a3f6c9470dd851c12?type=image/png' +tags: ['packs', 'prismacloud', 'security'] +--- + +Prisma Cloud Compute is a cloud workload protection platform (CWPP) offering protection for hosts, containers, and server-less deployments in any cloud, and across the software lifecycle. Prisma Cloud Compute is cloud-native and API-enabled. It can protect tenant workloads, regardless of the underlying compute technology or the cloud deployment. +## Versions Supported + + + + + +**20.9.0** + + + + +## References + +- [Prisma Cloud Compute Documentation](https://docs.paloaltonetworks.com/prisma/prisma-cloud) diff --git a/docs/docs-content/integrations/prometheus-agent.md b/docs/docs-content/integrations/prometheus-agent.md new file mode 100644 index 0000000000..fd23c7c4b0 --- /dev/null +++ b/docs/docs-content/integrations/prometheus-agent.md @@ -0,0 +1,141 @@ +--- +sidebar_label: 'Prometheus Agent' +title: 'Prometheus Agent' +description: 'Prometheus Agent Monitoring Pack' +type: "integration" +hide_table_of_contents: true +category: ['monitoring', 'amd64', 'arm64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/prometheus-operator/blobs/sha256:64589616d7f667e5f1d7e3c9a39e32c676e03518a318924e123738693e104ce0?type=image/png' +tags: ['packs', 'prometheus-agent', 'monitoring'] +--- + +Prometheus is an open-source monitoring and alerting system that is designed to collect and analyze metrics from various systems and services. + +Prometheus is built around a time-series database that stores metrics data. It uses a flexible querying language called PromQL to extract and process metrics data. Prometheus also has a powerful alerting system that can be used to send notifications when specific conditions are met. + +Prometheus can be used to monitor a wide range of systems and services, including servers, containers, databases, and applications. It can be deployed in a variety of environments, including on-prem, cloud, and hybrid setups. + +The Prometheus Agent pack works in tandem with the [Prometheus Operator pack](prometheus-operator.md). Check out the guides [Deploy Monitoring Stack](../clusters/cluster-management/monitoring/deploy-monitor-stack.md) and [Enable Monitoring on Host Cluster](../clusters/cluster-management/monitoring/deploy-agent.md) to learn how to create a monitoring stack with Prometheus for your Palette environment. + + +## Versions Supported + +**19.0.X** + +## Prerequisites + +* A host cluster that has the [Prometheus Operator pack](prometheus-operator.md) installed. 
+ +## Parameters + +The Prometheus agent supports all the parameters exposed by the Prometheus Helm Chart. Refer to the [Prometheus Helm Chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus#configuration) documentation for details. + +From a Palette perspective, you must provide a value for the `remoteWrite.url` parameter shown in the following example. + +
+ +```yaml +charts: + prometheus: + server: + remoteWrite: + - url: "" +``` + +The `remoteWrite.url` is exposed by the [Prometheus Operator pack](prometheus-operator.md) when installed in a cluster. You can find the Prometheus server URL by reviewing the details of the Kubernetes cluster hosting the Prometheus server. Use the URL exposed by the Prometheus service. + +The following image displays a host cluster with the Prometheus Operator pack installed. Use the URL exposed for port 9090 to populate the `remoteWrite.url` parameter. + +![A view of the cluster details page with a highlighted box around the Prometheus service URL](/integrations_prometheus-agent_cluster-detail-view.png) + +
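+
+If you have kubectl access to the cluster hosting the monitoring stack, you can also look up the service endpoint from the command line. This is only a convenience sketch; it assumes the service keeps the `prometheus-operator-prometheus` name referenced in this documentation, and the namespace depends on how the pack was installed:
+
+```shell
+# Locate the Prometheus service and the load balancer endpoint that serves port 9090.
+kubectl get services --all-namespaces | grep prometheus-operator-prometheus
+```
+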
+ +:::caution + +The Prometheus server URL must be in the format `http://HOST:PORT/api/v1/write`. +Example: `http://a2c938972938b4f0daee5f56edbd40af-1690032247.us-east-1.elb.amazonaws.com:9090/api/v1/write` + +::: + +If the Prometheus server is configured with authentication, add the authentication parameters. Replace `` and `` with the actual credential values. + +
+ +```yaml +charts: + prometheus: + server: + remoteWrite: + - url: "" + remote_timeout: "5s" + basic_auth: + username: "" + password: +``` + +## Usage + +The Prometheus agent pack works out-of-the-box and only requires you to provide a Prometheus server URL. Add the Prometheus agent pack to a cluster profile to get started with Prometheus. You can create a new cluster profile that has the Prometheus agent as an add-on pack or you can [update an existing cluster profile](../cluster-profiles/task-update-profile.md) by adding the Prometheus agent pack. + + +Log in to the Grafana dashboard to view and create dashboards. You can find the Grafana dashboard URL by reviewing the details of the Kubernetes cluster hosting the Prometheus server. Use the URL exposed by the **prometheus-operator-kube-prometheus-stack-grafana** service. + +![The URL of the service prometheus-operator-kube-prometheus-stack-grafana](/integrations_prometheus-agent_cluster-detail-view-grafana.png) + + +Palette exposes a set of Grafana dashboards by default. You can find the Spectro Cloud dashboards by navigating to Grafana's left **Main Menu** > **Dashboards** and expanding the **Spectro Cloud** folder. + +The following dashboards are available by default: + +- Kubernetes/System/API Server: A view of the resources and status of the Kubernetes cluster hosting the Prometheus server. + + +- Kubernetes/Views/Global: An aggregate view of all the resources used by Kubernetes clusters. + + +- Kubernetes/Views/Namespaces: An aggregate view of all the resources used by a specific Kubernetes namespace. + + +- Kubernetes/Views/Nodes: A view of all nodes with the Prometheus agent installed. + + +- Kubernetes/Views/Pods: A view of all the pods in a node with the Prometheus agent installed. + +
+ +:::info + +Use the filters to narrow down the information displayed. All Palette dashboards include the **project** and **cluster** filter. + +::: + + +We encourage you to check out the [Grafana](https://grafana.com/tutorials/) tutorials and learning resources to learn more about Grafana. + +## Terraform + +You can retrieve details about the Prometheus agent pack by using the following Terraform code. + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "pack-info" { + name = "prometheus-agent" + version = "19.0.2" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + +# References + +- [Prometheus Operator pack](prometheus-operator.md) + + +- [Prometheus Helm Chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus#configuration) + + +- [Grafana Tutorials](https://grafana.com/tutorials/) diff --git a/docs/docs-content/integrations/prometheus-cluster-metrics.md b/docs/docs-content/integrations/prometheus-cluster-metrics.md new file mode 100644 index 0000000000..e2c90822e6 --- /dev/null +++ b/docs/docs-content/integrations/prometheus-cluster-metrics.md @@ -0,0 +1,66 @@ +--- +sidebar_label: 'Prometheus Cluster Metrics' +title: 'Prometheus Cluster Metrics' +description: "Use the Prometheus Cluster Metrics addon pack to expose Palette resource metrics" +type: "integration" +hide_table_of_contents: true +category: ['monitoring','amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/prometheus-operator/blobs/sha256:64589616d7f667e5f1d7e3c9a39e32c676e03518a318924e123738693e104ce0?type=image/png' +tags: ['packs', 'prometheus-cluster-metrics', 'monitoring'] +--- + + +The Prometheus Cluster Metrics pack exposes Palette-specific host cluster metrics to Prometheus. You can use this data to learn about the state of your clusters, resource utilization, and more. Use the [Spectro Cloud Grafana Dashboards](grafana-spectrocloud-dashboards.md) pack to access the metric data through Grafana dashboards. + + +## Versions Supported + +**3.4.X** + + +## Prerequisites + +* A host cluster that has the [Prometheus Operator pack](prometheus-operator.md) `v45.4.X` or greater installed. Check out the [Deploy Monitoring Stack](../clusters/cluster-management/monitoring/deploy-monitor-stack.md) for instructions on how to deploy a monitoring stack. + + +* A cluster profile with the [Prometheus Agent](prometheus-agent.md) pack `v19.0.X` or greater installed. + +## Usage + +The Prometheus Cluster Metrics requires no additional configuration and is designed to work out-of-the-box. + +You can learn how to add the Prometheus Cluster Metrics to your cluster by following the steps outlined in the [Enable Monitoring on Host Cluster](../clusters/cluster-management/monitoring/deploy-agent.md). + +Use the [Spectro Cloud Grafana Dashboards](grafana-spectrocloud-dashboards.md) pack to access the metric data through Grafana dashboards. + +## Terraform + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "cluster-metrics" { + name = "spectro-cluster-metrics" + version = "3.3.0" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + +## References + +- [Enable Monitoring on Host Cluster](../clusters/cluster-management/monitoring/deploy-agent.md). 
+ + +- [Deploy Monitoring Stack](../clusters/cluster-management/monitoring/deploy-monitor-stack.md) + + +- [Prometheus Operator pack](prometheus-operator.md) + + +- [Prometheus Agent](prometheus-agent.md) + + +- [Spectro Cloud Grafana Dashboards](grafana-spectrocloud-dashboards.md) \ No newline at end of file diff --git a/docs/docs-content/integrations/prometheus-operator.md b/docs/docs-content/integrations/prometheus-operator.md new file mode 100644 index 0000000000..c1e05fd186 --- /dev/null +++ b/docs/docs-content/integrations/prometheus-operator.md @@ -0,0 +1,1005 @@ +--- +sidebar_label: 'Prometheus Operator' +title: 'Prometheus Operator' +description: 'Prometheus Operator Monitoring pack in Spectro Cloud' +type: "integration" +hide_table_of_contents: true +category: ['monitoring', 'amd64', 'arm64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/prometheus-operator/blobs/sha256:64589616d7f667e5f1d7e3c9a39e32c676e03518a318924e123738693e104ce0?type=image/png' +tags: ['packs', 'prometheus-operator', 'monitoring'] +--- + +Prometheus is an open-source monitoring system that is designed to collect and analyze metrics from various sources, such as applications, servers, and networks. It is widely used in the DevOps world to monitor the health and performance of applications and infrastructure. Prometheus stores metrics in a time-series database and provides a query language for analyzing the data. It also includes a powerful alerting system that can notify operators when thresholds are breached. + +The Prometheus Operator is a tool that simplifies the deployment and management of Prometheus in a Kubernetes cluster. It automates tasks such as configuring Prometheus, creating and managing Prometheus rules and alerts and scaling Prometheus instances based on demand. The Operator uses Kubernetes custom resources to define and manage Prometheus instances and related resources, such as ServiceMonitors, which enable Prometheus to discover and monitor services running in the cluster. + + +You can use the Prometheus Operator to create a monitoring stack that other host clusters point to and forward metrics to. Check out the guide [Deploy Monitoring Stack](../clusters/cluster-management/monitoring/deploy-monitor-stack.md) to learn how to create a monitoring stack with Prometheus for your Palette environment. + +
+ +:::info + +We recommend you use version v44.3.x or greater moving forward for a simplified and improved user experience when creating a monitoring stack for your architecture. Starting with version v44.3.x the remote monitoring feature is supported. Check out the [Prometheus Remote Write Tuning](https://prometheus.io/docs/practices/remote_write/) to learn more about the remote monitoring feature. + +::: + +## Versions Supported + +
+ + + + + +## Prerequisites + +* Kubernetes v1.16 or greater. + + +* The minimum required size for the Prometheus server is 4 CPU, 8 GB Memory, and 10 GB Storage. We recommend the monitoring stack have 1.5x to 2x the minimum required size: + + Recommended size: + - 8 CPU + - 16 GB Memory + - 20 GB Storage + + + + As new clusters with the Prometheus agent are added to your environment, review the resource utilization and consider increasing resources if needed. As the Prometheus documentation recommends, each additional agent requires the following resources from the monitoring stack: + + Each added agent: + - 0.1 CPU + - 250 MiB Memory + - 1 GB Storage + + + Refer to the [Prometheus Operational aspects](https://prometheus.io/docs/prometheus/latest/storage/#operational-aspects) documentation for additional guidance. + +## Parameters + +The Prometheus operator supports all the parameters exposed by the kube-prometheus-stack Helm Chart. Refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) documentation for details. + + +The Prometheus Operator pack has one parameter you must initialize `grafana.adminPassword`: + +
+ +```yaml +charts: + kube-prometheus-stack: + grafana: + adminPassword: "" +``` + +Use the `grafana.adminPassword` parameter to assign a password to the Grafana admin user `admin`. + +Additional parameters you should be aware of can be found by expanding the **Presets** options. You can modify the preset settings when you create the profile or when you deploy the cluster and review the cluster profile. + +![A view of the pack's preset drawer expanded with radio buttons](/integrations_prometheus-operator_operator-preset-view-expanded.png) + +Review the usage section below to learn more about each preset option. + +
+ +## Usage + +Check out the guide [Deploy Monitoring Stack](../clusters/cluster-management/monitoring/deploy-monitor-stack.md) to learn how to create a monitoring stack with Prometheus for your Palette environment. + +
+ +#### Email Alerts + +You can configure the Prometheus server to send email alerts to a set of contacts. Toggle the **Email Alerts** button to enable email alerting. Update the `alertmanager.config.receivers` settings with all the required email setting values. + +
+ +```yaml +charts: + kube-prometheus-stack: + alertmanager: + config: + receivers: + - name: email-alert + email_configs: + - to: @.com + send_resolved: true + from: @.com + smarthost: smtp..com:587 + auth_username: @.com + auth_identity: @.com + auth_password: +``` + +Refer to the [Prometheus Alertmanager Configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation to learn more about Alertmanager. + +
+ +#### Grafana Ingress + +You can enable an ingress endpoint for Grafana that will deploy an NGINX ingress controller. This feature can be used to enable HTTPS and require authentication for all Prometheus API requests. + +If you do not enable the ingress option, then by default a service with a load balancer will be created that exposes port 80. + + +Toggle the **Enable** button to enable the use of Ingress. + +
+ + +#### Thanos SideCar + +[Thanos](https://prometheus-operator.dev/docs/operator/thanos/) is an open-source system for running large-scale, distributed, and highly available Prometheus setups. Thanos allows Prometheus to store data for extended periods in object storage, such as Amazon S3 or Google Cloud Storage, instead of a local disk. This enables Prometheus to scale horizontally without the risk of using up local storage space. + + +Toggle the **Enable** button to enable the use of Thanos. + +
+ +#### Object Store + +Select the Thanos object storage type you will use. Review the `thanos.objstoreConfig` parameters to configure the use of object storage with Thanos. Refer to the [Thanos Object Storage](https://github.com/thanos-io/thanos/blob/main/docs/storage.md) documentation to learn more about how to configure each object storage. + +
+ +```yaml +charts: + kube-prometheus-stack: + prometheus: + prometheusSpec: + thanos: + objstoreConfig: +``` + +#### Thanos Ruler Object Store + +By default, Thanos Ruler event data is saved in object storage specified for Thanos, but you can specify a different object storage for event data. Refer to the [Thanos Ruler](https://prometheus-operator.dev/docs/operator/thanos/?#thanos-ruler) resource to learn more. + + +
+ +#### Remote Monitoring + +You can configure the Prometheus server to accept metrics from Prometheus agents and become a centralized aggregation point for all Kubernetes metrics. Enabling this feature will expose port 9090 of the *prometheus-operator-prometheus* service. Use the generated service URL to provide other Kubernetes clusters with the installed [Prometheus Agent](prometheus-agent.md) so that cluster metrics can be forwarded to the Prometheus server. + + + +The remote monitoring feature is configured with defaults to help you consume this feature out-of-the-box. You can change any configuration related to remote monitoring to fine-tune settings for your environment. + +Refer to the [Prometheus Remote Write Tuning](https://prometheus.io/docs/practices/remote_write/) resource to learn more about configuration options. + + +To get started with remote monitoring, check out the [Deploy Monitoring Stack](../clusters/cluster-management/monitoring/deploy-monitor-stack.md) guide. + +
+ +#### Palette Resources Monitoring + +You can access internal Palette metrics in Grafana by adding the [Prometheus Cluster Metrics](prometheus-cluster-metrics.md) pack to all your client clusters. Refer to the [Enable Monitoring on Host Cluster](../clusters/cluster-management/monitoring/deploy-agent.md) guide to learn more. + +
+ + +#### Persistent Storage + +You can configure the Prometheus Operator to use persistent storage. To enable persistent storage add the following code snippet to the `kube-prometheus-stack.prometheus.prometheusSpec.storageSpec` configuration block in the pack's YAML configuration file. The code snippet below creates a Persistent Volume Claim (PVC) for the Prometheus Operator. + +
+ + +```yaml +kube-prometheus-stack: + prometheus: + prometheusSpec: + storageSpec: + volumeClaimTemplate: + metadata: + name: prom-operator-pvc + spec: + storageClassName: spectro-storage-class + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 5Gi +``` + +### Dependencies + +The Prometheus Operator pack installs the following dependencies: + +* [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) +* [Prometheus](https://prometheus.io/) +* [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager/) + +* [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) +* [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) +* [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) +* Service monitors to scrape internal Kubernetes components + + +
+ + + +## Prerequisites + +* Kubernetes v1.16 or greater. + + +* The minimum required size for the Prometheus server is 4 CPU, 8 GB Memory, and 10 GB Storage. We recommend the monitoring stack have 1.5x to 2x the minimum required size: + + Recommended size: + - 8 CPU + - 16 GB Memory + - 20 GB Storage + + + + As new clusters with the Prometheus agent are added to your environment, review the resource utilization and consider increasing resources if needed. As the Prometheus documentation recommends, each additional agent requires the following resources from the monitoring stack: + + Each added agent: + - 0.1 CPU + - 250 MiB Memory + - 1 GB Storage + + + Refer to the [Prometheus Operational aspects](https://prometheus.io/docs/prometheus/latest/storage/#operational-aspects) documentation for additional guidance. + +## Parameters + +The Prometheus operator supports all the parameters exposed by the kube-prometheus-stack Helm Chart. Refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) documentation for details. + + +The Prometheus Operator pack has one parameter you must initialize `grafana.adminPassword`: + +
+ +```yaml +charts: + kube-prometheus-stack: + grafana: + adminPassword: "" +``` + +Use the `grafana.adminPassword` parameter to assign a password to the Grafana admin user `admin`. + +Additional parameters you should be aware of can be found by expanding the **Presets** options. You can modify the preset settings when you create the profile or when you deploy the cluster and review the cluster profile. + +![A view of the pack's preset drawer expanded with radio buttons](/integrations_prometheus-operator_operator-preset-view-expanded.png) + +Review the usage section below to learn more about each preset option. + +
+ +## Usage + +Check out the guide [Deploy Monitoring Stack](../clusters/cluster-management/monitoring/deploy-monitor-stack.md) to learn how to create a monitoring stack with Prometheus for your Palette environment. + +
+ +#### Email Alerts + +You can configure the Prometheus server to send email alerts to a set of contacts. Toggle the **Email Alerts** button to enable email alerting. Update the `alertmanager.config.receivers` settings with all the required email setting values. + +
+ +```yaml +charts: + kube-prometheus-stack: + alertmanager: + config: + receivers: + - name: email-alert + email_configs: + - to: @.com + send_resolved: true + from: @.com + smarthost: smtp..com:587 + auth_username: @.com + auth_identity: @.com + auth_password: +``` + +Refer to the [Prometheus Alertmanager Configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation to learn more about Alertmanager. + +
+ +#### Grafana Ingress + +You can enable an ingress endpoint for Grafana that will deploy an NGINX ingress controller. This feature can be used to enable HTTPS and require authentication for all Prometheus API requests. + +If you do not enable the ingress option, then by default a service with a load balancer will be created that exposes port 80. + + +Toggle the **Enable** button to enable the use of Ingress. + +
+ + +#### Thanos SideCar + +[Thanos](https://prometheus-operator.dev/docs/operator/thanos/) is an open-source system for running large-scale, distributed, and highly available Prometheus setups. Thanos allows Prometheus to store data for extended periods in object storage, such as Amazon S3 or Google Cloud Storage, instead of a local disk. This enables Prometheus to scale horizontally without the risk of using up local storage space. + + +Toggle the **Enable** button to enable the use of Thanos. + +
+ +#### Object Store + +Select the Thanos object storage type you will use. Review the `thanos.objstoreConfig` parameters to configure the use of object storage with Thanos. Refer to the [Thanos Object Storage](https://github.com/thanos-io/thanos/blob/main/docs/storage.md) documentation to learn more about how to configure each object storage. + +
+ +```yaml +charts: + kube-prometheus-stack: + prometheus: + prometheusSpec: + thanos: + objstoreConfig: +``` + +#### Thanos Ruler Object Store + +By default, Thanos Ruler event data is saved in object storage specified for Thanos, but you can specify a different object storage for event data. Refer to the [Thanos Ruler](https://prometheus-operator.dev/docs/operator/thanos/?#thanos-ruler) resource to learn more. + + +
+ +#### Remote Monitoring + +You can configure the Prometheus server to accept metrics from Prometheus agents and become a centralized aggregation point for all Kubernetes metrics. Enabling this feature will expose port 9090 of the *prometheus-operator-prometheus* service. Use the generated service URL to provide other Kubernetes clusters with the installed [Prometheus Agent](prometheus-agent.md) so that cluster metrics can be forwarded to the Prometheus server. + + + +The remote monitoring feature is configured with defaults to help you consume this feature out-of-the-box. You can change any configuration related to remote monitoring to fine-tune settings for your environment. + +Refer to the [Prometheus Remote Write Tuning](https://prometheus.io/docs/practices/remote_write/) resource to learn more about configuration options. + + +To get started with remote monitoring, check out the [Deploy Monitoring Stack](../clusters/cluster-management/monitoring/deploy-monitor-stack.md) guide. + +
+ +#### Palette Resources Monitoring + +You can access internal Palette metrics in Grafana by adding the [Prometheus Cluster Metrics](prometheus-cluster-metrics.md) pack to all your client clusters. Refer to the [Enable Monitoring on Host Cluster](../clusters/cluster-management/monitoring/deploy-agent.md) guide to learn more. + +
+ +### Dependencies + +The Prometheus Operator pack installs the following dependencies: + +* [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) +* [Prometheus](https://prometheus.io/) +* [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager/) + +* [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) +* [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) +* [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) +* Service monitors to scrape internal Kubernetes components + + + +
+
+
+
+## Prerequisites
+
+* Kubernetes v1.16 or greater.
+
+
+* The minimum required size for the Prometheus server is 4 CPU, 8 GB Memory, and 10 GB Storage. We recommend that the monitoring stack have 1.5x to 2x the minimum required size:
+
+  Recommended size:
+  - 8 CPU
+  - 16 GB Memory
+  - 20 GB Storage.
+
+
+  As new clusters with the Prometheus agent are added to your environment, review the resource utilization and consider increasing resources if needed. As the Prometheus documentation recommends, each additional agent requires the following resources from the monitoring stack:
+
+  Each added agent:
+  - 0.1 CPU
+  - 250 MiB Memory
+  - 1 GB Storage.
+
+  Refer to the [Prometheus Operational aspects](https://prometheus.io/docs/prometheus/latest/storage/#operational-aspects) documentation for additional guidance.
+
+## Parameters
+
+The Prometheus operator supports all the parameters exposed by the kube-prometheus-stack Helm Chart. Refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) documentation for details.
+
+The Prometheus Operator pack has one parameter you must initialize, `grafana.adminPassword`:
+
+ +```yaml +charts: + kube-prometheus-stack: + grafana: + adminPassword: "" +``` + +Use the `grafana.adminPassword` parameter to assign a password to the Grafana admin user `admin`. + +Additional parameters you should be aware of can be found by expanding the **Presets** options. You can modify the preset settings when you create the profile or when you deploy the cluster and review the cluster profile. + +![A view of the pack's preset drawer expanded with radio buttons](/integrations_prometheus-operator_operator-preset-view-expanded.png) + +Review the usage section below to learn more about each preset option. + +
+ +## Usage + +Check out the guide [Deploy Monitoring Stack](../clusters/cluster-management/monitoring/deploy-monitor-stack.md) to learn how to create a monitoring stack with Prometheus for your Palette environment. + +
+ +#### Email Alerts + +You can configure the Prometheus server to send email alerts to a set of contacts. Toggle the **Email Alerts** button to enable email alerting. Update the `alertmanager.config.receivers` settings with all the required email setting values. + +
+ +```yaml +charts: + kube-prometheus-stack: + alertmanager: + config: + receivers: + - name: email-alert + email_configs: + - to: @.com + send_resolved: true + from: @.com + smarthost: smtp..com:587 + auth_username: @.com + auth_identity: @.com + auth_password: +``` + +Refer to the [Prometheus Alertmanager Configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation to learn more about Alertmanager. + +
+ +#### Grafana Ingress + +You can enable an ingress endpoint for Grafana that will deploy an NGINX ingress controller. This feature can be used to enable HTTPS and require authentication for all Prometheus API requests. + +If you do not enable the ingress option, then by default a service with a load balancer will be created that exposes port 80. + + +Toggle the **Enable** button to enable the use of Ingress. + +
+ + +#### Thanos SideCar + +[Thanos](https://prometheus-operator.dev/docs/operator/thanos/) is an open-source system for running large-scale, distributed, and highly available Prometheus setups. Thanos allows Prometheus to store data for extended periods in object storage, such as Amazon S3 or Google Cloud Storage, instead of a local disk. This enables Prometheus to scale horizontally without the risk of using up local storage space. + + +Toggle the **Enable** button to enable the use of Thanos. + +
+ +#### Object Store + +Select the Thanos object storage type you will use. Review the `thanos.objstoreConfig` parameters to configure the use of object storage with Thanos. Refer to the [Thanos Object Storage](https://github.com/thanos-io/thanos/blob/main/docs/storage.md) documentation to learn more about how to configure each object storage. + +
+ +```yaml +charts: + kube-prometheus-stack: + prometheus: + prometheusSpec: + thanos: + objstoreConfig: +``` + +#### Thanos Ruler Object Store + +By default, Thanos Ruler event data is saved in object storage specified for Thanos, but you can specify a different object storage for event data. Refer to the [Thanos Ruler](https://prometheus-operator.dev/docs/operator/thanos/?#thanos-ruler) resource to learn more. + + +
+
+#### Remote Monitoring
+
+You can configure the Prometheus server to accept metrics from Prometheus agents and become a centralized aggregation point for all Kubernetes metrics. Enabling this feature will expose port 9090 of the *prometheus-operator-prometheus* service. Use the generated service URL to provide other Kubernetes clusters with the [Prometheus Agent](prometheus-agent.md) installed so that cluster metrics can be forwarded to the Prometheus server.
+
+
+The remote monitoring feature is configured with defaults that let you use it out-of-the-box. You can change any configuration related to remote monitoring to fine-tune settings for your environment.
+
+Refer to the [Prometheus Remote Write](https://prometheus.io/docs/practices/remote_write/) resource to learn more about configuration options.
+
+To get started with remote monitoring, check out the [Deploy Monitoring Stack](../clusters/cluster-management/monitoring/deploy-monitor-stack.md) guide.
+
+### Dependencies
+
+The Prometheus Operator pack installs the following dependencies:
+
+* [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator)
+* [Prometheus](https://prometheus.io/)
+* [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager/)
+* [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter)
+* [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics)
+* [Grafana](https://github.com/helm/charts/tree/master/stable/grafana)
+* Service monitors to scrape internal Kubernetes components
+
+ + + +## Prerequisites + +* Kubernetes v1.16 or greater. + +## Parameters + +The Prometheus operator supports all the parameters exposed by the kube-prometheus-stack Helm Chart. Refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stackn) documentation for details. + +The Prometheus Operator pack has one parameter you must initialize `grafana.adminPassword`: + +
+ +```yaml +charts: + kube-prometheus-stack: + grafana: + adminPassword: "" +``` + +Use the `grafana.adminPassword` parameter to assign a password to the Grafana admin user `admin`. + +Additional parameters you should be aware of can be found by expanding the **Presets** options. You can modify the preset settings when you create the profile or when you deploy the cluster and review the cluster profile. + +![A view of the pack's preset drawer expanded with radio buttons](/integrations_prometheus-operator_operator-preset-view-expanded.png) + +Review the usage section below to learn more about each preset option. + +
+ +#### Email Alerts + +You can configure the Prometheus server to send email alerts to a set of contacts. Toggle the **Email Alerts** button to enable email alerting. Update the `alertmanager.config.receivers` settings with all the required email setting values. + +
+ +```yaml +charts: + kube-prometheus-stack: + alertmanager: + config: + receivers: + - name: email-alert + email_configs: + - to: @.com + send_resolved: true + from: @.com + smarthost: smtp..com:587 + auth_username: @.com + auth_identity: @.com + auth_password: +``` + +Refer to the [Prometheus Alertmanager Configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation to learn more about Alertmanager. + +
+ +#### Grafana Ingress + +You can enable an ingress endpoint for Grafana that will deploy an NGINX ingress controller. This feature can be used to enable HTTPS and require authentication for all Prometheus API requests. + +If you do not enable the ingress option, by default a service with a load balancer will be created that exposes port 80. + + +Toggle the **Enable** button to enable the use of Ingress. + +
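+For reference, Grafana ingress is driven by the standard `grafana.ingress` values of the underlying Helm chart, which the preset toggle manages for you. The following is a minimal sketch with a placeholder host name, not the pack's exact defaults:
+
+```yaml
+charts:
+  kube-prometheus-stack:
+    grafana:
+      ingress:
+        enabled: true
+        hosts:
+          - grafana.example.com
+```
+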
+ + +#### Thanos SideCar + +[Thanos](https://prometheus-operator.dev/docs/operator/thanos/) is an open-source system for running large-scale, distributed, and highly available Prometheus setups. Thanos allows Prometheus to store data for extended periods in object storage, such as Amazon S3 or Google Cloud Storage, instead of a local disk. This enables Prometheus to scale horizontally without the risk of using up local storage space. + +Toggle the **Enable** button to enable the use of Thanos. + +
+ +#### Object Store + +Select the Thanos object storage type you will use. Review the `thanos.objstoreConfig` parameters to configure the use of object storage with Thanos. Refer to the [Thanos Object Storage](https://github.com/thanos-io/thanos/blob/main/docs/storage.md) documentation to learn more about how to configure each object storage. + +
+
+```yaml
+charts:
+  kube-prometheus-stack:
+    prometheus:
+      prometheusSpec:
+        thanos:
+          objstoreConfig:
+```
+
+#### Thanos Ruler Object Store
+
+By default, Thanos Ruler event data is saved in the object storage specified for Thanos, but you can specify a different object storage for event data. Refer to the [Thanos Ruler](https://prometheus-operator.dev/docs/operator/thanos/?#thanos-ruler) resource to learn more.
+
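+For reference, the value supplied to `thanos.objstoreConfig` shown above follows the standard Thanos object store format. The following is a minimal sketch for an S3-compatible bucket; the bucket name, endpoint, and credentials are placeholders:
+
+```yaml
+type: S3
+config:
+  bucket: <bucket-name>
+  endpoint: s3.us-east-1.amazonaws.com
+  access_key: <access-key>
+  secret_key: <secret-key>
+```
+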
+ + +### Dependencies + +The Prometheus Operator pack installs the following dependencies: + +* [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) +* [Prometheus](https://prometheus.io/) +* [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager/). +* [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) +* [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) +* [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) +* and the service monitors to scrape internal Kubernetes components. + +
+ + + +## Prerequisites + +* Kubernetes v1.16 or greater. + +## Parameters + +The Prometheus operator supports all the parameters exposed by the kube-prometheus-stack Helm Chart. Refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stackn) documentation for details. + +The Prometheus Operator pack has one parameter you must initialize `grafana.adminPassword`: + +
+
+```yaml
+charts:
+  kube-prometheus-stack:
+    grafana:
+      adminPassword: ""
+```
+
+Use the `grafana.adminPassword` parameter to assign a password to the Grafana admin user `admin`.
+
+Additional parameters you should be aware of can be found by expanding the **Presets** view of the pack. You can modify the preset settings during the profile creation process or the cluster deployment process when reviewing the cluster profile.
+
+![A view of the pack's preset drawer expanded with radio buttons](/integrations_prometheus-operator_operator-preset-view-expanded.png)
+
+Review the usage section below to learn more about each preset option.
+
+ +#### Email Alerts + +You can configure the Prometheus server to send email alerts to a set of contacts. Toggle the **Email Alerts** button to enable email alerting. Update the `alertmanager.config.receivers` settings with all the required email setting values. + +
+ +```yaml +charts: + kube-prometheus-stack: + alertmanager: + config: + receivers: + - name: email-alert + email_configs: + - to: @.com + send_resolved: true + from: @.com + smarthost: smtp..com:587 + auth_username: @.com + auth_identity: @.com + auth_password: +``` + +Refer to the [Prometheus Alertmanager Configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation to learn more about Alertmanager. + +
+ +#### Grafana Ingress + +You can enable an ingress endpoint for Grafana that will deploy an NGINX ingress controller. This feature can be used to enable HTTPS and require authentication for all Prometheus API requests. + +If you do not enable the ingress option, then by default a service with a load balancer will be created that exposes port 80. + + +Toggle the **Enable** button to enable the use of Ingress. + +
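+For reference, Grafana ingress is driven by the standard `grafana.ingress` values of the underlying Helm chart, which the preset toggle manages for you. The following is a minimal sketch with a placeholder host name, not the pack's exact defaults:
+
+```yaml
+charts:
+  kube-prometheus-stack:
+    grafana:
+      ingress:
+        enabled: true
+        hosts:
+          - grafana.example.com
+```
+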
+ + +#### Thanos SideCar + +[Thanos](https://prometheus-operator.dev/docs/operator/thanos/) is an open-source system for running large-scale, distributed, and highly available Prometheus setups. Thanos allows Prometheus to store data for extended periods in object storage, such as Amazon S3 or Google Cloud Storage, instead of a local disk. This enables Prometheus to scale horizontally without the risk of using up local storage space. + + +Toggle the **Enable** button to enable the use of Thanos. + +
+ +#### Object Store + +Select the Thanos object storage type you will use. Review the `thanos.objstoreConfig` parameters to configure the use of object storage with Thanos. Refer to the [Thanos Object Storage](https://github.com/thanos-io/thanos/blob/main/docs/storage.md) documentation to learn more about how to configure each object storage. + +
+
+```yaml
+charts:
+  kube-prometheus-stack:
+    prometheus:
+      prometheusSpec:
+        thanos:
+          objstoreConfig:
+```
+
+#### Thanos Ruler Object Store
+
+By default, Thanos Ruler event data is saved in the object storage specified for Thanos, but you can specify a different object storage for event data. Refer to the [Thanos Ruler](https://prometheus-operator.dev/docs/operator/thanos/?#thanos-ruler) resource to learn more.
+
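+For reference, the value supplied to `thanos.objstoreConfig` shown above follows the standard Thanos object store format. The following is a minimal sketch for an S3-compatible bucket; the bucket name, endpoint, and credentials are placeholders:
+
+```yaml
+type: S3
+config:
+  bucket: <bucket-name>
+  endpoint: s3.us-east-1.amazonaws.com
+  access_key: <access-key>
+  secret_key: <secret-key>
+```
+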
+ + +### Dependencies + +The Prometheus Operator pack installs the following dependencies: + +* [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) +* [Prometheus](https://prometheus.io/) +* [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager/). +* [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) +* [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) +* [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) +* and the service monitors to scrape internal Kubernetes components. + + +
+ + + +## Prerequisites + +* Kubernetes v1.16 or greater. + +## Parameters + +The Prometheus operator supports all the parameters exposed by the kube-prometheus-stack Helm Chart. Refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stackn) documentation for details. + +The Prometheus Operator pack has one parameter you must initialize `grafana.adminPassword`: + +
+ +```yaml +charts: + kube-prometheus-stack: + grafana: + adminPassword: "" +``` + +Use the `grafana.adminPassword` parameter to assign a password to the Grafana admin user `admin`. + +Additional parameters you should be aware of can be found by expanding the **Presets** options. You can modify the preset settings when you create the profile or when you deploy the cluster and review the cluster profile. + +![A view of the pack's preset drawer expanded with radio buttons](/integrations_prometheus-operator_operator-preset-view-expanded.png) + +Review the usage section below to learn more about each preset option. + +
+ +#### Email Alerts + +You can configure the Prometheus server to send email alerts to a set of contacts. Toggle the **Email Alerts** button to enable email alerting. Update the `alertmanager.config.receivers` settings with all the required email setting values. + +
+ +```yaml +charts: + kube-prometheus-stack: + alertmanager: + config: + receivers: + - name: email-alert + email_configs: + - to: @.com + send_resolved: true + from: @.com + smarthost: smtp..com:587 + auth_username: @.com + auth_identity: @.com + auth_password: +``` + +Refer to the [Prometheus Alertmanager Configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation to learn more about Alertmanager. + +
+ +#### Grafana Ingress + +You can enable an ingress endpoint for Grafana that will deploy an NGINX ingress controller. This feature can be used to enable HTTPS and require authentication for all Prometheus API requests. + +If you do not enable the ingress option, then by default a service with a load balancer will be created that exposes port 80. + + +Toggle the **Enable** button to enable the use of Ingress. + +
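+For reference, Grafana ingress is driven by the standard `grafana.ingress` values of the underlying Helm chart, which the preset toggle manages for you. The following is a minimal sketch with a placeholder host name, not the pack's exact defaults:
+
+```yaml
+charts:
+  kube-prometheus-stack:
+    grafana:
+      ingress:
+        enabled: true
+        hosts:
+          - grafana.example.com
+```
+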
+ + +#### Thanos SideCar + +[Thanos](https://prometheus-operator.dev/docs/operator/thanos/) is an open-source system for running large-scale, distributed, and highly available Prometheus setups. Thanos allows Prometheus to store data for extended periods in object storage, such as Amazon S3 or Google Cloud Storage, instead of a local disk. This enables Prometheus to scale horizontally without the risk of using up local storage space. + + +Toggle the **Enable** button to enable the use of Thanos. + +
+ +#### Object Store + +Select the Thanos object storage type you will use. Review the `thanos.objstoreConfig` parameters to configure the use of object storage with Thanos. Refer to the [Thanos Object Storage](https://github.com/thanos-io/thanos/blob/main/docs/storage.md) documentation to learn more about how to configure each object storage. + +
+ +```yaml +charts: + kube-prometheus-stack: + prometheus: + prometheusSpec: + thanos: + objstoreConfig: +``` +### Dependencies + +The Prometheus Operator pack installs the following dependencies: + +* [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) +* [Prometheus](https://prometheus.io/) +* [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager/). +* [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) +* [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) +* [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) +* and the service monitors to scrape internal Kubernetes components. + + +
+ + + + +## Prerequisites + +* Kubernetes v1.16 or greater. + +## Parameters + +The Prometheus operator supports all the parameters exposed by the kube-prometheus-stack Helm Chart. Refer to the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stackn) documentation for details. + +The Prometheus Operator pack has one parameter you must initialize `grafana.adminPassword`: + +
+
+```yaml
+charts:
+  kube-prometheus-stack:
+    grafana:
+      adminPassword: ""
+```
+
+Use the `grafana.adminPassword` parameter to assign a password to the Grafana admin user `admin`.
+
+Additional parameters you should be aware of can be found by expanding the **Presets** options. You can modify the preset settings when you create the profile or when you deploy the cluster and review the cluster profile.
+
+![A view of the pack's preset drawer expanded with radio buttons](/integrations_prometheus-operator_operator-preset-view-expanded.png)
+
+Review the usage section below to learn more about each preset option.
+
+ +#### Email Alerts + +You can configure the Prometheus server to send email alerts to a set of contacts. Toggle the **Email Alerts** button to enable email alerting. Update the `alertmanager.config.receivers` settings with all the required email setting values. + +
+ +```yaml +charts: + kube-prometheus-stack: + alertmanager: + config: + receivers: + - name: email-alert + email_configs: + - to: @.com + send_resolved: true + from: @.com + smarthost: smtp..com:587 + auth_username: @.com + auth_identity: @.com + auth_password: +``` + +Refer to the [Prometheus Alertmanager Configuration](https://prometheus.io/docs/alerting/latest/configuration/) documentation to learn more about Alertmanager. + +
+ +#### Grafana Ingress + +You can enable an ingress endpoint for Grafana that will deploy an NGINX ingress controller. This feature can be used to enable HTTPS and require authentication for all Prometheus API requests. + +If you do not enable the ingress option, then by default a service with a load balancer will be created that exposes port 80. + + +Toggle the **Enable** button to enable the use of Ingress. + +
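+For reference, Grafana ingress is driven by the standard `grafana.ingress` values of the underlying Helm chart, which the preset toggle manages for you. The following is a minimal sketch with a placeholder host name, not the pack's exact defaults:
+
+```yaml
+charts:
+  kube-prometheus-stack:
+    grafana:
+      ingress:
+        enabled: true
+        hosts:
+          - grafana.example.com
+```
+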
+ +### Dependencies + +The Prometheus Operator pack installs the following dependencies: + +* [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) +* [Prometheus](https://prometheus.io) +* [Prometheus Alertmanager](https://www.prometheus.io/docs/alerting/latest/alertmanager). +* [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) +* [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) +* [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) +* and the service monitors to scrape internal Kubernetes components. + +
+ + + +All versions less than v30.2.X are considered deprecated. Upgrade to a newer version to take advantage of new features. + + + +
+ +## Terraform + +You can retrieve details about the Prometheus operator pack by using the following Terraform code. + +
+ +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "pack-info" { + name = "prometheus-opeartor" + version = "45.4.0" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + +## References + +- [Deploy Monitoring Stack](../clusters/cluster-management/monitoring/deploy-monitor-stack.md) + + +- [Prometheus Operator GitHub](https://github.com/coreos/prometheus-operator) + + +- [Prometheus Remote Write Tuning](https://prometheus.io/docs/practices/remote_write) + + +- [Thanos & Prometheus](https://prometheus-operator.dev/docs/operator/thanos) + + +- [Prometheus FAQ](https://prometheus.io/docs/introduction/faq) + + +- [Prometheus Cluster Metrics](prometheus-cluster-metrics.md) diff --git a/docs/docs-content/integrations/rke2.md b/docs/docs-content/integrations/rke2.md new file mode 100644 index 0000000000..1aaa0b70d6 --- /dev/null +++ b/docs/docs-content/integrations/rke2.md @@ -0,0 +1,201 @@ +--- +sidebar_label: 'RKE2' +title: 'RKE2' +description: 'RKE2 pack in Palette' +hide_table_of_contents: true +type: "integration" +category: ['kubernetes', 'amd64', 'fips'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/kubernetes-rke2/blobs/sha256:47cde61005d9996f1571c132ba9f753982134a7a0d8e445e27001ab8519e6051?type=image/png' +--- + +[RKE2](https://docs.rke2.io/) is a fully conformant Kubernetes distribution focusing on security and compliance within the U.S. Federal Government sector. To meet the Kubernetes security and compliance goals required by the U.S. Federal Government, RKE2 establishes the following: + +
+ +1. Provides defaults and configuration options that allow clusters to pass the CIS Kubernetes Benchmark v1.6 with minimal operator intervention. + + +2. Enables Federal Information Processing Standard 140-2 (FIPS 140-2) compliance. + + +3. Scans components regularly for Common Vulnerabilities and Exposures (CVEs) using Trivy in the build pipeline. + + +RKE2 launches control plane components as static pods, managed by the kubelet instead of relying on Docker. Additionally, the embedded container runtime is containerd. + +You can deploy RKE2 by adding this pack to a cluster profile. Once the cluster profile is created, you can deploy the RKE2-based Kubernetes clusters through Palette. + + +
+ +:::caution + +RKE2 is only available for Edge host deployments. Refer to the [Edge](../clusters/edge/edge.md) documentation to learn more about Edge. + +::: + +## Versions Supported + +The following RKE2 versions are supported to work with Palette. + +
+ + + + + +## Prerequisites + +- A Linux operating system. Refer to the official [RKE2 requirements](https://docs.rke2.io/install/requirements) for more details on supported Linux distributions and versions. + +- 8 GB Memory + +- 4 CPU + +- An Edge host. Refer to the [Edge](../clusters/edge/edge.md) documentation to learn more about Edge. + + +## Usage + +You can add RKE2 to an Edge cluster profile as the Kubernetes layer. Refer to the [Create Cluster Profiles](../cluster-profiles/task-define-profile.md) guide to learn more. + +RKE2 offers several customization options, ranging from networking to security. We recommend you review the following RKE2 documentation: + +
+ + +- [Configuration Options](https://docs.rke2.io/install/configuration) + + +- [Inbound Network Rules](https://docs.rke2.io/install/requirements#inbound-network-rules) + + +- [Registries Configuration](https://docs.rke2.io/install/containerd_registry_configuration) + + +- [Advanced Options](https://docs.rke2.io/advanced) + + +Many of the Day-2 cluster management responsibilities are handled by Palette. Review the [Cluster Management](../clusters/cluster-management/cluster-management.md) reference resource to learn more about Palette and Day-2 operations. + +
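+To give a sense of the configuration options linked above, the following is a minimal sketch of an upstream RKE2 server configuration file (`/etc/rancher/rke2/config.yaml`). The SAN entry, CNI choice, and label are placeholders, and in Palette these settings are typically managed through the pack values and cluster profile rather than edited by hand on the host:
+
+```yaml
+write-kubeconfig-mode: "0644"
+tls-san:
+  - rke2.example.com
+cni: calico
+node-label:
+  - "environment=edge"
+```
+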
+ + + +## Prerequisites + +- A Linux operating system. Refer to the official [RKE2 requirements](https://docs.rke2.io/install/requirements) for more details on supported Linux distributions and versions. + +- 8 GB Memory + +- 4 CPU + +- An Edge host. Refer to the [Edge](../clusters/edge/edge.md) documentation to learn more about Edge. + +## Usage + +You can add RKE2 to an Edge cluster profile as the Kubernetes layer. To learn more, refer to the [Create Cluster Profiles](../cluster-profiles/task-define-profile.md) guide. + +RKE2 offers several customization options, ranging from networking to security. We recommend you review the following RKE2 documentation: + +
+ + +- [Configuration Options](https://docs.rke2.io/install/configuration) + + +- [Inbound Network Rules](https://docs.rke2.io/install/requirements#inbound-network-rules) + + +- [Registries Configuration](https://docs.rke2.io/install/containerd_registry_configuration) + + +- [Advanced Options](https://docs.rke2.io/advanced) + + +Many of the Day-2 cluster management responsibilities are handled by Palette. Review the [Cluster Management](../clusters/cluster-management/cluster-management.md) reference resource to learn more about Palette and Day-2 operations. + +
+ + + +## Prerequisites + +- A Linux operating system. Refer to the official [RKE2 requirements](https://docs.rke2.io/install/requirements) for more details on supported Linux distributions and versions. + +- 8 GB Memory + +- 4 CPU + +- An Edge host. Refer to the [Edge](../clusters/edge/edge.md) documentation to learn more about Edge. + + +## Usage + +You can add RKE2 to an Edge cluster profile as the Kubernetes layer. To learn more, refer to the [Create Cluster Profiles](../cluster-profiles/task-define-profile.md) guide. + +RKE2 offers several customization options, ranging from networking to security. We recommend you review the following RKE2 documentation: + +
+ + +- [Configuration Options](https://docs.rke2.io/install/configuration) + + +- [Inbound Network Rules](https://docs.rke2.io/install/requirements#inbound-network-rules) + + +- [Registries Configuration](https://docs.rke2.io/install/containerd_registry_configuration) + + +- [Advanced Options](https://docs.rke2.io/advanced) + + +Many of the Day-2 cluster management responsibilities are handled by Palette. Review the [Cluster Management](../clusters/cluster-management/cluster-management.md) reference resource to learn more about Palette and Day-2 operations. + +
+ + + + +The following major versions of RKE2 are deprecated. + + +
+ + +- 1.23.x + + +- 1.22.x + + + + +
+
+ + + +## Terraform + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "k8s" { + name = "edge-rke2" + version = "1.25.2" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + +## Resources + +- [RKE2 Documentation](https://docs.rke2.io) + + +- [RKE2 GitHub Repository](https://github.com/rancher/rke2) \ No newline at end of file diff --git a/docs/docs-content/integrations/rook-ceph.md b/docs/docs-content/integrations/rook-ceph.md new file mode 100644 index 0000000000..b186d90d64 --- /dev/null +++ b/docs/docs-content/integrations/rook-ceph.md @@ -0,0 +1,59 @@ +--- +sidebar_label: 'rook-ceph' +title: 'Rook Ceph' +description: 'Rook Ceph storage pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['storage','amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: ' https://registry.dev.spectrocloud.com/v1/csi-rook-ceph/blobs/sha256:2817270f4eecbc2eea0740c55c7611d1a538a3e17da610a3487bb11b067076d1?type=image/png' +tags: ['packs', 'rook-ceph', 'storage'] +--- + +Rook turns storage software into self-managing, self-scaling, and self-healing storage services. It automates deployment, bootstrapping, configuration, provisioning, scaling, upgrading, migration, disaster recovery, monitoring, and resource management. Rook uses the facilities provided by the underlying cloud-native container management, scheduling, and orchestration platform to perform its duties. + +The pack provides the following two configurations: +* A three-node Ceph cluster (recommended). +* A single node Ceph cluster. + +Please make sure that your worker node pool size satisfies the minimum nodes requirement for your Ceph cluster. Additional disks should be attached to your worker pool nodes to deploy a Ceph cluster. For example, suppose you are using existing appliances for your Kubernetes cluster (typical for edge clusters); you will need to ensure that additional disks (1 or 3 - based on your Ceph cluster settings) are attached to the appliance. The device filter needs to be configured in the pack settings for such cases. As an example, if the additional disks were sdd, sde, sdf, the following configuration would be required: + +Example YAML + +```yaml + storage: + useAllNodes: true + useAllDevices: false + deviceFilter: ^sd[d-f] + config: + osdsPerDevice: "1" # this value can be overridden at the node or device level +``` +## Versions Supported + + + + + +**1.9.2** + + + + + +**1.8.3** + + + + + + +**1.5.9** + + + + + + +## References + +- [Rook Cepth Documentation](https://rook.io/docs/rook/v1.10/Getting-Started/intro/) diff --git a/docs/docs-content/integrations/spectro-k8s-dashboard.md b/docs/docs-content/integrations/spectro-k8s-dashboard.md new file mode 100644 index 0000000000..48f80ebc2a --- /dev/null +++ b/docs/docs-content/integrations/spectro-k8s-dashboard.md @@ -0,0 +1,104 @@ +--- +sidebar_label: "Spectro Kubernetes Dashboard" +title: "Spectro Kubernetes Dashboard" +description: "Palette's pre-configured Kubernetes Dashboard Monitoring pack reduces the complexity of standing up the Kubernetes dashboard for a cluster." 
+hide_table_of_contents: true +type: "integration" +category: ["monitoring", 'arm64', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/k8s-dashboard/blobs/sha256:2de5d88b2573af42d4cc269dff75744c4174ce47cbbeed5445e51a2edd8b7429?type=image/png' +tags: ['packs', 'spectro-k8s-dashboard', 'monitoring'] +--- + + +Spectro Kubernetes Dashboard is a web-based UI for Kubernetes clusters that auto-enables the Kubernetes Dashboard using secure ports and conveniently includes the [Spectro Proxy](frp.md) pack. + + +## Versions Supported + +**2.7.x** + +
+
+## Prerequisites
+
+- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the [Spectro Proxy](frp.md) reverse proxy.
+
+
+- Users or groups must be mapped to a Kubernetes RBAC role, either a *Role* or a *ClusterRole*. You can create a custom role through a manifest and use Palette's roleBinding feature to associate users or groups with the role. Refer to the [Create a Role Binding](../clusters/cluster-management/cluster-rbac.md#create-role-bindings) guide to learn more.
+
+
+## Parameters
+
+
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `namespace` | The Kubernetes namespace to install the dashboard. | `kubernetes-dashboard` |
+| `ClusterRole` | The ClusterRole to assign to the Spectro Kubernetes Dashboard. | `read-only` |
+| `certDuration` | Self-signed certificate duration in hours. | 8760h (365d) |
+| `certRenewal` | Self-signed certificate renewal in hours. | 720h (30d) |
+| `serviceType` | The service type for the dashboard. Supported values are `ClusterIP`, `LoadBalancer`, and `NodePort`. | `ClusterIP` |
+| `skipLogin` | Enables or disables the skip login option on the Spectro Kubernetes Dashboard. | `false` |
+| `enableInsecureLogin` | Enables non-Secure Sockets Layer (SSL) login. Dashboard login is always restricted to HTTP(S) + localhost or HTTPS and external domain. | `false` |
+| `ingress.enabled` | Ingress configuration to access the `ClusterIP`, `LoadBalancer`, or `NodePort`. | `false` |
+
+## Usage
+
+To use the Spectro Kubernetes Dashboard pack, you have to add it to your cluster profile. Spectro Kubernetes Dashboard supports several public cloud and data center cloud environments. To learn more, refer to [Clusters](/clusters).
+
+Use the following information to find the Kubernetes Dashboard pack.
+- **Pack Type**: Monitoring
+- **Registry**: Public Repo
+- **Pack Name**: Spectro Kubernetes Dashboard
+- **Pack Version**: 2.7.0 or higher
+
+Spectro Kubernetes Dashboard has the following Access options.
+
+ +- **Proxied**: This option is useful for access to the Kubernetes Dashboard from anywhere and leverages the Spectro Proxy pack, which gets installed automatically. The Service resource for the Kubernetes Dashboard will be configured as ClusterIP and is only accessible through the proxy. To learn more, check out the [Spectro Proxy](frp.md) guide. + + +- **Direct**: This option is meant for a private configuration where a proxy is not implemented or not desired. The Service resource for the Kubernetes Dashboard will be configured as LoadBalancer, which you can access directly. This option requires you to be on a network that can reach the IP address given to the LoadBalancer service. + + +## Terraform + +You can reference the Spectro Proxy pack in Terraform with a data resource. + +
+ +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "k8s-dashboard" { + name = "spectro-k8s-dashboard" + version = "2.7.0" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + +## References + +- [Microsoft Access Control Using Kubernetes RBAC](https://learn.microsoft.com/en-us/azure/aks/azure-ad-rbac?toc=https%3A%2F%2Fdocs.micro[…]icrosoft.com%2Fen-us%2Fazure%2Fbread%2Ftoc.json&tabs=portal) + + +- [Terraform Data Resource](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) + + diff --git a/docs/docs-content/integrations/splunk.md b/docs/docs-content/integrations/splunk.md new file mode 100644 index 0000000000..2b727a0075 --- /dev/null +++ b/docs/docs-content/integrations/splunk.md @@ -0,0 +1,29 @@ +--- +sidebar_label: 'Splunk' +title: 'Splunk' +description: 'Splunk Monitoring pack in Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['logging', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/splunk/blobs/sha256:1729cfced51a1ef8693997aee535f098a782f15fba9ca5232a8dfba68a8d4857?type=image/png' +tags: ['packs', 'splunk', 'logging'] +--- + + +Splunk Connect for Kubernetes imports and searches the Kubernetes loggings, objects, and metrics data in Splunk platform deployment. It supports the importing and searching of container logs. + +## Versions Supported + + + + + +**1.4.3** + + + + +## References + +- [Splunk Connect for Kubernetes GitHub](https://github.com/splunk/splunk-connect-for-kubernetes) diff --git a/docs/docs-content/integrations/ubuntu.md b/docs/docs-content/integrations/ubuntu.md new file mode 100644 index 0000000000..9e83538540 --- /dev/null +++ b/docs/docs-content/integrations/ubuntu.md @@ -0,0 +1,878 @@ +--- +sidebar_label: 'Ubuntu' +title: 'Ubuntu' +description: 'Choose Ubuntu Operating System pack in Palette.' +hide_table_of_contents: true +type: "integration" +category: ['operating system', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/ubuntu-vsphere/blobs/sha256:09a727f9005b79c69d8e60e12ce130880c63131315b49e7fb4cc44e53d34dc7a?type=image/png' +tags: ['packs', 'ubuntu', 'operating system'] +--- + + +[Ubuntu](https://ubuntu.com) is an open-source operating system based on the Linux kernel. Developed by Canonical Ltd., Ubuntu is a popular choice for desktops, servers, and cloud environments due to its ease of use, robustness, and versatility. + +Boasting a comprehensive package system, Ubuntu provides a wealth of pre-compiled software directly accessible for installation. With its regular release cycle, Ubuntu ensures updated software and security patches, making it a secure and reliable choice for various use cases. + +In addition to its stability, Ubuntu's community support, extensive documentation, and commitment to free software principles make it a widely favored choice among Linux distributions. + +You can use Ubuntu as the base Operating System (OS) when deploying a host cluster by using the Ubuntu pack when you create a [cluster profile](../cluster-profiles/cluster-profiles.md). + + +
+ +:::info + +Review [Maintenance Policy](maintenance-policy.md#os-packs) to learn about pack update and deprecation schedules. + +::: + + +## Version Supported + +
+ + + + + + +### Prerequisites + +- A minimum of 4 CPU and 4GB Memory + + + +- You can use Ubuntu with a supported Kubernetes version. The table lists Kubernetes dependencies. + + + |Kubernetes Version | Supports Kubernetes | +|------------|----------------------------| +1.26 | ✅ | +1.25 | ✅ | +1.24 | ❌ | + + +### Parameters + +The Ubuntu OS pack has no required parameters. + +You can customize the Ubuntu OS pack. The following configuration blocks are available for use within the `kubeadmconfig` configuration block in the YAML file. + +
+ +:::info + +Palette applies a default set of configuration options when deploying Ubuntu. You can override the defaults configurations by using the following parameters but it's not required. + +::: + + +| Field | Description | YAML Type | Required | +| --- | --- | --- | --- | +| `apiServer` | Extra settings for the Kube API server control plane component. Refer to [Kube API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/) documentation for available options. | object | No | +| `controllerManager` | Extra settings for the Kubernetes controller manager control plane component. Review the [Kubernetes controller manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) documentation for more information. | object | No | +| `scheduler` | Extra settings for the Kubernetes scheduler control plane component. Refer to the [Kube scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler) documenation for more details. | object | No | +| `kubeletExtraArgs` | Extra arguments for kubelet. Refer to the [Kubeadm init](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init) documentation for more details. | map | No | +| `files` | Create or specify additional files for the `kubeadmconfig`. Refer to the [Customize Pack](/integrations/ubuntu?22.04.x#customizepack) section to learn more. | list | No | +| `preKubeadmCommands` | Extra commands to issue before kubeadm starts. | list | No | +| `postKubeadmCommands` | Extra commands to issue after kubeadm starts. | list | No | +| `imageRepository` | The container registry to pull images from. If empty, `k8s.gcr.io` will be used by default. | string | No | +| `etcd` | Configuration for etcd. This value defaults to a Local (stacked) etcd. You can specify configurations using [local etcd configuration files](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/), or you can reference [external etcd configurations](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability) or Certificate Authorities (CA). | object | No | +| `dns` | Options for the DNS add-on installed in the cluster. Refer to the [Customizing DNS Service](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/) to learn more. | object | No | + +The following code snippet is an example YAML using all the `kubeadmconfig` parameters listed in the table. The example YAML is only for learning purposes. + + +
+ +```yaml +kubeadmconfig: + apiServer: + extraArgs: + secure-port: "6443" + anonymous-auth: "true" + insecure-port: "0" + profiling: "false" + disable-admission-plugins: "AlwaysAdmit" + enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurityPolicy" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + authorization-mode: RBAC,Node + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extraVolumes: + - name: audit-log + hostPath: /var/log/apiserver + mountPath: /var/log/apiserver + pathType: DirectoryOrCreate + - name: audit-policy + hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + readOnly: true + pathType: File + controllerManager: + extraArgs: + profiling: "false" + terminated-pod-gc-threshold: "25" + use-service-account-credentials: "true" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extraArgs: + profiling: "false" + dns: + type: CoreDNS + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.7.0-eks-1-18-1 + etcd: + local: + dataDir: /var/lib/etcd + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-18-1 + external: + endpoints: + - example.org + caFile: myCa.file + certFile: myCert.file + keyFile: myKey.file + imageRepository: public.ecr.aws/eks-distro/kubernetes + kubeletExtraArgs: + read-only-port : "0" + event-qps: "0" + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + files: + - path: hardening/audit-policy.yaml + targetPath: /etc/kubernetes/audit-policy.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/privileged-psp.yaml + targetPath: /etc/kubernetes/hardening/privileged-psp.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/90-kubelet.conf + targetPath: /etc/sysctl.d/90-kubelet.conf + targetOwner: "root:root" + targetPermissions: "0600" + + preKubeadmCommands: + # For enabling 'protect-kernel-defaults' flag to kubelet, kernel parameters changes are required + - 'echo "====> Applying kernel parameters for Kubelet"' + - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' + postKubeadmCommands: + # Apply the privileged PodSecurityPolicy on the first master node ; Otherwise, CNI (and other) pods won't come up + - 'export KUBECONFIG=/etc/kubernetes/admin.conf' + # Sometimes api server takes a little longer to respond. Retry if applying the pod-security-policy manifest fails + - '[ -f "$KUBECONFIG" ] && { echo " ====> Applying PodSecurityPolicy" ; until $(kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null ); do echo "Failed to apply PodSecurityPolicies, will retry in 5s" ; sleep 5 ; done ; } || echo "Skipping PodSecurityPolicy for worker nodes"' +``` + +
+ +:::caution + +Review the parameter documentation before you make changes to the kubeadm configuration. Improper configurations can cause deployment failures. + + +::: + +Palette also supports Ubuntu Pro. Refer to the [Ubuntu Pro](ubuntu.md?22.04.x#ubuntupro) section below for more details. + +
+ +### Usage + + +To use the Ubuntu OS pack, add the pack to your cluster profile when you select the OS layer. Refer to the [Create Cluster Profile](../cluster-profiles/task-define-profile.md) guide to learn more about creating cluster profiles. + + + +
+ + +#### Customize Pack + + +You can customize the Ubuntu OS pack using the available configuration parameters in the YAML file. Use the parameters to customize the Kubernetes install process. + + +
+ + +##### Add Custom Files + + +You can create custom files that you define in the `files` section that precedes the `preKubeadmCommands` and `postKubeadmCommands` sections. The files are invoked during runtime. + + +
+ +```yaml +kubeadmconfig: + files: + - targetPath: /usr/local/share/ca-certificates/mycom.crt + targetOwner: "root:root" + targetPermissions: "0644" + content: | + -----BEGIN CERTIFICATE----- + MIICyzCCAbOgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl + cm5ldGVzMB4XDTIwMDkyMjIzNDMyM1oXDTMwMDkyMDIzNDgyM1owFTETMBEGA1UE + AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMdA + nZYs1el/6f9PgV/aO9mzy7MvqaZoFnqO7Qi4LZfYzixLYmMUzi+h8/RLPFIoYLiz + qiDn+P8c9I1uxB6UqGrBt7dkXfjrUZPs0JXEOX9U/6GFXL5C+n3AUlAxNCS5jobN + fbLt7DH3WoT6tLcQefTta2K+9S7zJKcIgLmBlPNDijwcQsbenSwDSlSLkGz8v6N2 + 7SEYNCV542lbYwn42kbcEq2pzzAaCqa5uEPsR9y+uzUiJpv5tDHUdjbFT8tme3vL + 9EdCPODkqtMJtCvz0hqd5SxkfeC2L+ypaiHIxbwbWe7GtliROvz9bClIeGY7gFBK + jZqpLdbBVjo0NZBTJFUCAwEAAaMmMCQwDgYDVR0PAQH/BAQDAgKkMBIGA1UdEwEB + /wQIMAYBAf8CAQAwDQYJKoZIhvcNAQELBQADggEBADIKoE0P+aVJGV9LWGLiOhki + HFv/vPPAQ2MPk02rLjWzCaNrXD7aPPgT/1uDMYMHD36u8rYyf4qPtB8S5REWBM/Y + g8uhnpa/tGsaqO8LOFj6zsInKrsXSbE6YMY6+A8qvv5lPWpJfrcCVEo2zOj7WGoJ + ixi4B3fFNI+wih8/+p4xW+n3fvgqVYHJ3zo8aRLXbXwztp00lXurXUyR8EZxyR+6 + b+IDLmHPEGsY9KOZ9VLLPcPhx5FR9njFyXvDKmjUMJJgUpRkmsuU1mCFC+OHhj56 + IkLaSJf6z/p2a3YjTxvHNCqFMLbJ2FvJwYCRzsoT2wm2oulnUAMWPI10vdVM+Nc= + -----END CERTIFICATE----- + preKubeadmCommands: + - echo "Executing pre kube admin config commands" + - update-ca-certificates + - 'systemctl restart containerd; sleep 3' + - 'while [ ! -S /var/run/containerd/containerd.sock ]; do echo "Waiting for containerd..."; sleep 1; done' + postKubeadmCommands: + - echo "Executing post kube admin config commands" +``` + +In the next example, a configuration file is added to a folder. + +
+ +```yaml +kubeadmconfig: + files: + - targetPath: /etc/containerd/config.toml + targetOwner: "root:root" + targetPermissions: "0644" + content: | + version = 2 + imports = ["/etc/containerd/conf.d/*.toml"] + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.9" + device_ownership_from_security_context = true + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + preKubeadmCommands: + - 'echo "====> Applying pre Kubeadm commands"' + postKubeadmCommands: + - 'echo "====> Applying post Kubeadm commands"' +``` + + +#### Ubuntu Pro + +Ubuntu Pro is a security and maintenance subscription offering from Canonical that offers long-term security support and many other security hardening features. Ubuntu Pro offers several more benefits than the free Ubuntu offering: + + +
+ + +- Extended Security Maintenance + + +- Kernel Livepatch service to avoid reboots + + +- FIPS 140-2 Level 1 certified crypto modules + + +- Common Criteria EAL2 + +For more information, refer to the [Ubuntu Pro](https://ubuntu.com/pro) documentation from Canonical. + + + +You can enable Ubuntu Pro when deploying clusters with Palette. To enable Ubuntu Pro, select Ubuntu as the OS layer for a cluster profile and expand the **Preset Menu** to reveal the Ubuntu Pro parameters. + + +| Parameter| Description | Default Value | +|---|---|----| +|**token**| The Canonical subscription token for Ubuntu Pro. Refer to the Ubuntu Pro [subscribe page](https://ubuntu.com/pro/subscribe) to aquire a subscription token. | `""` | +|**esm-apps**| Expanded Security Maintenance (ESM) for Applications. Refer to the Ubuntu [ESM documentation](https://ubuntu.com/security/esm) to learn more. | Disabled | +| **livepatch** | Canonical Livepatch service. Refer to the Ubuntu [Livepatch](https://ubuntu.com/security/livepatch) documenation for more details. | Disabled | +| **fips** | Federal Information Processing Standards (FIPS) 140 validated cryptography for Linux workloads on Ubuntu. This installs NIST-certified core packages. Refer to the Ubuntu [FIPS](https://ubuntu.com/security/certifications/docs/2204) documentation to learn more. | Disabled | +| **fips-updates** | Install NIST-certified core packages with priority security updates. Refer to the Ubuntu [FIPS Updates](https://ubuntu.com/security/certifications/docs/fips-updates) documentation to learn more. | Disabled | +| **cis** | Gain access to OpenSCAP-based tooling that automates both hardening and auditing with certified content based on published CIS benchmarks. Refer to the Ubuntu [CIS](https://ubuntu.com/security/certifications/docs/2204/usg/cis) documentation to learn more. | Disabled | + + + +Use the following steps to enable Ubuntu Pro. + +
+ + +1. Log in to [Palette](https://console.spectrocloud.com). + + + +2. Navigate to the left **Main Menu** and select **Profiles**. + + +3. Click on **Add Cluster Profile**. + + +4. Fill out the input fields for **Name**, **Version**, **Description**, **Type** and **Tags**. Click on **Next** to continue. + + + +5. Select the infrastructure provider and click on **Next**. + + +6. Select the OS layer and use the following information to find the Ubuntu pack: + + + - **Pack Type** - OS + + - **Registry** - Public Repo + + - **Pack Name** -Ubuntu + + - **Pack Version** - 20.04 or 22.04 + + +7. Modify the Ubuntu **Pack values** to activate the **Presets** options for the Ubuntu YAML file. Click on the **** button to reveal the YAML editor and expand the **Preset Drawer**. + + +
+ + ![A view of the cluster profile creation wizard for Ubuntu Pro](/integrations_ubuntu_ubuntu-pro-preset-drawer.png) + + +8. Click the **Ubuntu Advantage/Pro** checkbox to include the Ubuntu Pro parameters in the pack configuration file. + + +9. Toggle options on or off to enable or disable the various Ubuntu Pro services. + + + +10. Click the **Next layer** button to continue to the next layer. + + +11. Complete the remainder of the cluster profile creation wizard by selecting the next cluster profile layers. + + +
+ + + + +
+ + + +## Prerequisites + +- A minimum of 4 CPU and 4GB Memory + + + +- You can use Ubuntu with a supported Kubernetes version. The table lists Kubernetes dependencies. + + + |Kubernetes Version | Supports Kubernetes | +|------------|----------------------------| +1.26 | ❌ | +1.25 | ❌ | +1.24 | ✅ | +1.23 | ✅ | + +
+ + +## Parameters + +The Ubuntu OS pack has no required parameters. + + + +You can customize the Ubuntu OS pack. The following configuration blocks are available for use within the `kubeadmconfig` configuration block in the YAML file. + + +
+ + +:::info + +Palette applies a default set of configuration options when deploying Ubuntu. You can override the defaults configurations by using the following parameters but it's not required. + +::: + + +| Field | Description | YAML Type | Required | +| --- | --- | --- | --- | +| `apiServer` | Extra settings for the Kube API server control plane component. Refer to [Kube API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/) documentation for available options. | object | No | +| `controllerManager` | Extra settings for the Kubernetes controller manager control plane component. Review the [Kubernetes controller manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) documentation for more information. | object | No | +| `scheduler` | Extra settings for the Kubernetes scheduler control plane component. Refer to the [Kube scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler) documenation for more details. | object | No | +| `kubeletExtraArgs` | Extra arguments for kubelet. Refer to the [Kubeadm init](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init) documentation for more details. | map | No | +| `files` | Additional files to pass to kubeadmconfig. Refer to the [Customize Pack](/integrations/ubuntu?22.04.x#customizepack) section to learn more. | list | No | +| `preKubeadmCommands` | Extra commands to issue before kubeadm starts. | list | Yes - Auto generated | +| `postKubeadmCommands` | Extra commands to issue after kubeadm starts. | list | Yes - Auto generated | +| `imageRepository` | The container registry to pull images from. If empty, `k8s.gcr.io` will be used by default. | string | No | +| `etcd` | Configuration for etcd. This value defaults to a Local (stacked) etcd. You can specify configurations using [local etcd configuration files](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/), or you can reference [external etcd configurations](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability) or Certificate Authorities (CA). | object | No | +| `dns` | Options for the DNS add-on installed in the cluster. Refer to the [Customizing DNS Service](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/) to learn more. | object | No | + +The following code snippet is an example YAML using all the `kubeadmconfig` parameters listed in the table. The example YAML is only for learning purposes. + + +
+ +```yaml +kubeadmconfig: + apiServer: + extraArgs: + secure-port: "6443" + anonymous-auth: "true" + insecure-port: "0" + profiling: "false" + disable-admission-plugins: "AlwaysAdmit" + enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurityPolicy" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + authorization-mode: RBAC,Node + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extraVolumes: + - name: audit-log + hostPath: /var/log/apiserver + mountPath: /var/log/apiserver + pathType: DirectoryOrCreate + - name: audit-policy + hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + readOnly: true + pathType: File + controllerManager: + extraArgs: + profiling: "false" + terminated-pod-gc-threshold: "25" + use-service-account-credentials: "true" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extraArgs: + profiling: "false" + dns: + type: CoreDNS + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.7.0-eks-1-18-1 + etcd: + local: + dataDir: /var/lib/etcd + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-18-1 + external: + endpoints: + - example.org + caFile: myCa.file + certFile: myCert.file + keyFile: myKey.file + imageRepository: public.ecr.aws/eks-distro/kubernetes + kubeletExtraArgs: + read-only-port : "0" + event-qps: "0" + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + files: + - path: hardening/audit-policy.yaml + targetPath: /etc/kubernetes/audit-policy.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/privileged-psp.yaml + targetPath: /etc/kubernetes/hardening/privileged-psp.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/90-kubelet.conf + targetPath: /etc/sysctl.d/90-kubelet.conf + targetOwner: "root:root" + targetPermissions: "0600" + + preKubeadmCommands: + # For enabling 'protect-kernel-defaults' flag to kubelet, kernel parameters changes are required + - 'echo "====> Applying kernel parameters for Kubelet"' + - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' + postKubeadmCommands: + # Apply the privileged PodSecurityPolicy on the first master node ; Otherwise, CNI (and other) pods won't come up + - 'export KUBECONFIG=/etc/kubernetes/admin.conf' + # Sometimes api server takes a little longer to respond. Retry if applying the pod-security-policy manifest fails + - '[ -f "$KUBECONFIG" ] && { echo " ====> Applying PodSecurityPolicy" ; until $(kubectl apply -f /etc/kubernetes/hardening/privileged-psp.yaml > /dev/null ); do echo "Failed to apply PodSecurityPolicies, will retry in 5s" ; sleep 5 ; done ; } || echo "Skipping PodSecurityPolicy for worker nodes"' +``` + +
+ +:::caution + +Review the parameter documentation before you make changes to the kubeadm configuration. Improper configurations can cause deployment failures. + + +::: + +Palette also supports Ubuntu Pro. Refer to the [Ubuntu Pro](ubuntu.md?22.04.x#ubuntupro) section below for more details. + +
+ +## Usage + + +To use the Ubuntu OS pack, add the pack to your cluster profile when you select the OS layer. Refer to the [Create Cluster Profile](../cluster-profiles/task-define-profile.md) guide to learn more about creating cluster profiles. + + + +
+ + +### Customize Pack + + +You can customize the Ubuntu OS pack using the available configuration parameters in the YAML file. Use the parameters to customize the Kubernetes install process. + + +
+ + +#### Add Custom Files + + +You can create custom files that you define in the `files` section that precedes the `preKubeadmCommands` and `postKubeadmCommands` sections. The files are invoked during runtime. + + +
+ +```yaml +kubeadmconfig: + files: + - targetPath: /usr/local/share/ca-certificates/mycom.crt + targetOwner: "root:root" + targetPermissions: "0644" + content: | + -----BEGIN CERTIFICATE----- + MIICyzCCAbOgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl + cm5ldGVzMB4XDTIwMDkyMjIzNDMyM1oXDTMwMDkyMDIzNDgyM1owFTETMBEGA1UE + AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMdA + nZYs1el/6f9PgV/aO9mzy7MvqaZoFnqO7Qi4LZfYzixLYmMUzi+h8/RLPFIoYLiz + qiDn+P8c9I1uxB6UqGrBt7dkXfjrUZPs0JXEOX9U/6GFXL5C+n3AUlAxNCS5jobN + fbLt7DH3WoT6tLcQefTta2K+9S7zJKcIgLmBlPNDijwcQsbenSwDSlSLkGz8v6N2 + 7SEYNCV542lbYwn42kbcEq2pzzAaCqa5uEPsR9y+uzUiJpv5tDHUdjbFT8tme3vL + 9EdCPODkqtMJtCvz0hqd5SxkfeC2L+ypaiHIxbwbWe7GtliROvz9bClIeGY7gFBK + jZqpLdbBVjo0NZBTJFUCAwEAAaMmMCQwDgYDVR0PAQH/BAQDAgKkMBIGA1UdEwEB + /wQIMAYBAf8CAQAwDQYJKoZIhvcNAQELBQADggEBADIKoE0P+aVJGV9LWGLiOhki + HFv/vPPAQ2MPk02rLjWzCaNrXD7aPPgT/1uDMYMHD36u8rYyf4qPtB8S5REWBM/Y + g8uhnpa/tGsaqO8LOFj6zsInKrsXSbE6YMY6+A8qvv5lPWpJfrcCVEo2zOj7WGoJ + ixi4B3fFNI+wih8/+p4xW+n3fvgqVYHJ3zo8aRLXbXwztp00lXurXUyR8EZxyR+6 + b+IDLmHPEGsY9KOZ9VLLPcPhx5FR9njFyXvDKmjUMJJgUpRkmsuU1mCFC+OHhj56 + IkLaSJf6z/p2a3YjTxvHNCqFMLbJ2FvJwYCRzsoT2wm2oulnUAMWPI10vdVM+Nc= + -----END CERTIFICATE----- + preKubeadmCommands: + - echo "Executing pre kube admin config commands" + - update-ca-certificates + - 'systemctl restart containerd; sleep 3' + - 'while [ ! -S /var/run/containerd/containerd.sock ]; do echo "Waiting for containerd..."; sleep 1; done' + postKubeadmCommands: + - echo "Executing post kube admin config commands" +``` + +In the next example, a configuration file is added to a folder. + +
+ +```yaml +kubeadmconfig: + files: + - targetPath: /etc/containerd/config.toml + targetOwner: "root:root" + targetPermissions: "0644" + content: | + ## template: jinja + + # Use config version 2 to enable new configuration fields. + # Config file is parsed as version 1 by default. + version = 2 + + imports = ["/etc/containerd/conf.d/*.toml"] + + [plugins] + [plugins."io.containerd.grpc.v1.cri"] + sandbox_image = "registry.k8s.io/pause:3.9" + device_ownership_from_security_context = true + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + preKubeadmCommands: + - 'echo "====> Applying pre Kubeadm commands"' + postKubeadmCommands: + - 'echo "====> Applying post Kubeadm commands"' +``` + + +### Ubuntu Pro + +Ubuntu Pro is a security and maintenance subscription offering from Canonical that offers long-term security support and many other security hardening features. Ubuntu Pro offers several more benefits than the free Ubuntu offering: + + +
+ + +- Extended Security Maintenance + + +- Kernel Livepatch service to avoid reboots + + +- FIPS 140-2 Level 1 certified crypto modules + + +- Common Criteria EAL2 + +For more information, refer to the [Ubuntu Pro](https://ubuntu.com/pro) documentation from Canonical. + + + +You can enable Ubuntu Pro when deploying clusters with Palette. To enable Ubuntu Pro, select Ubuntu as the OS for a cluster profile and expand the **Preset Menu** to reveal the Ubuntu Pro parameters. + + +| Parameter| Description | Default Value | +|---|---|----| +|**token**| The Canonical subscription token for Ubuntu Pro. Refer to the Ubuntu Pro [subscribe page](https://ubuntu.com/pro/subscribe) to aquire a subscription token. | `""` | +|**esm-apps**| Expanded Security Maintenance (ESM) for Applications. Refer to the Ubuntu [ESM documentation](https://ubuntu.com/security/esm) to learn more. | Disabled | +| **livepatch** | Canonical Livepatch service. Refer to the Ubuntu [Livepatch](https://ubuntu.com/security/livepatch) documenation for more details. | Disabled | +| **fips** | Federal Information Processing Standards (FIPS) 140 validated cryptography for Linux workloads on Ubuntu. This installs NIST-certified core packages. Refer to the Ubuntu [FIPS](https://ubuntu.com/security/certifications/docs/2204) documentation to learn more. | Disabled | +| **fips-updates** | Install NIST-certified core packages with priority security updates. Refer to the Ubuntu [FIPS Updates](https://ubuntu.com/security/certifications/docs/fips-updates) documentation to learn more. | Disabled | +| **cis** | Gain access to OpenSCAP-based tooling that automates both hardening and auditing with certified content based on published CIS benchmarks. Refer to the Ubuntu [CIS](https://ubuntu.com/security/certifications/docs/2204/usg/cis) documentation to learn more. | Disabled | + + + +Use the following steps to enable Ubuntu Pro. + +
+
+
+1. Log in to [Palette](https://console.spectrocloud.com).
+
+
+
+2. Navigate to the left **Main Menu** and select **Profiles**.
+
+
+3. Click on **Add Cluster Profile**.
+
+
+4. Fill out the input fields for **Name**, **Version**, **Description**, **Type**, and **Tags**. Click on **Next** to continue.
+
+
+
+5. Select the infrastructure provider and click on **Next**.
+
+
+6. Select the OS layer and use the following information to find the Ubuntu pack:
+
+  - **Pack Type** - OS
+
+  - **Registry** - Public Repo
+
+  - **Pack Name** - Ubuntu
+
+  - **Pack Version** - 20.04 or 22.04
+
+
+7. Modify the Ubuntu **Pack values** to activate the **Presets** options for the Ubuntu YAML file. Open the YAML editor and expand the **Preset Drawer**.
+
+
+ + ![A view of the cluster profile creation wizard for Ubuntu Pro](/integrations_ubuntu_ubuntu-pro-preset-drawer.png) + + +8. Click the **Ubuntu Advantage/Pro** checkbox to include the Ubuntu Pro parameters in the pack configuration file. + + +9. Toggle options on or off to enable or disable the various Ubuntu Pro services. + + + +10. Click the **Next layer** button to continue to the next layer. + + +11. Complete the remainder of the cluster profile creation wizard by selecting the next cluster profile layers. + + +
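+
+After the cluster is deployed, you can optionally confirm that the subscription and the services you selected are active on a node. The following is a minimal sketch, assuming you have SSH access to an Ubuntu 20.04 or 22.04 cluster node. It uses Canonical's Ubuntu Pro client (`pro`) purely for verification; Palette applies the services you selected in the preset, so manual enablement is not required.
+
+```bash
+# List the Ubuntu Pro services and whether they are enabled on this node.
+sudo pro status
+
+# Example of enabling a single service by hand (for illustration only).
+sudo pro enable livepatch
+```
+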
+ + +
+
+
+
+All Ubuntu versions older than 20.04.x are considered deprecated. Upgrade to a newer version to take advantage of new features.
+
+
+ + +## Terraform + +You can reference Ubuntu in Terraform with the following code snippet. + +
+ + + + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "ubuntu" { + name = "edge-native-ubuntu" + version = "22.04" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + + + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "ubuntu" { + name = "ubuntu-maas" + version = "22.04" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + + + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "ubuntu" { + name = "ubuntu-vsphere" + version = "22.04" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + + + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "ubuntu" { + name = "ubuntu-openstack" + version = "22.04" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + + + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "ubuntu" { + name = "ubuntu-coxedge" + version = "22.04" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + + + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "ubuntu" { + name = "ubuntu-aws" + version = "22.04" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + + + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "ubuntu" { + name = "ubuntu-azure" + version = "22.04" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + + + +```hcl +data "spectrocloud_registry" "public_registry" { + name = "Public Repo" +} + +data "spectrocloud_pack_simple" "ubuntu" { + name = "ubuntu-gcp" + version = "22.04" + type = "helm" + registry_uid = data.spectrocloud_registry.public_registry.id +} +``` + + + + + + + + +## References + + +- [Create a Cluster Profile](../cluster-profiles/task-define-profile.md) + + +- [Ubuntu Documentation](https://docs.ubuntu.com) + + +- [Ubuntu Pro Documentation](https://ubuntu.com/server/docs) + + +- [Kubernetes API Server Configuration](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver) diff --git a/docs/docs-content/integrations/vault.md b/docs/docs-content/integrations/vault.md new file mode 100644 index 0000000000..f6d0dc0040 --- /dev/null +++ b/docs/docs-content/integrations/vault.md @@ -0,0 +1,115 @@ +--- +sidebar_label: 'Vault' +title: 'Vault' +description: 'Integration of the Vault add on into Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['security', 'amd64', 'arm64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.spectrocloud.com/v1/vault/blobs/sha256:1abda0173be1fd4ddfeccd2ff15089edd38a25e433ad7bb562a770d92992c7af?type=image/png' +tags: ['packs', 'vault', 'security'] +--- + +[Vault](https://www.vaultproject.io/) helps secure, store, and tightly control access to tokens, passwords, certificates, encryption keys for protecting secrets, and other sensitive data using a UI, CLI, or HTTP API. 
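+
+As an example of the CLI workflow, once the Vault server deployed by this pack is reachable and you are authenticated, you can write and read a key-value secret. The following is a minimal sketch, assuming the default KV version 2 secrets engine mounted at `secret/`, which Dev mode enables out of the box; the `myapp` path and `db-password` key are illustrative only.
+
+```bash
+# Write a secret to the KV v2 engine mounted at secret/.
+vault kv put secret/myapp db-password="s3cr3t"
+
+# Read the secret back.
+vault kv get secret/myapp
+```
+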
+
+## Versions Supported
+
+
+
+
+* **0.22.0**
+
+
+
+
+* **0.20.1**
+
+
+
+
+* **0.11.0**
+
+
+
+
+* **0.9.0**
+
+
+
+
+* **0.6.0**
+
+
+
+
+* **0.3.1**
+
+
+
+
+## Components
+
+Vault integration has the following components:
+
+* Vault server.
+* UI (Optional).
+* [Agent injector](https://www.vaultproject.io/docs/platform/k8s/injector/) (Optional).
+
+
+
+## Supported Use Cases
+
+1. Running a Vault Service:
+    * Vault is set up to run in **Dev mode** by default, so Vault is automatically unsealed and initialized.
+    * For production use cases, we recommend disabling Dev mode and enabling High Availability (HA).
+    * Also, review the [Production Checklist](https://www.vaultproject.io/docs/platform/k8s/helm/run#architecture) recommendations.
+1. Injecting application secrets from an external Vault into pods (**Agent Injector**).
+    * To run only the agent injector in the cluster, use version 0.6.0 of the Vault pack.
+    * Make sure to set `injector.externalVaultAddr` to point to the external Vault server.
+
+## How Secrets Are Injected in Deployments
+
+In Kubernetes clusters with Vault integrated, secrets can be injected into the application pods by adding the following annotations:
+
+```yaml
+vault.hashicorp.com/agent-inject: "true"
+vault.hashicorp.com/agent-inject-secret-<secret-name>: /path/to/secret
+vault.hashicorp.com/role: "<vault-role>"
+```
+
+More information on consuming Vault secrets can be found in the [Vault docs](https://www.vaultproject.io/docs/platform/k8s/injector).
+
+## Ingress
+
+Follow the steps below to configure ingress for the Vault server.
+
+1. Make sure `serviceType` is not set for the Vault server. That way, `serviceType` defaults to `ClusterIP`.
+    * Version 0.6.0 - line #289
+    * Version 0.3.1 - line #96
+2. Configure ingress.
+    * Enable ingress by changing `enabled` from `false` to `true`.
+    * Set ingress rules such as annotations, path, and hosts.
+    * Version 0.6.0 - line #146
+    * Version 0.3.1 - line #96
+
+With these configuration changes, you can access the Vault service on the ingress controller's load balancer hostname or IP address.
+
+## References
+
+- [Vault Agent injector](https://www.vaultproject.io/docs/platform/k8s/injector/)
+
+
+- [Injecting Vault Secrets Into Kubernetes Pods via a Sidecar - Blog](https://www.hashicorp.com/blog/injecting-vault-secrets-into-kubernetes-pods-via-a-sidecar/)
+
+
+- [Vault Agent Injector Examples](https://www.vaultproject.io/docs/platform/k8s/injector/examples/)
+
+
+- [Vault on Kubernetes Guide](https://www.vaultproject.io/docs/platform/k8s/helm/run)
diff --git a/docs/docs-content/integrations/vsphere-csi.md b/docs/docs-content/integrations/vsphere-csi.md
new file mode 100644
index 0000000000..307a47251c
--- /dev/null
+++ b/docs/docs-content/integrations/vsphere-csi.md
@@ -0,0 +1,74 @@
+---
+sidebar_label: 'vSphere-csi'
+title: 'vSphere Storage Interface (CSI)'
+description: 'vSphere CSI storage add on into Spectro Cloud'
+hide_table_of_contents: true
+type: "integration"
+category: ['storage', 'amd64','fips']
+sidebar_class_name: "hide-from-sidebar"
+logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-vsphere-volume/blobs/sha256:2cd106b353cb492d4647a1562fe59db6a1aeb792333900fe4e15237f899298b5?type=image/png'
+tags: ['packs', 'vSphere-csi', 'storage']
+---
+
+The main goal of vSphere Container Storage Interface (CSI) is to expose vSphere storage and features to Kubernetes users. It offers an out-of-tree volume driver that has been actively used on various Kubernetes platforms by service providers, including on-prem. 
Cloud Native Storage (CNS) is a result of evolution and productization of vSphere Storage for Kubernetes and is also enterprise ready. + + +## vSphere CSI Driver Components + +The vSphere CSI driver includes the following components: +* vSphere CSI Controller +* vSphere CSI Node +* Syncer + +* Metadata Syncer +* Full Sync + +## vSphere CSI Compatibility Matrix + +|Palette Release| CSI-Driver Version| Kubernetes Versions | +|---------------|-------------------|---------------------| +| 3.0 | 2.6.0 | 1.20.x to 1.24.x | +| 2.2 | 2.3.0 | 1.20.x to 1.23.x | +| 2.8 | 2.5.2 | 1.20.x to 1.23.x | + + +## Versions Supported + + + + + + + + + + + + + + + + + + + + +## Troubleshooting + +Storage classes created by Spectro Cloud are named "spectro-storage-class" and can be fetched from kubectl using the following CLI command: + +```bash +kubectl get storageclass +``` + + +## References + + +- [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes) + + +- [CSI Driver Documentation](https://vsphere-csi-driver.sigs.k8s.io/) + + +- [CSI Driver GitHub](https://github.com/kubernetes-sigs/vsphere-csi-driver) diff --git a/docs/docs-content/integrations/vsphere-volume.md b/docs/docs-content/integrations/vsphere-volume.md new file mode 100644 index 0000000000..da6489be8c --- /dev/null +++ b/docs/docs-content/integrations/vsphere-volume.md @@ -0,0 +1,50 @@ +--- +sidebar_label: 'vSphere-Volume' +title: 'vSphere Volume' +description: 'vSphere volume storage add on into Spectro Cloud' +hide_table_of_contents: true +type: "integration" +category: ['storage', 'amd64'] +sidebar_class_name: "hide-from-sidebar" +logoUrl: 'https://registry.dev.spectrocloud.com/v1/csi-vsphere-volume/blobs/sha256:2cd106b353cb492d4647a1562fe59db6a1aeb792333900fe4e15237f899298b5?type=image/png' +tags: ['packs', 'vSphere-Volume', 'storage'] +--- + + +vSphere volume virtualizes SAN/NAS arrays, enabling an efficient operational model optimized for virtualized environments and centered on the application instead of the infrastructure There are two types of provisioners for vSphere storage classes: + +* CSI provisioner: csi.vsphere.vmware.com +* vCP provisioner: kubernetes.io/vsphere-volume + +## Versions Supported + + + + +* **1.0.0** + + + + +## Parameters + +| Name | Supported Values | Default Value | Description | +| --- | --- | --- | --- | +| diskformat | thin, zeroedthick and eagerzeroedthick | zeroedthick | The storage account type to use | +| datastore | Datastore Name | | If specified, the volume will be created on the datastore specified in the storage class | +| isDefaultClass | true, false | true | Flag to denote if this StorageClass will be the default | + + + +## Troubleshooting + +Storage classes created by Spectro will be with the name "spectro-storage-class" and can be fetched from kubectl using the following CLI command: + +```bash +kubectl get storageclass +``` + +## References + +- [vSphere Storage Class](https://kubernetes.io/docs/concepts/storage/storage-classes/#vsphere) + diff --git a/docs/docs-content/introduction/_category_.json b/docs/docs-content/introduction/_category_.json new file mode 100644 index 0000000000..094470741d --- /dev/null +++ b/docs/docs-content/introduction/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 10 +} diff --git a/docs/docs-content/introduction/introduction.md b/docs/docs-content/introduction/introduction.md new file mode 100644 index 0000000000..fcb3b3eb41 --- /dev/null +++ b/docs/docs-content/introduction/introduction.md @@ -0,0 +1,78 @@ +--- +sidebar_label: "What is 
Palette?"
+title: "What is Palette?"
+description: "Learn what Spectro Cloud's Palette platform is, and how it reduces the complexities you encounter today with Kubernetes."
+icon: ""
+hide_table_of_contents: false
+slug: /
+---
+
+
+Palette is a complete and integrated platform that enables organizations to effectively manage the entire lifecycle of any combination of new or existing, simple or complex, small or large Kubernetes environments, whether in a data center or the cloud.
+
+With a unique approach to managing multiple clusters, Palette gives IT teams complete control, visibility, and production-scale efficiencies to provide developers with highly curated Kubernetes stacks and tools based on their specific needs, with granular governance and enterprise-grade security.
+
+Palette VerteX edition is also available to meet the stringent requirements of regulated industries such as government and public sector organizations. Palette VerteX integrates Spectro Cloud’s Federal Information Processing Standards (FIPS) 140-2 cryptographic modules. To learn more about FIPS-enabled Palette, check out [Palette VerteX](../vertex/vertex.md).
+
+![Palette product high level overview eager-load](/docs_introduction_product-overview.png)
+
+## What Makes Palette Different?
+
+ + +### Full-Stack Management + +Unlike rigid and prepackaged Kubernetes solutions, Palette allows users to construct flexible stacks from OS, Kubernetes, container network interfaces (CNI), and container storage interfaces (CSI) to additional add-on application services. As a result, the entire stack - not just the infrastructure - of Kubernetes is deployed, updated, and managed as one unit, without split responsibility from virtual machines, base OS, Kubernetes infra, and add-ons. + +### End-to-End Declarative Lifecycle Management + +Palette offers the most comprehensive profile-based management for Kubernetes. It enables teams to drive consistency, repeatability, and operational efficiency across multiple clusters in multiple environments with comprehensive day 0 - day 2 management. + +### Any Environment + +Palette has the richest coverage in supported environments that includes: +- Public Clouds: AWS, Azure, and Google Cloud (both IaaS and managed Kubernetes services EKS/AKS/GKE) +- Data Centers: VMware, OpenStack +- Bare Metal: Canonical MaaS +- Edge + + + +## What is Under the Hood? + +Palette uniquely extends and integrates the Cloud Native Computing Foundation (CNCF) open-source Cluster API project. Palette does this by providing comprehensive full-stack modeling and orchestration, governance, security, and day 0 - day 2 management capabilities. + +With Palette’s Cluster Profiles, teams can define full-stack clusters that include both the Kubernetes infrastructure and any add-on application services. Cluster Profiles enable a repeatable way to deploy and reuse clusters across any environment. Palette also enables importing of existing Kubernetes environments and creating equivalent Cluster Profiles. + + +![2-what-is-sc](/docs_introduction_palette-components.png) + +## Who Can Benefit From Palette? + + +
+ + +### Developers + +Development teams will get the flexibility and freedom they are looking for to increase the speed of innovation, whether it is the cluster template with the add-on application services or choosing a Kubernetes version with integrations like logging, monitoring, and service mesh for your application development. They need not worry about Kubernetes configurations but focus on the stuff that matters. + +### IT Operations and SREs + +Declarative management makes life easier for IT teams, with consistency, repeatability, and all the enterprise-grade controls and governance they need - especially when moving to production [Cluster Profiles](/glossary-all#clusterprofile) enable them to define and re-use full-stack clusters and support them across the entire lifecycle without having to write scripts, as well as integrate with existing tools and methodologies. + + +### IT Executives + +With an open and enterprise-grade platform, IT leaders can get peace of mind without being locked into proprietary orchestration technologies or one-size-fits-all solutions. This helps lower the total cost of ownership (TCO) and reduce operational risk. + + +## Next Steps +Learn more about Palette and how it can improve your Kubernetes experience and those in your organization. Try [Palette](https://console.spectrocloud.com/) for free today and experience a better way of working with Kubernetes. + +- [Try Palette for Free](../getting-started/palette-freemium.md) + +- [App Mode and Cluster Mode](palette-modes.md) + +- [Palette Architecture](../architecture/architecture-overview.md) diff --git a/docs/docs-content/introduction/palette-modes.md b/docs/docs-content/introduction/palette-modes.md new file mode 100644 index 0000000000..d86bb5ca92 --- /dev/null +++ b/docs/docs-content/introduction/palette-modes.md @@ -0,0 +1,91 @@ +--- +sidebar_label: "App Mode and Cluster Mode" +title: "App Mode and Cluster Mode" +description: "Learn about the two modes available in Palette and how they benefit your Kubernetes experience." +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["app mode", "cluster mode"] +--- + +## Palette Modes +Palette supports two consumption modes - each aimed at different use cases and, potentially, different personas. The two modes are _App Mode_ and _Cluster Mode_. These modes can be used separately but often work together by sharing resources or relying on resources that each provides. + +![App Mode and Cluster Mode](/docs_introduction_palette-modes.png) + +### What is Cluster Mode? + +Cluster mode gives you the ability to provision Kubernetes clusters to various platforms and cloud providers through Palette. Palette manages and maintains the lifecycle of these Kubernetes clusters. We call a Kubernetes cluster that Palette manages and deploys a [_Host Cluster_](../glossary-all.md#host-cluster). + +Cluster mode is frequently leveraged by personas such as platform engineers, infrastructure engineers, system administrators, and others who are in a role that requires them to support infrastructure. These personas frequently leverage cluster mode to specify attributes that should make up the Kubernetes cluster, and where and how the cluster should be deployed. These operators leverage a concept we call [_Cluster Profiles_](../cluster-profiles/cluster-profiles.md). Other users such as developers, can also leverage cluster mode and cluster profiles to deploy a Kubernetes cluster for ad-hoc purposes, such as research efforts. 
+ +When you operate in cluster mode, you have the ability to specify projects to control the scope of the Kubernetes cluster. The ability to specify projects is beneficial when segmenting resources for different teams. For example, a project titled “ml-modeling” could belong to a team focused on machine learning. In the project “modeling,” you could deploy various Kubernetes clusters for the machine learning team to conduct their work. These Kubernetes clusters could also be grouped together (Cluster Group) if grouping of similar resources is needed. + +Other teams could be prevented from accessing the resources that belong to the project “modeling” by not being a member of the project. Palette offers role-based access control (RBAC) that enables more fine-grained control of resources. Lastly, you can also view the utilization of resources from the project level, which is helpful when understanding utilization and reviewing costs. + +Another important feature of cluster mode is the ability to allow specific host clusters to support Palette Virtual Clusters. Virtual clusters are Kubernetes clusters that run as nested clusters within an existing host cluster. A virtual cluster looks and feels like a normal Kubernetes cluster, except that it resides inside a larger Kubernetes cluster or host cluster often deployed in a cloud provider or on-prem. You can control the resources a virtual cluster is allocated, such as CPU, memory, and storage. + +Virtual clusters are powerful and beneficial to teams due to their characteristics: +- Look and feel exactly like a host cluster +- Are up and running within minutes +- Removes the infrastructure overhead for downstream consumers +- Reduces the need to set up or manage complicated Kubernetes namespaces and roles. Teams can instead receive their own virtual cluster without worrying about permissions or affecting other teams’ resources. + +Virtual clusters help reduce development time by allowing downstream consumers to focus more on application development versus addressing infrastructure overhead. You can also [pause and resume](../devx/palette-virtual-clusters/pause-restore-virtual-clusters.md) virtual clusters, which helps significantly in reducing costs. App mode heavily leverages virtual clusters. + + +### What is App Mode? + +App Mode is a unique experience that Palette provides in that it removes Kubernetes infrastructure overhead as much as possible. In App mode, you can focus on creating and managing [_App Profiles_](../devx/app-profile/app-profile.md). App profiles are declarative templates that you use to define all the required services, containers, and databases that make up an application. Once you define an app profile, you can deploy your application to any Palette Virtual Cluster by specifying the respective app profile. + +App mode comes with an out-of-the-box cluster group managed by us here at Spectro Cloud called _beehive_. This cluster group, which under the cover is a collection of Kubernetes clusters, is configured to support Palette Virtual Clusters. As a consumer, you can deploy a new virtual cluster to the beehive cluster group and get started with a Kubernetes cluster in minutes. + +App mode's ability to get you started with a Kubernetes cluster in minutes makes it a powerful development tool. You can use the virtual clusters temporarily, such as for testing, ad-hoc development, or any other scenario where you want a short-lived Kubernetes environment up and running quickly. 
Alternatively, you could use app mode to offer your own Palette-managed host clusters as a PaaS experience to downstream consumers. This concept is more easily explained through an example. Assume you are a system administrator, and you want to expose Kubernetes to various in-house development teams. You could deploy several Kubernetes clusters to various platforms and create a "development" cluster group. You also ensure every cluster is enabled for Palette Virtual Clusters by selecting the option before deployment. You can now direct your organization members to use app mode and create Palette Virtual Clusters as needed, or you can create virtual clusters ahead of time for them. The organization members or downstream consumers can now focus on creating app profiles and deploying their applications. You have essentially enabled a Kubernetes PaaS experience for your organization.
+
+As the consumer of app mode, you simply focus on deploying your application to a Kubernetes cluster by specifying the app profile. The overhead of managing infrastructure has essentially been removed for you, thus freeing up your time to focus on what matters the most: developing an application that solves business problems.
+
+
+### How to Access Each Mode?
+
+You can quickly toggle between **App Mode** and **Cluster Mode** by navigating to the **User Menu** at top right and selecting the mode you want.
+
+
+### App Mode or Cluster Mode?
+
+You might ask yourself, "How do I know which mode I should use?" The answer comes down to your objective.
+
+- Choose cluster mode if you want to enable Kubernetes capabilities for others or configure Palette. Cluster mode provides all the configuration options a power user wants.
+- Choose app mode if you want to simply deploy an application using Kubernetes without the infrastructure overhead. If you want to simply try out Palette, app mode is a good starting point.
+
+
+App Mode may not meet your needs if your application requires a lot of resources. The Palette-managed cluster group, called Beehive, imposes a resource limitation that could prevent a resource-heavy application from launching successfully. Review the [Resource Quota](../devx/manage-dev-engine/resource-quota.md) documentation to understand App Mode limits. If you already have Palette-managed Kubernetes host clusters deployed or available to you as a cluster group with Palette Virtual Clusters enabled, then leveraging App Mode is a great fit so you can focus on the developer experience aspect.
+
+Below are some of the characteristics of each mode. Use this to help you better understand the differences between the two modes.
+
+
+- App Mode
+  - Optimized for the developer experience
+  - You’re a builder who is not part of an organization and needs quick access to a Kubernetes cluster
+  - Expose a PaaS-like experience to organizational members by leveraging Palette-managed Kubernetes clusters
+  - Deploy applications without worrying about Kubernetes infrastructure
+  - Scope of concerns limited to app profiles
+
+
+- Cluster Mode
+  - Optimized for power users and those who are comfortable with deploying Kubernetes clusters
+  - Used to deploy host clusters to different platforms (VMware, AWS, GCP, Azure, etc.)
+  - Deploy Edge clusters
+  - Create cluster groups
+  - Create cluster profiles
+  - Create projects, workspaces, and teams
+  - Leverage specialized hardware for Kubernetes workloads
+  - Audit logging
+  - Enable cloud providers and other platforms
+  - Configure registries
+
+
+## Next Steps
+
+Get started with [Palette](https://console.spectrocloud.com/) today and deploy an application through [app mode](../devx/devx.md). Or create a Kubernetes cluster on your favorite platform and let Palette handle the challenges of maintaining Kubernetes clusters by leveraging cluster mode and [cluster profiles](../cluster-profiles/cluster-profiles.md).
\ No newline at end of file
diff --git a/docs/docs-content/kubernetes-knowlege-hub/_category_.json b/docs/docs-content/kubernetes-knowlege-hub/_category_.json
new file mode 100644
index 0000000000..69ddc6a521
--- /dev/null
+++ b/docs/docs-content/kubernetes-knowlege-hub/_category_.json
@@ -0,0 +1,3 @@
+{
+  "position": 270
+}
diff --git a/docs/docs-content/kubernetes-knowlege-hub/how-to/how-to-retrieve-images-from-private-registry.md b/docs/docs-content/kubernetes-knowlege-hub/how-to/how-to-retrieve-images-from-private-registry.md
new file mode 100644
index 0000000000..b994a0d95b
--- /dev/null
+++ b/docs/docs-content/kubernetes-knowlege-hub/how-to/how-to-retrieve-images-from-private-registry.md
@@ -0,0 +1,205 @@
+---
+sidebar_label: "Retrieve Images from a Private Registry"
+title: "Retrieve Images from a Private Registry"
+description: "Create a Kubernetes Secret to retrieve images from a private registry."
+icon: ""
+hide_table_of_contents: false
+sidebar_position: 0
+hiddenFromNav: false
+tags: ["how-to","k8s-tips"]
+---
+
+
+
+
+## How To Retrieve Images from a Private Registry in Kubernetes
+
+Kubernetes is an open-source container orchestration platform that enables efficient management, deployment, and scaling of containerized applications.
+
+By default, Docker registries such as Docker Hub allow only a limited number of unauthenticated pulls. When you exceed this limit, you will not be able to pull any more images until the limit resets.
+
+The limit is based on the IP address of the machine that is making the pulls, so it applies to all containers running on that machine.
+
+To avoid this issue, we recommend that you authenticate with the Docker registry before pulling images, especially if you are pulling from a private registry. This ensures you have access to the images you need and can pull them without restrictions or limitations.
+
+To log into a Docker registry from Kubernetes, you must create a secret that contains your registry credentials. You can use this secret in a Kubernetes deployment configuration to pull images from the registry.
+
+In this how-to guide, you will log into a private Docker registry to pull existing images of an application that you will deploy in Kubernetes. 
+ +## Prerequisites + +- The kubectl [command-line tool](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/). Kubectl allows you to connect to, configure and work with your clusters through the command line. +- Access to a private registry. [DockerHub](https://hub.docker.com/) offers a single private registry on the free tier. If you do not have a personal registry account, you can use DockerHub. +- Access to a running Kubernetes cluster. To learn how to create clusters in different environments using Palette, review guides listed under [Clusters](../../clusters/clusters.md) or visit the [Palette Onboarding Workflow](../../getting-started/onboarding-workflow.md#palette-workflow) guide. To learn how to create a Kubernetes cluster from scratch, check out the [Create a Cluster](https://kubernetes.io/docs/tutorials/kubernetes-basics/create-cluster/) Kubernetes resource. + +The following example explains how you can create a secret and use it in a Kubernetes deployment. + +## Create a Credentials JSON File + +First, create a file called **registry-creds.json** that contains your registry credentials in the following format. + +
+ +```json +{ + "auths": { + "example.registry.com": { + "username": "username", + "password": "password" + } + } +} +``` + +Keeping passwords in plain text is unsafe. Kubernetes automatically encodes passwords used to create a secret in base64. Encoding passwords does not mean your passwords cannot be decoded. + +## Create a Kubernetes Secret + +Use the `kubectl` command-line tool to generate a secret from the **registry-creds.json** file. + +
+
+```bash
+kubectl create secret generic myregistrykey --from-file=.dockerconfigjson=registry-creds.json --type=kubernetes.io/dockerconfigjson
+```
+
+The `kubernetes.io/dockerconfigjson` type and the `.dockerconfigjson` key allow Kubernetes to use the secret as an image pull secret.
+
+You can use the command below to view the secret created in detail.
+
+ +```bash +kubectl get secret/myregistrykey --output json +``` + +The command output displays the content of the **registry-creds.json** file as base 64 encoded. + +
+
+```json
+{
+  "apiVersion": "v1",
+  "data": {
+    ".dockerconfigjson": "ewogICJhdXRocyI6IHsKICAgICJleGFtcGxlLnJlZ2lzdHJ5LmNvbSI6IHsKICAgICAgInVzZXJuYW1lIjogInRlc3RfdXNlcm5hbWUiLAogICAgICAicGFzc3dvcmQiOiAidGVzdF9wYXNzd29yZCIKICAgIH0KICB9Cn0K"
+  },
+  "kind": "Secret",
+  "metadata": {
+    "creationTimestamp": "2023-03-22T08:44:26Z",
+    "name": "myregistrykey",
+    "namespace": "default",
+    "resourceVersion": "1552285",
+    "uid": "ccfb047b-67c8-446b-a69a-6eb762c3100f"
+  },
+  "type": "kubernetes.io/dockerconfigjson"
+}
+```
+
+Invoke the following command to decode the secret and verify that the data is only base64 encoded, not encrypted.
+
+
+```bash
+kubectl get secret myregistrykey --output jsonpath='{.data.\.dockerconfigjson}' | base64 --decode
+```
+
+The output of issuing the command above is the content of the JSON file you used to create the secret.
+
+ +```json +{ + "auths": { + "example.registry.com": { + "username": "username", + "password": "password" + } + } +} +``` + +## Add Secret to Deployment Config + +In your Kubernetes deployment configuration, specify the name of the secret you just created for the imagePullSecrets parameter. + +
+ +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-deployment +spec: + replicas: 3 + selector: + matchLabels: + app: my-app + template: + metadata: + labels: + app: my-app + spec: + containers: + - name: my-container + image: registry.example.com/my-image + imagePullSecrets: + - name: myregistrykey +``` + +## Apply the Deployment Configuration + +
+
+```bash
+kubectl apply --filename deployment.yaml
+```
+
+With this configuration in place, Kubernetes will use the registry credentials in the `myregistrykey` secret to log into the registry and pull the specified image when deploying the application.
+
+## Other Docker Registry Authentication Methods
+
+An alternative way to log into a Docker registry from Kubernetes is to pass the credentials directly to `kubectl` instead of using a credentials file.
+
+Authenticate to the private registry. Here’s an example of how to do this.
+
+
+```bash
+kubectl create secret docker-registry <secret-name> \
+    --docker-server=<registry-server> \
+    --docker-username=<username> \
+    --docker-password=<password> \
+    --docker-email=<email>
+```
+
+In the snippet above, **`<secret-name>`** refers to a unique name for the secret, and **`<registry-server>`** is the URL of the private registry. Replace **`<username>`** with the username for authentication and **`<password>`** with the password for authentication. Also, replace **`<email>`** with the email associated with the authentication credentials.
+
+Add the secret created in the previous step to the default service account with the following code.
+
+
+```bash
+kubectl patch serviceaccount default \
+    --patch '{"imagePullSecrets": [{"name": "<secret-name>"}]}'
+```
+
+Replace **`<secret-name>`** with the name of the secret created in the previous step.
+
+Once you are authenticated and have added the secret to your default service account, you can use the `kubectl` command to pull images from the registry and deploy them to your Kubernetes cluster as follows.
+
+
+```bash
+kubectl run <app-name> \
+    --image=<registry-server>/<image-name>:<tag> \
+    --port=<port>
+```
+
+Replace **`<app-name>`** with a name for the pod, **`<registry-server>`**, **`<image-name>`**, and **`<tag>`** with the image reference in your private registry, and **`<port>`** with the container port to expose. The command above creates a new pod that uses the image pulled from the private registry.
+
+## Next Steps
+
+Accessing images from a private registry in Kubernetes can be challenging due to the need to authenticate with the registry.
+
+To solve this challenge, you have learned how to create a Kubernetes secret with your Docker registry credentials and use it in a Kubernetes deployment configuration. This allows you to pull images from your private registry without restrictions or limitations.
+
+To learn more about Kubernetes and how to use it to deploy your application, check out [Palette's Dev Engine](../../devx/apps/deploy-app.md) and how it can reduce the challenges often encountered with deploying apps to Kubernetes. You can also read about [how to deploy a stateless frontend application](/kubernetes-knowlege-hub/tutorials/deploy-stateless-frontend-app) on Kubernetes or join our [Slack channel](https://join.slack.com/t/spectrocloudcommunity/shared_invite/zt-1mw0cgosi-hZJDF_1QU77vF~qNJoPNUQ). Learn from other Kubernetes users and get to know fellow community members.
diff --git a/docs/docs-content/kubernetes-knowlege-hub/how-to/how-to.md b/docs/docs-content/kubernetes-knowlege-hub/how-to/how-to.md
new file mode 100644
index 0000000000..ca76427de0
--- /dev/null
+++ b/docs/docs-content/kubernetes-knowlege-hub/how-to/how-to.md
@@ -0,0 +1,23 @@
+---
+sidebar_label: "How to"
+title: "Get started with a quick Kubernetes How-to"
+description: "Kubernetes School"
+hide_table_of_contents: false
+hiddenFromNav: false
+sidebar_custom_props:
+  icon: "book"
+tags: ["k8s-tips"]
+---
+
+
+
+
+## How To
+
+Learn about core Kubernetes concepts and how you can apply them on Spectro Cloud Palette.
+
+## Core Kubernetes
+- [How To Retrieve Images from a Private Registry in Kubernetes](how-to-retrieve-images-from-private-registry.md)
+
+
diff --git a/docs/docs-content/kubernetes-knowlege-hub/kubernetes-knowlege-hub.md b/docs/docs-content/kubernetes-knowlege-hub/kubernetes-knowlege-hub.md new file mode 100644 index 0000000000..edb758f09b --- /dev/null +++ b/docs/docs-content/kubernetes-knowlege-hub/kubernetes-knowlege-hub.md @@ -0,0 +1,26 @@ +--- +sidebar_label: "Kubernetes Knowledge Hub" +title: "Kubernetes Knowledge Hub" +description: "Kubernetes Knowledge Hub Repository" +hide_table_of_contents: true +sidebar_custom_props: + icon: "bookmark" +--- + + + + +# Welcome to the Spectro Cloud Knowledge Portal + +Welcome to the Spectro Cloud Kubernetes Knowledge Hub. You will find core Kubernetes tutorials, how-tos, frequently asked questions, and community curated resources. + +If you have a topic in mind you would like to see, use the Feedback app on the lower-right-hand corner. +
+ +- [How-To](/kubernetes-knowlege-hub/how-to) + + +- [Tutorials](/kubernetes-knowlege-hub/tutorials) + + +
diff --git a/docs/docs-content/kubernetes-knowlege-hub/tutorials/deploy-stateless-frontend-app.md b/docs/docs-content/kubernetes-knowlege-hub/tutorials/deploy-stateless-frontend-app.md new file mode 100644 index 0000000000..e93b636688 --- /dev/null +++ b/docs/docs-content/kubernetes-knowlege-hub/tutorials/deploy-stateless-frontend-app.md @@ -0,0 +1,236 @@ +--- +sidebar_label: 'Deploy a Stateless Frontend Application With Kubernetes' +title: 'Deploy a Stateless Frontend Application With Kubernetes' +description: 'One of the key benefits of using Kubernetes is that it provides a consistent and reliable way to deploy applications across different environments, including on-premises data centers and cloud infrastructure. Learn how to deploy a stateless frontend application in Kubernetes.' +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +hiddenFromNav: false +tags: ["tutorial","k8s-tips"] +--- + + + + +# Deploy a Stateless Frontend App with Kubernetes + +Kubernetes is a container orchestration platform that is widely used for deploying and managing containerized applications. + +One of the key benefits of using Kubernetes is that it provides a consistent and reliable way to deploy applications across different environments, including on-prem data centers and cloud infrastructure. + +Deploying a stateless frontend application with Kubernetes can be a straightforward process, although it requires an understanding of the key concepts and best practices of Kubernetes. + +In this tutorial, you will containerize a date suggester app built in React and deploy it with Kubernetes. This application is bootstrapped with [Create React App](https://create-react-app.dev/). + +## Prerequisites + +- An installation of [Node.js and NPM](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) on your machine. Node is a Javascript runtime environment and will enable React to run on your machine. + + +- A clone of the application from the [date suggestions app](https://github.com/Princesso/date-buddy.git) on GitHub. Cloning the application will enable you to follow this tutorial step by step. + + +- A Docker account and a [Docker installation](https://docs.docker.com/engine/install/ubuntu/) on your machine. + + +- An active Kubernetes cluster. Check out the [Deploy a Cluster with Palette](../../clusters/public-cloud/deploy-k8s-cluster.md) tutorial to get started. + + +- An installation of the [kubectl command-line tool](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/) on your machine and connected to your cluster. +- A LoadBalancer. You can [create a LoadBalancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) with a public cloud provider, or use the [minikube tunnel](https://minikube.sigs.k8s.io/docs/commands/tunnel/) to trick a local cluster into exposing a resource. + +## About the Application + +The date suggester app is written in React. It takes a single date input on when a user will like to go on a date and displays a date idea for the selected date. + +The app data comes from a JSON file that lives on the frontend app. + +## Clone the Application. + +Use the command shown below to clone the application from GitHub. + +
+ +```bash +git clone https://github.com/spectrocloud/date-buddy +``` + +If you prefer to use a different stateless frontend app, you can do so. You may, however, get different results than in this tutorial. This tutorial only serves as a guide. + +## Create a Dockerfile on the App’s Root Directory. + +Before continuing this step, ensure Docker is installed on your machine. In the app's root directory, create a file named **Dockerfile**. + +
+ +```bash +touch Dockerfile +``` + +In a text editor, add the lines below to the Dockerfile. + +
+ +```bash +FROM node:12 + +WORKDIR /date-suggestions + +COPY package*.json ./ + +RUN npm install + +COPY . . + +EXPOSE 3000 + +CMD ["npm", "start"] +``` + +Also, create a **.dockerignore** file and add the following lines to it. + +```bash +/node_modules +/.pnp +.pnp.js +/coverage +``` + +## Build a Docker Image of the Application. + +This step packages the application into a portable image. To build the app’s image, run the Docker `build` command as shown. + +
+ +```bash +docker build --tag date-suggestions . +``` + +## Create a Kubernetes Deployment. + +Before continuing with this step, ensure that you have access to a Kubernetes cluster, as explained in the [prerequisites](#prerequisites). + +In the application's root directory, create a Kubernetes Deployment file using the `kubectl` command below. + +
+ +```bash +kubectl create deploy date-suggestions --image=date-suggestions --replicas=2 --port=3000 --dry-run=client --output yaml +``` + +The command output is a YAML representation of the deployment, similar to the lines below. + +
+ +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: date-suggestions +spec: + selector: + matchLabels: + app: date-suggestions + replicas: 2 + template: + metadata: + labels: + app: date-suggestions + spec: + containers: + - name: date-suggestions + image: date-suggestions + ports: + - containerPort: 3000 +``` + + +You can use the output YAML to create a deployment file. Use the redirect operator `>` to turn the command output into a **deployment.yaml** file. + +
+ +```bash +kubectl create deploy date-suggestions --image=date-suggestions --replicas=2 --port=3000 --dry-run=client --output yaml > deployment.yaml +``` + +Alternatively, you can use the `touch` command to create the **deployment.yaml** file, and then copy the YAML output from the command to create a deployment to it. + +
+ +```bash +touch deployment.yaml +``` + +## Create a Kubernetes Service. + +Create and populate a Kubernetes Service file in the app's root directory. By default, your application will only be accessible within the cluster. You'll need to create a Kubernetes service resource to expose the application to resources outside the Kubernetes cluster. A service resource creates an abstraction over a set of pods that provides discovery and routing between them. + +To create a service, use the `kubectl expose` command as shown below. + +
+
+```bash
+kubectl expose deployment date-suggestions --type=LoadBalancer --port=80 --target-port=3000 --name=date-suggestions-service --dry-run=client --output yaml
+```
+
+The output of running the command will be similar to the YAML below.
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: date-suggestions-service
+spec:
+  type: LoadBalancer
+  selector:
+    app: date-suggestions
+  ports:
+    - protocol: TCP
+      port: 80
+      targetPort: 3000
+```
+
+If everything looks good, modify the command to redirect the output YAML to the file **service.yaml**.
+
+```bash
+kubectl expose deployment date-suggestions --type=LoadBalancer --port=80 --target-port=3000 --name=date-suggestions-service --dry-run=client --output yaml > service.yaml
+```
+
+You can also create a YAML file with the `touch` command and add the output of the `kubectl expose` command to it.
+
+
+```bash
+touch service.yaml
+```
+
+Then copy and paste the service YAML output shown above into the **service.yaml** file.
+
+## Deploy the Application.
+Use the `kubectl` command-line tool connected to the cluster you created earlier, and deploy the application by applying both files to Kubernetes.
+
+
+```bash
+kubectl apply --filename deployment.yaml --filename service.yaml
+```
+
+## Confirm That the Deployment Was Successful.
+
+Once the deployment and service files have been applied, you should be able to access your app by issuing the following command.
+
+
+```bash
+kubectl get service date-suggestions-service --output=jsonpath='{.status.loadBalancer.ingress[0].ip}'
+```
+
+This command displays the external IP address of your app, which you can use to access it in a web browser.
+
+## Next Steps
+
+Deploying a stateless frontend application with Kubernetes can be a straightforward process if you understand the fundamental concepts of Kubernetes.
+
+In this tutorial, you containerized a stateless React-based app and deployed it with Kubernetes by creating a Dockerfile, building a Docker image, creating a Kubernetes deployment, and creating a Kubernetes service.
+
+To learn more about Kubernetes, you can join our [Slack channel](https://join.slack.com/t/spectrocloudcommunity/shared_invite/zt-1mw0cgosi-hZJDF_1QU77vF~qNJoPNUQ). Learn from other Kubernetes users and get to know fellow community members.
\ No newline at end of file
diff --git a/docs/docs-content/kubernetes-knowlege-hub/tutorials/tutorials.md b/docs/docs-content/kubernetes-knowlege-hub/tutorials/tutorials.md
new file mode 100644
index 0000000000..5bc0e2e52f
--- /dev/null
+++ b/docs/docs-content/kubernetes-knowlege-hub/tutorials/tutorials.md
@@ -0,0 +1,20 @@
+---
+sidebar_label: "Tutorials"
+title: "Tutorials"
+description: "Kubernetes School with How to"
+hide_table_of_contents: false
+hiddenFromNav: false
+sidebar_custom_props:
+  icon: "book"
+---
+
+
+Learn about core Kubernetes concepts and how you can apply them on Spectro Cloud Palette.
+
+ + +## Core Kubernetes + +- [Deploy a Stateless Frontend Application on Kubernetes](deploy-stateless-frontend-app.md) + diff --git a/docs/docs-content/legal-licenses.md b/docs/docs-content/legal-licenses.md new file mode 100644 index 0000000000..294a1c9ecf --- /dev/null +++ b/docs/docs-content/legal-licenses.md @@ -0,0 +1,500 @@ +--- +sidebar_label: "Legal & Acknowledgments" +title: "Legal & Acknowledgments" +description: "Review the legal and open source components used in Palette." +sidebar_position: 280 +hide_table_of_contents: false +sidebar_custom_props: + icon: "gavel" +tags: ["legal", "licenses"] +--- + + + + +The following table lists the open-source licenses tied to the libraries and modules currently in use by Palette. If you have any questions or concerns, contact us at support@spectrocloud.com + +| Library | License| +|------- | -------| +|api | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)| +| apimachinery | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| appengine | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| assert | [MIT](https://opensource.org/license/mit/) +| atomic | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| aws-sdk-go | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| azure-sdk-for-go | [MIT](https://opensource.org/license/mit/) +| backoff | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| beego | [MIT](https://opensource.org/license/mit/) +| cast | [MIT](https://opensource.org/license/mit/) +| cert-manager | [MIT](https://opensource.org/license/mit/) +| client-go | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| client_golang | [MIT](https://opensource.org/license/mit/) +| client_model | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cloud.google.com/go/container | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cluster-api | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cluster-api-bootstrap-provider-microk8s | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cluster-api-control-plane-provider-microk8s | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cluster-api-provider-aws | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cluster-api-provider-azure | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cluster-api-provider-coxedge | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cluster-api-provider-gcp | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cluster-api-provider-maas | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cluster-api-provider-openstack | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cluster-api-provider-vcluster | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cluster-api-provider-vsphere | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cluster-bootstrap | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| common | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| component-base | [MIT](https://opensource.org/license/mit/) +| compress | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| concurrent | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| containerized-data-importer | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| controller-runtime | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| corefile-migration | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| crypto | 
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| easyjson | [MIT](https://opensource.org/license/mit/)
+| emperror.dev/errors | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| errors | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| errwrap | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause)
+| exp | [MIT](https://opensource.org/license/mit/)
+| flect | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause)
+| fsnotify | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause)
+| ginkgo | [MPL-2.0](https://www.mozilla.org/en-US/MPL/2.0)
+| github.com/andybalholm/brotli | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause)
+| github.com/apparentlymart/go-cidr | [MIT](https://opensource.org/license/mit/)
+| github.com/avast/retry-go | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause)
+| github.com/Azure/go-autorest/autorest/to | [MIT](https://opensource.org/license/mit/)
+| github.com/Azure/go-autorest/autorest/validation | [MIT](https://opensource.org/license/mit/)
+| github.com/blang/semver/v4 | [MIT](https://opensource.org/license/mit/)
+| github.com/coredns/caddy | [MIT](https://opensource.org/license/mit/)
+| github.com/docker/distribution | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/dsnet/compress | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/emicklei/go-restful/v3 | [MIT](https://opensource.org/license/mit/)
+| github.com/evanphx/json-patch/v5 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/go-errors/errors | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/golang-jwt/jwt/v4 | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause)
+| github.com/google/gnostic | [MIT](https://opensource.org/license/mit/)
+| github.com/go-openapi/analysis | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause)
+| github.com/go-openapi/errors | [MIT](https://opensource.org/license/mit/)
+| github.com/go-openapi/loads | [MIT](https://opensource.org/license/mit/)
+| github.com/go-openapi/runtime | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/go-openapi/strfmt | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/go-openapi/validate | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/gophercloud/gophercloud | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/grpc-ecosystem/grpc-gateway/v2 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/jasonlvhit/gocron | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/juliangruber/go-intersect | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/klauspost/pgzip | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/lib/pq | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause)
+| github.com/Masterminds/goutils | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause)
+| github.com/Masterminds/sprig/v3 | [MIT](https://opensource.org/license/mit/)
+| github.com/mholt/archiver/v3 | [MIT](https://opensource.org/license/mit/)
+| github.com/minio/highwayhash | [MIT](https://opensource.org/license/mit/)
+| github.com/mitchellh/copystructure | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/mitchellh/hashstructure | [MIT](https://opensource.org/license/mit/)
+| 
github.com/mitchellh/reflectwalk | [MIT](https://opensource.org/license/mit/)
+| github.com/nats-io/jwt/v2 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/nats-io/nats.go | [MIT](https://opensource.org/license/mit/)
+| github.com/nats-io/nkeys | [MIT](https://opensource.org/license/mit/)
+| github.com/nats-io/nuid | [MIT](https://opensource.org/license/mit/)
+| github.com/nwaples/rardecode | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/opencontainers/go-digest | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/openshift/custom-resource-status | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/PaesslerAG/gval | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/pborman/uuid | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause)
+| github.com/pierrec/lz4/v4 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/shopspring/decimal | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| github.com/tidwall/pretty | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause)
+| github.com/ulikunitz/xz | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause)
+| github.com/xi2/xz | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause)
+| go | [MIT](https://opensource.org/license/mit/)
+| goautoneg | [MIT](https://opensource.org/license/mit/)
+| go-autorest | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause)
+| go-client | public-domain
+| go-cmp | [MIT](https://opensource.org/license/mit/)
+| go-difflib | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause)
+| gofuzz | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| go-genproto | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| go-humanize | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| go-internal | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| go-jmespath | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| golang-lru | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| golang_protobuf_extensions | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause)
+| gomega | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause)
+| gomodules.xyz/jsonpatch/v2 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| go.mongodb.org/mongo-driver | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| go-multierror | [MIT](https://opensource.org/license/mit/)
+| google-cloud-go | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause)
+| go.opentelemetry.io/otel/exporters/jaeger | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| go.opentelemetry.io/otel/internal/metric | [MPL-2.0](https://www.mozilla.org/en-US/MPL/2.0)
+| go-spew | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| gotest.tools | [MIT](https://opensource.org/license/mit/)
+| govalidator | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| go-wildcard | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| go-yaml | [MPL-2.0](https://www.mozilla.org/en-US/MPL/2.0)
+| groupcache | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| grpc-go | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| harbor | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| httpsnoop | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
+| inf.v0 | [ISC](https://opensource.org/license/isc-license-txt)
+| intern | 
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| ip-address-manager | [MIT](https://opensource.org/license/mit/) +| json | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| jsonpath | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| jsonpointer | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| jsonreference | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| k8s.io/apiextensions-apiserver | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| klog | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| kube-openapi | [MIT](https://opensource.org/license/mit/) +| kubevirt.io/controller-lifecycle-operator-sdk/api | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| logr | [MIT](https://opensource.org/license/mit/) +| logrus | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| mapstructure | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| martian | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| mergo | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| metrics | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| multierr | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| mux | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| mysql | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| net | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| oauth2 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| opentelemetry-go | [MIT](https://opensource.org/license/mit/) +| opentelemetry-go-contrib | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| opentelemetry-proto-go | [MIT](https://opensource.org/license/mit/) +| opentracing-go | [MIT](https://opensource.org/license/mit/) +| perks | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| pflag | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| procfs | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| protobuf | [MIT](https://opensource.org/license/mit/) +| reflect2 | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| semver | [MPL-2.0](https://www.mozilla.org/en-US/MPL/2.0) +| shutdown | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| sigs.k8s.io/gateway-api | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| snappy | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| spec | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| stdr | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| structured-merge-diff | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| swag | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| sys | [MIT](https://opensource.org/license/mit/) +| system-upgrade-controller | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| tail | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| term | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| testify | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| text | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| time | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| tomb.v1 | [MIT](https://opensource.org/license/mit/) +| ulid | [MIT](https://opensource.org/license/mit/) +| utils | [MIT](https://opensource.org/license/mit/) +| uuid | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| wrangler | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| xstrings | 
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| xxhash | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| yaml | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| yaml.v2 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| yaml.v3 | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| github.com/vmware/govmomi | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| sigs.k8s.io/controller-runtime | [MIT](https://opensource.org/license/mit/) +| ajv | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| antd | [MIT](https://opensource.org/license/mit/) +| @ant-design/compatible | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| @ant-design/icons | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| axios | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| clipboard | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| color | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| connected-react-router | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| @fortawesome/fontawesome-svg-core | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @fortawesome/free-solid-svg-icons | [MIT](https://opensource.org/license/mit/) +| @fortawesome/react-fontawesome | [MIT](https://opensource.org/license/mit/) +| history | [MIT](https://opensource.org/license/mit/) +| i18next | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| i18next-browser-languagedetector | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| i18next-xhr-backend | [MIT](https://opensource.org/license/mit/) +| immer | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| lodash | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| lscache | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| moment | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| monaco-editor | [MIT](https://opensource.org/license/mit/) +| monaco-themes | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) +| monaco-yaml | [MIT](https://opensource.org/license/mit/) +| @nivo/core | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @nivo/line | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @nivo/pie | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| node-fetch | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| normalizr | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| prettier | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| prop-types | [MIT](https://opensource.org/license/mit/) +| query-string | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| react | [MIT](https://opensource.org/license/mit/) +| react-dom | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| react-i18next | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| react-js-cron | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| react-redux | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| react-router | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| react-router-dom | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| @react-spring/core | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @react-spring/three | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @react-spring/web | [MIT](https://opensource.org/license/mit/) +| react-teleporter | [MIT](https://opensource.org/license/mit/) +| @react-three/fiber | 
[MIT](https://opensource.org/license/mit/) +| react-transition-group | [MIT](https://opensource.org/license/mit/) +| redux | [MIT](https://opensource.org/license/mit/) +| redux-debounce-thunk | [MIT](https://opensource.org/license/mit/) +| redux-devtools-extension | [MIT](https://opensource.org/license/mit/) +| redux-thunk | [MIT](https://opensource.org/license/mit/) +| reselect | [MIT](https://opensource.org/license/mit/) +| styled-components | [MIT](https://opensource.org/license/mit/) +| three | [MIT](https://opensource.org/license/mit/) +| @typescript-eslint/eslint-plugin | [MIT](https://opensource.org/license/mit/) +| @typescript-eslint/parser | [MIT](https://opensource.org/license/mit/) +| validator | [MIT](https://opensource.org/license/mit/) +| cryptography | [MIT](https://opensource.org/license/mit/) +| github.com/flynn/go-shlex | [MIT](https://opensource.org/license/mit/) +| k8s.io/klog | [MIT](https://opensource.org/license/mit/) +| aec | [MIT](https://opensource.org/license/mit/) +| bugsnag-go | [MIT](https://opensource.org/license/mit/) +| cli | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| containerd | [MIT](https://opensource.org/license/mit/) +| docker-credential-helpers | [MIT](https://opensource.org/license/mit/) +| ghw | [MIT](https://opensource.org/license/mit/) +| github.com/docker/docker | [MIT](https://opensource.org/license/mit/) +| github.com/docker/go-metrics | [MIT](https://opensource.org/license/mit/) +| github.com/gdamore/encoding | [MIT](https://opensource.org/license/mit/) +| github.com/gomodule/redigo | [MIT](https://opensource.org/license/mit/) +| github.com/go-ole/go-ole | [MIT](https://opensource.org/license/mit/) +| github.com/jessevdk/go-flags | [MIT](https://opensource.org/license/mit/) +| github.com/kardianos/osext | [MIT](https://opensource.org/license/mit/) +| github.com/lucasb-eyer/go-colorful | [MIT](https://opensource.org/license/mit/) +| github.com/otiai10/copy | [MIT](https://opensource.org/license/mit/) +| github.com/power-devops/perfstat | [MIT](https://opensource.org/license/mit/) +| github.com/rivo/uniseg | [MIT](https://opensource.org/license/mit/) +| github.com/shirou/gopsutil | [MIT](https://opensource.org/license/mit/) +| github.com/shirou/gopsutil/v3 | [MIT](https://opensource.org/license/mit/) +| github.com/StackExchange/wmi | [MIT](https://opensource.org/license/mit/) +| github.com/tklauser/go-sysconf | [MIT](https://opensource.org/license/mit/) +| github.com/tklauser/numcpus | [MIT](https://opensource.org/license/mit/) +| github.com/yusufpapurcu/wmi | [MIT](https://opensource.org/license/mit/) +| go-ansiterm | [MIT](https://opensource.org/license/mit/) +| go-connections | [MIT](https://opensource.org/license/mit/) +| go-homedir | [MIT](https://opensource.org/license/mit/) +| gophercloud | [MIT](https://opensource.org/license/mit/) +| gopsutil | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| go-runewidth | [MIT](https://opensource.org/license/mit/) +| go-units | [ISC](https://opensource.org/license/isc-license-txt) +| go.uuid | [MIT](https://opensource.org/license/mit/) +| go-windows-terminal-sequences | [MIT](https://opensource.org/license/mit/) +| howett.net/plist | [MIT](https://opensource.org/license/mit/) +| image-spec | [MIT](https://opensource.org/license/mit/) +| json-patch | [MIT](https://opensource.org/license/mit/) +| k8s.io/metrics | [MIT](https://opensource.org/license/mit/) +| k8sutil | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) +| libtrust | 
[MIT](https://opensource.org/license/mit/) +| libvirt-go-module | [MIT](https://opensource.org/license/mit/) +| libvirt-go-xml | [ISC](https://opensource.org/license/isc-license-txt) +| locker | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| oras.land/oras-go | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| panicwrap | [MIT](https://opensource.org/license/mit/) +| pcidb | Python-2.0 +| purell | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) +| retry-go | [MIT](https://opensource.org/license/mit/) +| stack | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| sync | [MIT](https://opensource.org/license/mit/) +| tcell | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| tview | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| urlesc | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/google/go-github | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| go-i18n | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| validator.v2 | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| websocket | [MIT](https://opensource.org/license/mit/) +| check.v1 | [MIT](https://opensource.org/license/mit/) +| emperror | [MIT](https://opensource.org/license/mit/) +| gax-go | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| github.com/Azure/go-autorest/autorest/azure/cli | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/dimchansky/utfbom | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/googleapis/enterprise-certificate-proxy | [MIT](https://opensource.org/license/mit/) +| gnostic | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| go-jose.v2 | [MIT](https://opensource.org/license/mit/) +| google-api-go-client | [MIT](https://opensource.org/license/mit/) +| libvirt.org/go/libvirt | [MIT](https://opensource.org/license/mit/) +| logur | [MIT](https://opensource.org/license/mit/) +| maas-client-go | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| opencensus-go | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| pretty | [MIT](https://opensource.org/license/mit/) +| tencentcloud-sdk-go | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| xerrors | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| clockwork | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| connectproxy | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| crypt2go | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| ftp | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/Azure/azure-sdk-for-go/sdk/azidentity | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/beevik/etree | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/bxcodec/faker/v3 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/coreos/go-oidc/v3 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/dgraph-io/ristretto | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/gorhill/cronexpr | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/hashicorp/go-version | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/huandu/xstrings | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/kelseyhightower/envconfig | [MIT](https://opensource.org/license/mit/) +| github.com/kylelemons/godebug | 
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/Masterminds/semver | [MIT](https://opensource.org/license/mit/) +| github.com/mattermost/xml-roundtrip-validator | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/montanaflynn/stats | [MIT](https://opensource.org/license/mit/) +| github.com/pkg/browser | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/robfig/cron | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/stripe/stripe-go/v71 | [MIT](https://opensource.org/license/mit/) +| github.com/youmark/pkcs8 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| glog | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| goconvey | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| gofpdi | [MIT](https://opensource.org/license/mit/) +| gopdf | [MIT](https://opensource.org/license/mit/) +| gopkg.in/alexcesaro/quotedprintable.v3 | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) +| gopkg.in/mail.v2 | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) +| gosaml2 | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) +| goxmldsig | [MPL-2.0](https://www.mozilla.org/en-US/MPL/2.0) +| mail | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| microsoft-authentication-library-for-go | [MIT](https://opensource.org/license/mit/) +| mongo-tools | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| mongo-tools-common | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| pbkdf2 | [MIT](https://opensource.org/license/mit/) +| ratelimiter | [MIT](https://opensource.org/license/mit/) +| scram | [MIT](https://opensource.org/license/mit/) +| stringprep | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| axios-retry | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| base-64 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @datadog/browser-logs | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| fast-deep-equal | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @fullstory/browser | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| i18next-http-backend | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| leaflet | [MIT](https://opensource.org/license/mit/) +| leaflet.markercluster | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| less | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| netmask | [MIT](https://opensource.org/license/mit/) +| @nivo/bar | [MIT](https://opensource.org/license/mit/) +| react-calendar | [MIT](https://opensource.org/license/mit/) +| react-clipboard.js | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| react-dev-utils | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| react-helmet | [MIT](https://opensource.org/license/mit/) +| @stripe/react-stripe-js | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @stripe/stripe-js | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| typescript | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @types/node | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @types/react | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @types/react-dom | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @types/react-redux | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @types/react-router-dom | 
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @types/styled-components | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| unique-names-generator | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| url | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @visx/axis | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| @visx/event | [MIT](https://opensource.org/license/mit/) +| @visx/gradient | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| @visx/grid | [MIT](https://opensource.org/license/mit/) +| @visx/group | [MIT](https://opensource.org/license/mit/) +| @visx/hierarchy | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| @visx/mock-data | [MIT](https://opensource.org/license/mit/) +| @visx/responsive | [MIT](https://opensource.org/license/mit/) +| @visx/scale | [MIT](https://opensource.org/license/mit/) +| @visx/shape | [MIT](https://opensource.org/license/mit/) +| @visx/tooltip | [MIT](https://opensource.org/license/mit/) +| afero | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) +| apiextensions-apiserver | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/AzureAD/microsoft-authentication-library-for-go | [MIT](https://opensource.org/license/mit/) +| github.com/Azure/azure-pipeline-go | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| github.com/Azure/azure-sdk-for-go/sdk/azcore | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| github.com/Azure/azure-sdk-for-go/sdk/internal | [MIT](https://opensource.org/license/mit/) +| github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute | [MIT](https://opensource.org/license/mit/) +| github.com/Azure/azure-storage-blob-go | [MIT](https://opensource.org/license/mit/) +| github.com/Azure/go-autorest/autorest/azure/auth | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| github.com/golang-jwt/jwt | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| github.com/go-logr/zapr | [MIT](https://opensource.org/license/mit/) +| github.com/mattn/go-ieproxy | [MIT](https://opensource.org/license/mit/) +| goformation | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| mod | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| vcluster | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| zap | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cobra | public-domain +| gojsonschema | [MIT](https://opensource.org/license/mit/) +| handlers | [MIT](https://opensource.org/license/mit/) +| logrus-logstash-hook | [MIT](https://opensource.org/license/mit/) +| apiserver | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| btree | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cli-runtime | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| console | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| cursor | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| diskv | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| distribution | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| emission | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| etcd | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| filepath-securejoin | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| fuzzysearch | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/aybabtme/rgbterm | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| 
github.com/chai2010/gettext-go | [MIT](https://opensource.org/license/mit/) +| github.com/cheggaaa/pb | [MIT](https://opensource.org/license/mit/) +| github.com/containerd/containerd | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/exponent-io/jsonpath | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/gobwas/glob | [MIT](https://opensource.org/license/mit/) +| github.com/go-gorp/gorp/v3 | [MIT](https://opensource.org/license/mit/) +| github.com/gosuri/uitable | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/jmoiron/sqlx | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/joho/godotenv | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/lann/builder | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/lann/ps | [MPL-2.0](https://www.mozilla.org/en-US/MPL/2.0) +| github.com/liggitt/tabwriter | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/MakeNowJust/heredoc | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/Masterminds/squirrel | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/mattn/go-sqlite3 | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| github.com/mitchellh/colorstring | [MIT](https://opensource.org/license/mit/) +| github.com/mitchellh/go-wordwrap | [ISC](https://opensource.org/license/isc-license-txt) +| github.com/monochromegane/go-gitignore | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/rancher/wrangler | [MIT](https://opensource.org/license/mit/) +| github.com/rubenv/sql-migrate | [MIT](https://opensource.org/license/mit/) +| github.com/russross/blackfriday | [MIT](https://opensource.org/license/mit/) +| github.com/skip2/go-qrcode | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/vbatts/tar-split | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| github.com/xlab/treeprint | [BSD-2-Clause](https://opensource.org/license/bsd-2-clause) +| go-colorable | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| go-containerregistry | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| go-isatty | [MIT](https://opensource.org/license/mit/) +| gojq | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| gojsonpointer | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| gojsonreference | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| go-pluggable | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| go.starlark.net | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| gotenv | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| go-toml | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| go-vfs | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| hcl | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| helm | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| httpcache | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| image | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| image2ascii | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| imaging | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| ini.v1 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| jwalterweatherman | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| kairos | 
[Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| keyboard | [MIT](https://opensource.org/license/mit/) +| kubectl | [MIT](https://opensource.org/license/mit/) +| kubernetes | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| kustomize | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| lumberjack.v2 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| machineid | [MIT](https://opensource.org/license/mit/) +| mousetrap | [MIT](https://opensource.org/license/mit/) +| objx | [MIT](https://opensource.org/license/mit/) +| pixterm | [MIT](https://opensource.org/license/mit/) +| progressbar | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| properties | [MIT](https://opensource.org/license/mit/) +| pterm | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| resize | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| runtime | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| shlex | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| spdystream | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| stargz-snapshotter | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| terminal-dimensions | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| terminfo | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| timefmt-go | [MIT](https://opensource.org/license/mit/) +| toml | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) +| tools | [MIT](https://opensource.org/license/mit/) +| viper | [BSD-3-Clause](https://opensource.org/license/bsd-3-clause) +| yaml.v1 | [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0) + +
+ + \ No newline at end of file diff --git a/docs/docs-content/palette-cli/_category_.json b/docs/docs-content/palette-cli/_category_.json new file mode 100644 index 0000000000..fa4c219dfb --- /dev/null +++ b/docs/docs-content/palette-cli/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 180 +} diff --git a/docs/docs-content/palette-cli/commands.md b/docs/docs-content/palette-cli/commands.md new file mode 100644 index 0000000000..d244d7f551 --- /dev/null +++ b/docs/docs-content/palette-cli/commands.md @@ -0,0 +1,374 @@ +--- +sidebar_label: "Commands" +title: "Commands" +description: "Reference resource for all the supported Palette CLI commands." +hide_table_of_contents: false +sidebar_position: 10 +tags: ["palette-cli"] +--- + + + +You start the Palette CLI with a single command, `palette`. The CLI accepts various subcommands such as `pde`, `help`, and more. The Palette CLI will return a non-zero exit status during error scenarios. You can use the CLI flags `-h` and `--help` to learn more about each subcommand. + +The complete list of subcommands is: + +
+ + * `completion` - Generate the autocompletion script for the specified shell. + + + * `help` - Help with any command. + + + * [`ec`](#ec) - Palette Enterprise Cluster installation & pre-validation. + + + * [`login`](#login) - Login to Palette. + + + * [`pcg`](#pcg) - Private Cloud Gateway installation & pre-validation. + + + * [`pde`](#pde) - Palette Developer Experience. + + + * [`project`](#project) - Manage Palette Projects. + + + + +## Global Flags + +Palette CLI supports the following global flags. + +| Short Flag | Long Flag | Description | Type | +|------------|------------------------|--------------------------------------------------------------------------|---------| +| `-c` | `--config` | Config file location. | string | +| `-h` | `--help` | Help with any command. | N/A | +| `-l` | `--log-level` | Log level. Allowed values: `panic` `fatal` `error` `warn` `info` `debug` `trace` (default `info`) | string | +| `-w` | `--workspace` | Workspace location for staging runtime configurations and logs (default `$HOME/.palette`) | string | + + +## EC + +The `ec` subcommand installs a self-hosted Palette Enterprise Cluster (EC) in your target environment. The installation is conducted through an interactive wizard that guides you through the various install configurations available. A local kind cluster is created to facilitate creating the Enterprise cluster in the target environment. You do not need to install kind or any other dependencies. The CLI includes all the required dependencies to stand up the kind cluster. You can use the `ec` command to install a [self-hosted Palette](../enterprise-version/deploying-an-enterprise-cluster.md) instance or a self-hosted [VerteX](../vertex/install-palette-vertex/install-palette-vertex.md) instance. + +The `ec` subcommand exposes the following subcommand. + +
+ + * `install` - Install a Palette Enterprise Cluster through an interactive wizard. A container runtime is required to install an EC cluster. + +### Install + +The `install` subcommand installs a Palette Enterprise Cluster in your target environment. You can install Palette or Palette VerteX using the `install` subcommand. The `install` subcommand can be used in interactive mode, which prompts you for required values. Alternatively, you can use flags to generate a configuration file. + +
+ + | Short Flag | Long Flag | Description | Type | + |------------|------------------------|--------------------------------------------------------------------------|---------| + | `-f` | `--config-file` | Install using a configuration file (optional). Use `--config-only` to generate a configuration file. | string | + | `-o` | `--config-only` | Generate configuration file only. This command will not proceed with installation. | boolean | + | `-v` | `--custom-values-file` | Enterprise Cluster custom values.yaml configuration file (optional). Use this to customize the cluster profile of the Enterprise Cluster. Refer to the [custom value file](#custom-value-file) section for more information. | string | + | `-p` | `--update-passwords` | Update passwords only. Do not proceed with installation. The `--config-file` flag must also be provided. | string | + + + #### Examples + + Install an Enterprise Cluster in interactive mode. +<br />
+ + ```shell + palette ec install + ``` +
+ + Create a configuration file for the Enterprise Cluster installation. + +
+ + ```shell + palette ec install --config-only + ``` + +
+ + Install an Enterprise Cluster using a configuration file. The configuration file is generated using the `--config-only` flag. + +
+ + ```shell hideClipboard + palette ec install --config-file ~/.palette/ec/ec-20230807143205/ec.yaml + ``` + <br />
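+
+  You can also pass an Enterprise Cluster custom **values.yaml** file to the installer by adding the `--custom-values-file` flag listed in the table above. The file name in this example is only an illustration; refer to the [custom value file](#custom-value-file) section for the expected contents of the file.
+
+  ```shell hideClipboard
+  palette ec install --custom-values-file custom-values.yaml
+  ```
+
+  <br />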
+ + Update the passwords of an Enterprise Cluster using a configuration file. The configuration file is generated using the `--config-only` flag. + +
+ + ```shell hideClipboard + palette ec install --config-file ~/.palette/ec/ec-20230807143205/ec.yaml --update-passwords + ``` + + + + +### Custom Value File + +You can customize the [Cluster Profile](../glossary-all.md#cluster-profile) that makes up the Enterprise Cluster by providing a custom **values.yaml** file that contains values for the various Cluster Profile layers. This is an advanced feature and should only be used by advanced users or when explicitly instructed by our support team. + + +The **values.yaml** file is made up of the following components: + +<br />
+ +- `os` The operating system layer of the Enterprise Cluster. This layer contains the values for the operating system that will be used to install the Enterprise Cluster. + + +- `k8s` The Kubernetes layer of the Enterprise Cluster. This layer contains the configuration values for the Kubernetes cluster that is created as part of the Enterprise Cluster installation. + + +- `csi` The Container Storage Interface (CSI) layer of the Enterprise Cluster. This layer contains the configuration values for the CSI driver that is used to provide persistent storage to the Enterprise Cluster. + + +- `cni` The Container Network Interface (CNI) layer of the Enterprise Cluster. This layer contains the configuration values for the CNI driver that is used to provide networking to the Enterprise Cluster. + + +- `mgmt` The management layer of the Enterprise Cluster. This layer contains the configuration values for the internal management components of the Enterprise Cluster. + + + + You can provide one or more layers in the **values.yaml** file. When you provide a layer configuration, the new configuration will be used instead of the default configuration. For example, if you provide a custom **values.yaml** file that contains the `os` layer, it will replace the default operating system configuration in the Enterprise Cluster profile. The **values.yaml** file must use the following format: + +<br />
+ + ```yaml hideClipboard + os: |- + # ... values.yaml for OS layer go here. + k8s: |- + # ... values.yaml for K8s layer go here. + csi: |- + # ... values.yaml for CSI layer go here. + cni: |- + # ... values.yaml for CNI layer go here. + mgmt: |- + # ... values.yaml for spectro-mgmt layer go here. + ``` + +The following example shows a custom **values.yaml** file that contains the `os` layer. The `os` layer contains the configuration for the operating system that will be used to install the Enterprise Cluster. + +
+ + ```yaml hideClipboard + os: |- + kubeadmconfig: + preKubeadmCommands: + - echo "Executing pre kube admin config commands" + - update-ca-certificates + - 'systemctl restart containerd; sleep 3' + - 'while [ ! -S /var/run/containerd/containerd.sock ]; do echo "Waiting for containerd..."; sleep 1; done' + postKubeadmCommands: + - echo "Executing post kube admin config commands" + files: + - targetPath: /usr/local/share/ca-certificates/mycom.crt + targetOwner: "root:root" + targetPermissions: "0644" + content: | + -----BEGIN CERTIFICATE----- + MIICyzCCAbOgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl + cm5ldGVzMB4XDTIwMDkyMjIzNDMyM1oXDTMwMDkyMDIzNDgyM1owFTETMBEGA1UE + AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMdA + nZYs1el/6f9PgV/aO9mzy7MvqaZoFnqO7Qi4LZfYzixLYmMUzi+h8/RLPFIoYLiz + qiDn+P8c9I1uxB6UqGrBt7dkXfjrUZPs0JXEOX9U/6GFXL5C+n3AUlAxNCS5jobN + fbLt7DH3WoT6tLcQefTta2K+9S7zJKcIgLmBlPNDijwcQsbenSwDSlSLkGz8v6N2 + 7SEYNCV542lbYwn42kbcEq2pzzAaCqa5uEPsR9y+uzUiJpv5tDHUdjbFT8tme3vL + 9EdCPODkqtMJtCvz0hqd5SxkfeC2L+ypaiHIxbwbWe7GtliROvz9bClIeGY7gFBK + jZqpLdbBVjo0NZBTJFUCAwEAAaMmMCQwDgYDVR0PAQH/BAQDAgKkMBIGA1UdEwEB + /wQIMAYBAf8CAQAwDQYJKoZIhvcNAQELBQADggEBADIKoE0P+aVJGV9LWGLiOhki + HFv/vPPAQ2MPk02rLjWzCaNrXD7aPPgT/1uDMYMHD36u8rYyf4qPtB8S5REWBM/Y + g8uhnpa/tGsaqO8LOFj6zsInKrsXSbE6YMY6+A8qvv5lPWpJfrcCVEo2zOj7WGoJ + ixi4B3fFNI+wih8/+p4xW+n3fvgqVYHJ3zo8aRLXbXwztp00lXurXUyR8EZxyR+6 + b+IDLmHPEGsY9KOZ9VLLPcPhx5FR9njFyXvDKmjUMJJgUpRkmsuU1mCFC+OHhj56 + IkLaSJf6z/p2a3YjTxvHNCqFMLbJ2FvJwYCRzsoT2wm2oulnUAMWPI10vdVM+Nc= + -----END CERTIFICATE----- + ``` + +## Login + +The `login` subcommand authenticates the Palette CLI with Palette. The `login` subcommand can be used in interactive mode, which prompts you for required values. Or, you can use flags to provide the subcommand with all the required values such as the API key, the organization ID, and the Palette URL. + +
+ +| Flag | Description | Type | +|-----------------------|--------------------------------------------------------------------------------------|---------| +| `--api-key` | Palette API key (omit for interactive login). | string | +| `--cluster-group-name`| Palette Cluster Group name (optional). Specifies the active Cluster Group. | string | +| `--cluster-group-scope`| Palette Cluster Group scope. Required with `--cluster-group-name`. Allowed values are: `project`, `tenant`, and `system`. | string | +| `--console-url` | Palette URL (omit for interactive login). | string | +| `--help` | Help for the `login` subcommand. | - | +| `--insecure` | Skip Transport Layer Security (TLS) (bypass x509 verification). | - | +| `--org` | Palette Organization name (omit for interactive login). | string | +| `--project` | Palette Project name (optional). Specifies the active Project. | string | + + +#### Examples + +<br />
+ +```shell hideClipboard +palette login --api-key 123456789 --org demo-org --console-url https://console.spectrocloud.com +``` + +If you want to target a specific project when using the `login` command, use the `--project` flag. + +
+ +```shell hideClipboard +palette login \ + --api-key 123456789 \ + --org demo-org \ + --console-url https://console.spectrocloud.com \ + --project dev-team +``` + + +Upon successful login, a local configuration file named **palette.yaml** is created. This file contains the metadata for CLI operations and is created in your $HOME directory under the folder name **.palette**. The following output is an example of a **palette.yaml** configuration file. Sensitive values, such as passwords, tokens, and API keys are encrypted at rest. + +
+ +```yaml hideClipboard +paletteConfig: + organization: demo-org + scope: tenant + projectName: dev-team + projectUid: 6342eab2faa0813ead9082e0 + clusterGroupName: beehive + clusterGroupUid: 635669ba4583891d109fe6c0 + tenantUid: 40b8a9a7f724831be814e5734ea744ed + ubuntuConfig: + enablefips: false + token: "" + scarConfig: + scarLoc: "" + scarUsername: "" + scarPassword: "" + mgmt: + apikey: 2abVsxDfFcJpYZ08+6dNWhkk + endpoint: https://console.spectrocloud.com + insecure: false + pairingcode: "" +runLoc: /Users/demo/.palette/ +workspaceLoc: /Users/demo/.palette +``` + +## PCG + +The `pcg` subcommand supports Private Cloud Gateway (PCG) operations, such as installing a PCG cluster and validating its installation. A local [kind](https://kind.sigs.k8s.io/) cluster is created to facilitate creating the PCG cluster in the target environment. You do not need to install kind or any other dependencies, the CLI includes all the required dependencies to stand up the kind cluster. + + +The `pcg` command exposes the following subcommand. + +
+ + * `install` - Install a PCG through an interactive wizard. A container runtime is required to install a PCG cluster. + + +
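+
+For example, the following illustrative commands launch the interactive PCG installation wizard or only generate a configuration file without installing anything. The flags are described in the Install section that follows.
+
+```shell
+# Start the interactive PCG installation wizard.
+palette pcg install
+
+# Generate a configuration file only. Do not proceed with the installation.
+palette pcg install --config-only
+```
+
+<br />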
+ + +### Install + +Use the `install` subcommand to install a PCG cluster in one of the supported environments listed below. The `install` subcommand supports the following flags. +<br />
+ + | Short Flag | Long Flag | Description | Type | + |------------|------------------------|--------------------------------------------------------------------------|---------| + | `-f` | `--config-file` | Install using a configuration file (optional). Use `--config-only` to generate a configuration file. | string | + | `-o` | `--config-only` | Generate configuration file only. This command will not proceed with installation. | boolean | + | `-i` | `--inspect-only` | Validate prerequisites for environment. Do not proceed with installation. | boolean | + + +| Platform | Install Guide | +|---|---| +| MAAS | [Link](../clusters/data-center/maas/install-manage-maas-pcg.md#install-pcg) | +| OpenStack | [Link](../clusters/data-center/openstack.md#installing-private-cloud-gateway---openstack) | +| VMware | [Link](../clusters/data-center/vmware.md#create-vmware-cloud-gateway) | + + + +## PDE + +The `pde` subcommand interacts with the Palette Dev Engine (PDE) platform and its resources. You can use the `pde` command to log in to Palette, manage virtual clusters, and switch the project scope. + +The `pde` command exposes the following subcommands. +<br />
+ + * `cluster-group` - Manage Palette Cluster Groups. + + + * `project` - Manage Palette Projects. + + + * `virtual-cluster` - Manage Palette Virtual Clusters. + + +### Cluster Group + +Use the `cluster-group` command to change the cluster group that commands will target. You can also list all available cluster groups. The `cluster-group` command supports the following subcommands. +<br />
+ + * `list` - List Palette Cluster Groups. + + + + * `switch` - Switch your active Palette Cluster Group. + + + +### Virtual Cluster + +You can use the `virtual-cluster` subcommand to manage Palette Virtual Clusters. Below is a list of the supported subcommands you can use. Use the `--help` flag to learn more about each subcommand. + +
+ +- `create` - Create a Palette Virtual Cluster. + + +- `delete` - Delete a Palette Virtual Cluster. + + +- `download-kubeconfig` - Download the kubeconfig for a Palette Virtual Cluster. + + +- `events` - View events for a Palette Virtual Cluster. + + +- `lifecycle` - Pause or resume a Palette Virtual Cluster. + + +- `list` - List Palette Virtual Clusters. + + +- `resize` - Resize a Palette Virtual Cluster. + +## Project + +Use the `project` command to manage projects, the project scope for the CLI, and list all available projects. The `project` command supports the following subcommands. + + +
+ + * `deactivate` - Deactivate your active Palette project. This command requires you to have tenant admin privileges. + + + * `list` - List Palette projects. Only projects you have access to are listed. + + + * `switch` - Switch your active Palette project. You can only switch to projects you have access to. + +
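+
+As a quick reference, the following illustrative invocations combine the subcommands described in the PDE and Project sections above. Append the `--help` flag to any of them to review the complete set of supported flags.
+
+```shell
+# List the Palette projects you have access to.
+palette project list
+
+# List the Cluster Groups and Virtual Clusters available to you.
+palette pde cluster-group list
+palette pde virtual-cluster list
+```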
\ No newline at end of file diff --git a/docs/docs-content/palette-cli/install-palette-cli.md b/docs/docs-content/palette-cli/install-palette-cli.md new file mode 100644 index 0000000000..6cf7f11e66 --- /dev/null +++ b/docs/docs-content/palette-cli/install-palette-cli.md @@ -0,0 +1,77 @@ +--- +sidebar_label: "Install" +title: "Install" +description: "Learn how to install the Palette CLI and how you can use the CLI with Palette Dev Engine." +hide_table_of_contents: false +sidebar_position: 0 +tags: ["palette-cli"] +--- + + + + + +# Installation + +Use the following steps to install and set up the Palette CLI. + + +## Prerequisites + +- A Palette account. Click [here](https://console.spectrocloud.com/) to create a Palette account. + + +- A Palette API key. Refer to the [API Key](../user-management/user-authentication.md#api-key) reference page to learn how to create an API key. + + + +## Download and Setup + +1. Visit the [Downloads](../spectro-downloads.md#palette-cli) page and download the Palette CLI by using the URL provided. + + +2. Open up a terminal session on your local system. + + +3. Navigate to your default download folder. For Mac and Linux environments the default location is **~/Downloads**. + + +4. Move the binary to a folder that is part of your system's `PATH` environment variable. Use the following command to move the binary to the **/usr/local/bin** folder. + +
+ + ```shell + sudo mv ~/Downloads/palette /usr/local/bin/palette && \ + chmod +x /usr/local/bin/palette + ``` + +
+ + +5. If you will use Palette Dev Engine (PDE), complete this step. Otherwise, you can skip to [Validation](#validate). Log in to Palette by using the `login` command. Replace `<YOUR_API_KEY>` with your Palette API key. If you are using a self-hosted Palette instance, replace the `--console-url` value with your custom Palette URL. +<br />
+ + ```shell + palette pde login --api-key <YOUR_API_KEY> --console-url https://console.spectrocloud.com/ + ``` + +## Validate + +Verify the Palette CLI is part of your system path by issuing the Palette CLI `version` command. +<br />
+ + ```shell + palette version + ``` + + Output: + ```shell + Palette CLI version: 4.0.0 + ``` + + + ## Next Steps + +Start exploring the Palette CLI by using the `--help` command with the various commands. The Palette CLI will continue to receive more functionality, so you will want to keep it updated by downloading the newest version and replacing the current binary. \ No newline at end of file diff --git a/docs/docs-content/palette-cli/palette-cli.md b/docs/docs-content/palette-cli/palette-cli.md new file mode 100644 index 0000000000..11ad10fc2f --- /dev/null +++ b/docs/docs-content/palette-cli/palette-cli.md @@ -0,0 +1,26 @@ +--- +sidebar_label: "Palette CLI" +title: "Palette CLI" +description: "Learn how to use the Palette CLI." +hide_table_of_contents: false +sidebar_custom_props: + icon: "terminal" +tags: ["palette-cli"] +--- + + + +The Palette CLI contains various functionalities that you can use to interact with Palette and manage resources. The Palette CLI is well suited for Continuous Delivery/Continuous Deployment (CI/CD) pipelines and recommended for automation tasks, where Terraform or direct API queries are not ideal. + +To get started with the Palette CLI, check out the [Install](install-palette-cli.md) guide. + + + +## Resources + +- [Install](install-palette-cli.md) + + +- [Commands](commands.md) + +
\ No newline at end of file diff --git a/docs/docs-content/projects.md b/docs/docs-content/projects.md new file mode 100644 index 0000000000..abadcb472f --- /dev/null +++ b/docs/docs-content/projects.md @@ -0,0 +1,113 @@ +--- +sidebar_label: "Projects" +title: "Concept: Projects" +description: "Understanding what Spectro Cloud projects are" +hide_table_of_contents: false +sidebar_position: 110 +sidebar_custom_props: + icon: "cog" +tags: ["projects"] +--- + + + + +# Projects + +A **Project** helps you organize the cluster resources in a logical grouping. The resources that are created within a project are scoped to that project and not available to other projects. You can also assign users and teams with specific roles to specific projects. + +## Project Dashboard + +The **Tenant Admin** > **Projects** page displays the project-related dashboard cards for all projects in the tenant. + + +## Project Card + +The **Project card** shows the status and relevant details of a cluster, grouping information about healthy, unhealthy, and errored clusters. It calculates cluster health by evaluating the health of each node, taking into account factors such as memory and CPU utilization, disk pressure, and network availability. Additionally, it displays the number of clusters imported and those provisioned by Palette. +### Cores per Project Usage + +By default, the active worker node usage of CPU **Cores** is grouped across all projects and shown as an hourly interval. You can change the interval value to days or months. + + +## Create a Project + +Use the following steps to create a new project. + +
+ +:::info + +You can associate users and teams with a project. Check out the [Project Association](/user-management/project-association) page to learn more. + +::: + +## Prerequisites + +* Tenant admin access + +## Enablement + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to **Tenant Admin** > **Projects** and click the **Create Project** button. + + +3. Fill out the following fields: **Name**, **Description**, and **Tags** to create a Project. + +## Validate + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to **Tenant Admin** > **Projects** + +Your newly created project is listed along with other existing projects. + + +## Delete a Project + + +You can remove projects by following these steps. + +## Prerequisites + +* Tenant admin access. + +* No active clusters in the project. + +## Remove Project + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Switch to **Tenant Admin** scope. + + +3. Navigate to the left **Main Menu** and select **Projects**. + + +4. Locate the project card for the project you want to remove. + + +5. Click on the **three-dot Menu** and select **Delete**. + + +6. A pop-up box will ask you to confirm the action. Confirm the deletion. + + +:::caution + +You can delete projects with force as long as there are no active clusters. Force deleting will eliminate all resources linked to the project, such as app profiles, cluster profiles, workspaces, audit logs, and custom project settings. However, if a project has active clusters, you must remove them first before deleting the project. + +::: + + +## Validate + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to **Tenant Admin** > **Projects** . + +The project you deleted is no longer displayed and available for interaction. \ No newline at end of file diff --git a/docs/docs-content/registries-and-packs/_category_.json b/docs/docs-content/registries-and-packs/_category_.json new file mode 100644 index 0000000000..0e005246ca --- /dev/null +++ b/docs/docs-content/registries-and-packs/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 130 +} diff --git a/docs/docs-content/registries-and-packs/add-custom-packs.md b/docs/docs-content/registries-and-packs/add-custom-packs.md new file mode 100644 index 0000000000..68f482554c --- /dev/null +++ b/docs/docs-content/registries-and-packs/add-custom-packs.md @@ -0,0 +1,517 @@ +--- +sidebar_label: "Add a Custom Pack" +title: "Add a Custom Pack" +description: "How to create and use custom made packs and registries in Spectro Cloud" +icon: "" +hide_table_of_contents: false +sidebar_position: 20 +--- + + + + +# Add a Custom Pack + +Custom packs are built by users and deployed to custom registries using the Spectro Cloud CLI tool. To get started with Spectro Cloud CLI, review the Spectro Cloud CLI installation [instructions](spectro-cli-reference.md). + +## Prerequsites + +The following items are required to create a custom pack. + +- [Spectro Cloud CLI](spectro-cli-reference.md) +- A Spectro Cloud [account](https://www.spectrocloud.com/) +- [Custom Pack registry](adding-a-custom-registry.md) + +## JSON Schema + +Each pack contains a metadata file named `pack.json`. The table below explains in greater detail the JSON schema attributes. + + +| Property Name | Data type | Required | Description | +| --- | --- | --- | --- | +| name | String | True | Name of the pack | +| displayName | String | True | Name of the pack as it is to be displayed on the Palette management console. 
| +| layer | String | True | Relevant layer that this pack should be part of; such as os, k8s, cni, csi, addon | +| addonType | String | False | Addon-type must be set for packs that have the layer set to Addon. The value must be one of the following: logging, monitoring, load balancer, authentication, ingress, security. Setting a relevant correct addon type ensures packs are organized correctly on the management console making it easy for profile authors to find packs. | +| version | String | True | A Semantic version for the pack. It is recommended that the pack version be the same as the underlying integration it is being created for. For example, the version for the pack that will install Prometheus 2.3.4, should set to 2.3.4. | +| cloudTypes | Array | True | You can provide one or more types for a pack. Supported values are as follows:

**all**, **aws**, **azure**, **gcp**, **tencent**, **vsphere**, **openstack**, **baremetal**, **maas**, **aks**, **eks**, **tke**, **edge**, **edge-native**, **coxedge**, and **libvirt** (virtualized edge). +| group | String | False | Optional categorization of packs. For example, LTS can be set for Ubuntu OS packs. | +| annotations | Array | False | Optional key-value pairs required during pack installation. Typically, custom packs do not need to set annotations. Some packs like the ones for OS require annotations that need to be set with an image id. | +| eol | String | False | End of life date for integration. | +| KubeManifests | Array | False | Relative path to Kubernetes manifest yaml files. | +| ansibleRoles | Array | False | Relative part to the Ansible role folders. These folders should contain all the artifacts required by Ansible. Please refer to the Ansible documentation for more details on how Ansible roles are constructed. | +| | | | In Palette, Ansible roles are used to customize the OS image used for cluster nodes. Typically, these are roles that perform tasks like hardening the OS, installing monitoring agents, etc. | +| charts | Array | False | Relative path to the helm chart archives. | + +The following is the JSON schema for packs. Review the schema to ensure your JSON configuration is defined correctly. + +
+ + +```json +{ + "type": "object", + "required": [ + "name", + "displayName", + "version", + "layer" + ], + "properties": { + "name": { + "$ref": "#/definitions/nonEmptyString" + }, + "displayName": { + "$ref": "#/definitions/nonEmptyString" + }, + "version": { + "$ref": "#/definitions/nonEmptyString" + }, + "layer": { + "$ref": "#/definitions/layer" + }, + "group": { + "type": "string" + }, + "cloudTypes": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "all", + "aws", + "azure", + "gcp", + "vsphere", + "openstack", + "baremetal", + "maas", + "aks", + "eks", + "tencent", + "tke", + "edge", + "libvirt", + "edge-native", + "coxedge" + ] + } + }, + "cloudType": { + "type": "string", + "enum": [ + "all", + "aws", + "azure", + "gcp", + "vsphere", + "openstack", + "baremetal", + "maas", + "aks", + "eks", + "tencent", + "tke", + "edge", + "libvirt", + "edge-native", + "coxedge" + ] + }, + "eol": { + "type": "string" + }, + "addonType": { + "type": "string" + }, + "addonSubType": { + "type": "string" + }, + "ansibleRoles": { + "type": "array", + "items": { + "type": "string" + } + }, + "charts": { + "type": "array", + "items": { + "type": "string" + } + }, + "kubeManifests": { + "type": "array", + "items": { + "type": "string" + } + }, + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "constraints": { + "$ref": "#/definitions/constraints" + } + }, + "definitions": { + "nonEmptyString": { + "type": "string", + "minLength": 1 + }, + "layer": { + "type": "string", + "enum": [ + "kernel", + "os", + "k8s", + "cni", + "csi", + "addon" + ] + }, + "constraints": { + "type": "object", + "properties": { + "dependencies": { + "type": "array", + "items": { + "$ref": "#/definitions/dependency" + } + }, + "resources": { + "type": "array", + "items": { + "$ref": "#/definitions/resource" + } + } + } + }, + "dependency": { + "type": "object", + "required": [ + "packName", + "layer", + "type" + ], + "properties": { + "packName": { + "$ref": "#/definitions/nonEmptyString" + }, + "layer": { + "$ref": "#/definitions/layer" + }, + "minVersion": { + "type": "string" + }, + "maxVersion": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "required", + "optional", + "notSupported", + "upgrade" + ] + } + } + }, + "resource": { + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "cpu", + "memory", + "diskSize" + ] + }, + "minLimit": { + "type": "number" + }, + "components": { + "type": "array", + "items": { + "$ref": "#/definitions/component" + } + } + } + }, + "component": { + "type": "object", + "required": [ + "scheduleType" + ], + "properties": { + "scheduleType": { + "type": "string", + "enum": [ + "all", + "master", + "worker" + ] + }, + "resourceRequestParamRef": { + "type": "string" + }, + "replicaCountParamRef": { + "type": "string" + } + } + } + } +} +``` + +## Create a Custom Pack + +Follow the steps below to create a custom pack. + +1. Create a directory with a suitable name for all the pack contents. + + Example: `prometheus_1_0` + + +2. Create a metadata file named `pack.json` to describe the pack. + + Example: + + ```json + { + "annotations": { + "name": "value" + }, + "ansibleRoles": [], + "displayName": "", + "eol": "2028-04-30", + "group": "", + "kubeManifests": [ + "manifests/deployment.yaml" + ], + "layer": "", + "name": "", + "version": "" + } + ``` + +3. Create a file named `values.yaml`. 
This file consists of configurable parameters that need to be exposed to the end-users during the creation of a cluster profile. + +:::info + +A values.yaml file is mandatory for every pack. For an OS pack, there are typically no configurable parameters, but an empty file still needs to be added to the OS pack. + +::: + +Parameters for all charts, manifests, and Ansible roles defined in the pack are defined in the `values.yaml` file. +*Helm* charts natively support values override. Any values defined are merged with those defined within a chart. *Manifests* and *Ansible* roles need to be explicitly templatized if parameter configuration is desired. + +```yaml + pack: + namespace : + charts: + chart1: + + chart2: + + manifests: + manifest1: + + manifest2: + + ansibleRoles: + role1: + + role2: + +``` + +4. A pack must have the logo file named `logo.png` and must be copied into the pack directory. + + +5. Login to the pack registry using the following command: + + ```bash + spectro registry login [REGISTRY_SERVER] + ``` + +6. Push the newly defined pack to the pack registry using the following command: + + ```bash + spectro pack push [PACK_DIR_LOCATION] --registry-server [REGISTRY_SERVER] + ``` + +7. To overwrite contents of a previously deployed pack, use the force option as follows: + + ```bash + spectro pack push [PACK_DIR_LOCATION] -f --registry-server [REGISTRY_SERVER] + ``` + +## Adding an OS Pack + +The OS is one of the Core Layers in a cluster profile. An OS pack can be built to use a custom OS image for cluster nodes. This might be desirable if an organization wants to use an approved hardened OS image for their infrastructure. There are typically the following two scenarios for the OS image: + +
+ +1. **Pre-Installed Kubernetes** - The OS image has the desired version of Kubernetes components like kubelet, kubectl, etc installed. + + +2. **Vanilla OS Image** - Kubernetes components are not installed. + +Additionally, for both scenarios additional components or packages may need to be installed at runtime to prepare the final OS image. This can be done by specifying one or more Ansible roles in the pack. The following are a few examples of building custom OS pack to cover the some of these scenarios. + +A few sample pack manifests for building a custom OS pack are shown in the following examples. These are examples for images that do not have Kubernetes components pre-installed. Palette installs these components at the time of provisioning. The version of Kubernetes that gets installed depends on the Kubernetes pack configuration in the cluster profile. If Kubernetes is pre-installed in the image, set the flag `skipK8sInstall` to true. + +## Examples + + + + + +### AWS Custom-OS Pack + +```yaml +{ + "annotations": { + "cloudRegion": "us-east-1", + "imageId": "ami-071f6fc516c53fca1", + "imageOwner": "421085264799", + "osName": "centos", + "os_spectro_version": "0", + "sshUsername": "centos", + "skipK8sInstall": "false" + }, + "ansibleRoles": [ + "harden_os" + ], + "cloudTypes": ["aws"], + "displayName": "CentOS", + "eol": "2024-06-30", + "group": "", + "kubeManifests": [], + "layer": "os", + "name": "golden-centos-aws", + "version": "7.7.1908" +} +``` + + + + + +### Azure Custom OS Pack + +```yaml +{ + "annotations": { + "imageOffer": "CentOS", + "imagePublisher": "OpenLogic", + "imageSKU": "7.7", + "osName": "centos", + "os_spectro_version": "0", + "sshUsername": "centos", + "skipK8sInstall": "true" + }, + "ansibleRoles": [ + "harden_os" + ], + "cloudTypes": ["azure"], + "displayName": "CentOS", + "eol": "2024-06-30", + "group": "", + "kubeManifests": [], + "layer": "os", + "name": "golden-centos-azure", + "version": "7.7.1908" +} +``` + + + + + +### VMware Custom OS Pack - Local Image + +```yaml +{ + "annotations": { + "folder": "spectro-templates", + "imageId": "/Datacenter/vm/spectro-templates/base-images/centos-7-vanilla-with-vm-tools", + "osName": "centos", + "os_spectro_version": "0", + "sshPassword": "password", + "sshUsername": "root", + "skipK8sInstall": "false" + }, + "ansibleRoles": [ + "harden_os" + ], + "cloudTypes": ["vsphere"], + "displayName": "CentOS", + "eol": "2024-06-30", + "group": "", + "kubeManifests": [], + "layer": "os", + "name": "golden-centos-vsphere", + "version": "7.7.1908" +} +``` + +### VMware Custom OS Pack - Remote Image + +```yaml +{ + "annotations": { + "folder": "spectro-templates", + "imageId": "https://cloud-images.ubuntu.com/releases/18.04/release/ubuntu-18.04-server-cloudimg-amd64.ova", + "osName": "ubuntu", + "os_spectro_version": "0", + "sshUsername": "ubuntu", + "skipK8sInstall": "false" + }, + "ansibleRoles": [ + "harden_os" + ], + "cloudTypes": ["vsphere"], + "displayName": "Ubuntu", + "eol": "2028-04-30", + "group": "LTS", + "kubeManifests": [], + "layer": "os", + "name": "golden-ubuntu-vsphere", + "version": "18.04.4" +} +``` + + + + + +## Ansible Roles + +In all the previous examples, additional customization in the form of an Ansible role called `harden_os` is specified in the pack manifest. The tasks and other files for the implementation of this role need to be included in the pack. 
The final directory structure for the pack would be as follows:
+
+```
+./pack.json
+./logo.png
+./values.yaml
+./harden_os
+./harden_os/tasks
+./harden_os/tasks/main.yml
+./harden_os/files
+./harden_os/files/sec_harden.sh
+```
+
+Ansible roles are optional and only needed if additional runtime customization is required. Once an OS pack is constructed, push it to the pack registry using the Spectro CLI tool.
+
+:::caution
+
+During the image customization phase of a cluster deployment, failures related to missing packages or package version mismatches could occur when using a custom OS pack. These errors are presented on the console. The image needs to be updated to resolve any such issues.
+
+:::
diff --git a/docs/docs-content/registries-and-packs/adding-a-custom-registry.md b/docs/docs-content/registries-and-packs/adding-a-custom-registry.md
new file mode 100644
index 0000000000..7d01d0ae69
--- /dev/null
+++ b/docs/docs-content/registries-and-packs/adding-a-custom-registry.md
@@ -0,0 +1,306 @@
+---
+sidebar_label: "Add a Custom Registry"
+title: "Add a Custom Registry"
+description: "Learn how to create and use custom made packs and registries in Spectro Cloud"
+icon: ""
+hide_table_of_contents: false
+sidebar_position: 0
+---
+
+
+# Add Custom Registries
+
+Setting up a custom pack registry is a two-step process. The first step is to deploy a pack registry server using a Docker image provided by us. While deploying a pack registry server, you can employ a TLS certificate from a Certificate Authority (CA) or a self-signed certificate. The current guide provides instructions for both methods - CA-issued and self-signed certificates. You can check out the [Advanced Configuration](advanced-configuration.md) guide to learn about the customization options available when deploying a pack registry server.
+
+After deploying a pack registry server, the next step is configuring the pack registry server in Palette. Once you finish configuring the pack registry server in Palette, Palette will periodically synchronize the pack contents from the pack registry server.
+
+## Prerequisites
+
+* Ensure you have a Docker container runtime installed on the machine.
+
+
+* The HTTP utility *htpasswd* must be installed to encrypt the user authentication credentials.
+
+
+* The minimum machine compute specifications are 1 vCPU and 2 GB Memory.
+
+
+* Firewall ports 443 and 80 must be open on the machine to allow traffic from the Palette console and the Spectro CLI tool.
+
+* [OpenSSL](https://www.openssl.org/source/) if creating a self-signed certificate. Refer to the [Self-Signed Certificates](#self-signed-certificates) section below for more guidance.
+
+:::caution
+
+Please ensure that the ports 443 and 80 are exclusively allocated to the registry server and are not in use by other processes.
+
+:::
+
+## Deploy Pack Registry Server with Let's Encrypt
+
+We provide a Docker image for setting up a pack registry server. Use the following steps to deploy a pack registry server using the designated Docker image and a TLS certificate issued by [Let's Encrypt](https://letsencrypt.org/).
+
+
+1. Create a folder that will contain the htpasswd file and the registry configuration.
+
+
+   ```bash
+   mkdir spectropaxconfig
+   ```
+
+2. Create an htpasswd file with the user credentials for the registry.
+
+
+   ```shell
+   htpasswd -Bbn admin "yourPasswordHere" > spectropaxconfig/htpasswd-basic
+   ```
+
+
+3. Create a pack registry configuration file titled **myconfig.yml** in the **spectropaxconfig** directory. The YAML code block below displays the sample content for the **myconfig.yml** file. The current example assumes that your pack registry server will be hosted at `yourhost.companydomain.com` and that the email address for notifications is `you@companydomain.com`. Replace the `host` and `email` attribute values as applicable to you.
+
+
+   ```yaml
+   version: 0.1
+   log:
+     level: debug
+   storage: inmemory
+   http:
+     addr: :5000
+     tls:
+       letsencrypt:
+         cachefile: /etc/spectropaxconfig/le-cache
+         email: you@companydomain.com
+         hosts:
+           - yourhost.companydomain.com
+   auth:
+     htpasswd:
+       realm: basic-realm
+       path: /etc/spectropaxconfig/htpasswd-basic
+   ```
+
+4. Start the container image with the following flags.
+
+
+
+   ```bash
+   docker run \
+     --rm \
+     --publish 443:5000 \
+     --name spectro-registry \
+     --volume $(pwd)/spectropaxconfig/:/etc/spectropaxconfig/ \
+     gcr.io/spectro-images-public/release/spectro-registry:4.0.2 \
+     serve /etc/spectropaxconfig/myconfig.yml
+   ```
+
+You can now access the pack registry at `https://yourhost.companydomain.com/v1/`.
+You will be prompted for the username admin and the password you chose when creating the htpasswd file.
+
+## Deploy a Pack Registry Server with Self-Signed Certificates
+
+The following steps need to be performed to deploy the pack registry server using self-signed certificates:
+
+1. Configure the user credentials by using the `htpasswd` utility and store the credentials in a file locally. This file will be mounted inside the pack registry Docker container.
+
+
+   ```bash
+   mkdir -p /root/auth
+   ```
+
+2. For admin users, use the command below, replacing the placeholder with a unique, secure password.
+
+
+   ```bash
+   htpasswd -Bbn admin "yourPasswordHere" > /root/auth/htpasswd-basic
+   ```
+
+3. For read-only users, use the following command, replacing the placeholder with a unique, secure password.
+
+ + ```bash + htpasswd -Bbn spectro "yourPasswordHere" >> /root/auth/htpasswd-basic + ``` + +4. If HTTPS mode is used, create a directory called `certs`. +
+
+   ```shell
+   mkdir -p /root/certs
+   ```
+
+5. Copy the **tls.crt** and **tls.key** files from the CA into the **/root/certs** directory. This directory will be mounted inside the registry Docker container.
+
+
+6. Pack contents in a pack registry can be stored locally on the host or on an external file system.
+An external file system is recommended so that the pack contents can be mounted on another pack
+registry instance in the event of restarts and failures.
+Create a directory or mount an external volume to the desired storage location. Example: `/root/data`
+
+
+7. Issue the following command to pull the pack registry server image. The image will help you instantiate a Docker container as a pack registry server.
+
+ + ```shell + docker pull gcr.io/spectro-images-public/release/spectro-registry:3.4.0 + ``` + +8. Use the `docker run` command to instantiate a Docker container. If you encounter an error while instantiating the Docker container, below are some common scenarios and troubleshooting tips. + + * The Registry CLI login command fails with the error message `x509: cannot validate certificate for ip_address, because it doesn't contain any IP SANs`. The error occurs when a self-signed certificate is created using an IP address rather than a hostname. To resolve the error, recreate the certificate to include an IP SAN or use a DNS name instead of an IP address. + + * The Registry CLI login command fails with the error message `x509: certificate signed by unknown authority`. The error occurs when the self-signed certificate is invalid. To resolve the error, you must configure the host where CLI is installed to trust the certificate. + + + + + + +```bash +docker run -d \ + -p 443:5000 \ + --restart=always \ + --name spectro-registry \ + --mount type=bind,source=/root/auth,target=/auth,readonly \ + --mount type=bind,source=/root/data,target=/data \ + --mount type=bind,source=/root/certs,target=/certs,readonly \ + -e REGISTRY_LOG_LEVEL=info \ + -e REGISTRY_AUTH=htpasswd \ + -e REGISTRY_AUTH_HTPASSWD_REALM="Registry Realm" \ + -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd-basic \ + -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/tls.crt \ + -e REGISTRY_HTTP_TLS_KEY=/certs/tls.key \ + gcr.io/spectro-images-public/release/spectro-registry:4.0.2 +``` + + + + + + +```shell +docker run -d \ + -p 80:5000 \ + --restart=always \ + --name spectro-registry \ + --mount type=bind,source=/root/auth,target=/auth,readonly \ + --mount type=bind,source=/root/data,target=/data \ + -e REGISTRY_LOG_LEVEL=info \ + -e REGISTRY_AUTH=htpasswd \ + -e REGISTRY_AUTH_HTPASSWD_REALM="Registry Realm" \ + -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd-basic \ + gcr.io/spectro-images-public/release/spectro-registry:4.0.2 + ``` + +
+ +:::caution + +Registry servers configured in HTTP mode require the `--insecure` CLI flag when using the Spectro Cloud CLI's `login` command. + +
+ +```shell +spectro registry login --insecure http://example.com:5000 +``` + +::: + +
+ +
+ +
+ + +9. Expose the container host's port publicly to allow the console to interact with the pack registry. + This would be typically done via environment-specific constructs like Security Groups, Firewalls, etc. + + +10. Verify the installation by invoking the pack registry APIs using the curl command. This should result in a 200 response. + + + + + + ```bash + curl --cacert tls.crt -v [REGISTRY_SERVER]/health + curl --cacert tls.crt -v -u [USERNAME] [REGISTRY_SERVER]/v1/_catalog + ``` + + + + + + ```bash + curl -v [REGISTRY_SERVER]/health + curl -v -u [USERNAME] [REGISTRY_SERVER]/v1/_catalog + ``` + + + + + + + +## Configure a Custom Pack Registry in Palette + +Once you deploy the pack registry server, use the following steps to configure the pack registry server in Palette. +
+
+1. Log in to Palette, and switch to the tenant admin view.
+
+
+2. Navigate to the **Tenant Settings** > **Registries** > **Pack Registries** section.
+
+
+3. Click on **Add New Pack Registry**. Palette will open a pop-up window asking for the fields to configure a pack registry server, as highlighted in the screenshot below.
+
+   ![A screenshot highlighting the fields to configure a custom pack registry. ](/registries-and-packs_adding-a-custom-registry-tls_certificate.png)
+
+
+4. Provide the pack registry server name, endpoint, and user credentials in the pop-up window. Ensure you use an "https://" prefix in the pack registry server endpoint.
+
+
+5. If you want Palette to establish a secure and encrypted HTTPS connection with your pack registry server, upload the certificate in the **TLS Configuration** section. The certificate file must be in the PEM format and have a complete trust chain.
+
+   If you used a TLS certificate issued by a CA while configuring the pack registry server, check with your CA to obtain a certificate chain. If you used a self-signed certificate, upload the entire certificate trust chain. The file content must have the server, the intermediate, and the root certificates.
+
+   Once you upload the *.pem* certificate file and click the **Validate** button, Palette will perform the TLS verification to affirm the certificate's authenticity before establishing a communication channel.
+
+
+6. Select the **Insecure Skip TLS Verify** checkbox if you do not want an HTTPS connection between Palette and your pack registry server. If you upload a TLS certificate and also select the **Insecure Skip TLS Verify** checkbox, the checkbox value takes precedence.
+
+
+7. Click the **Confirm** button to finish configuring the pack registry server. After you finish the configuration, Palette will periodically synchronize with the pack registry server to download pack updates, if any.
+
+
+## Self-Signed Certificates
+
+For self-signed certificates, use the following command to generate certificates.
+
+ + ```bash + openssl req \ + -newkey rsa:4096 -nodes -sha256 -keyout tls.key \ + -x509 -days 1825 -out tls.crt + ``` + +Provide the appropriate values while ensuring that the Common Name matches the registry hostname. + +
+ + ```text hideClipboard + Country Name (2 letter code) [XX]: + State or Province Name (full name) []: + Locality Name (eg, city) [Default City]: + Organization Name (eg, company) [Default Company Ltd]: + Organizational Unit Name (eg, section) []: + Common Name (eg, your name or your server's hostname) []:[REGISTRY_HOST_DNS] + Email Address []: + + Example: + REGISTRY_HOST_DNS - registry.com + ``` + + +
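+
+If clients will reach the registry by a hostname or IP address other than the certificate's Common Name, you can also include a Subject Alternative Name (SAN) when generating the self-signed certificate. This helps avoid the `IP SANs` certificate error described in the troubleshooting notes earlier in this guide. The following is a minimal sketch that assumes OpenSSL 1.1.1 or later; the hostname and IP address are placeholders that you must replace with your own values.
+
+  ```bash
+  openssl req \
+    -newkey rsa:4096 -nodes -sha256 -keyout tls.key \
+    -x509 -days 1825 -out tls.crt \
+    -addext "subjectAltName=DNS:registry.example.com,IP:10.10.10.10"
+  ```
+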
\ No newline at end of file diff --git a/docs/docs-content/registries-and-packs/adding-add-on-packs.md b/docs/docs-content/registries-and-packs/adding-add-on-packs.md new file mode 100644 index 0000000000..a25669f8a6 --- /dev/null +++ b/docs/docs-content/registries-and-packs/adding-add-on-packs.md @@ -0,0 +1,424 @@ +--- +sidebar_label: 'Add an Add-on Pack' +title: 'Add an Add-on Pack' +description: 'How to create custom made packs using Helm Charts and Manifests in Spectro Cloud' +icon: '' +hide_table_of_contents: false +sidebar_position: 30 +--- + + +An Add-on Pack defines deployment specifics of a Kubernetes application to be installed on a running Kubernetes cluster. Palette provides several Add-on packs out of the box for various layers of the Kubernetes stack. For example: + + +* **Logging** - elastic search, fluentd + + +* **Monitoring** - Kubernetes dashboard, Prometheus + + +* **Load Balancers** - Citrix + + +* **Security** - Dex, Vault, Permissions manager + + +* **Service Mesh** - Istio + + +Custom Add-on packs can be built to extend the list of integrations. Two different methods are used in the following examples to create custom add-on packs. + +- [Helm Charts](#helm-charts) + +- [Manifests](#manifests) + +
+ + + +## Helm Charts + +The following example shows how to build the Prometheus-Grafana monitoring pack and push to a pack registry server using the Spectro Cloud CLI: + +1. Create the pack directory named *prometheus-grafana*. + + +2. Create the metadata file named `pack.json`. + + ```json + { + "addonType": "monitoring", + "annotations": { + }, + "ansibleRoles": [ + ], + "cloudTypes": ["all"], + "displayName": "Prometheus-Grafana", + "eol": " ", + "group": " ", + "kubeManifests": [ + ], + "charts": [ + "charts/prometheus-grafana.tgz" + ], + "layer":"addon", + "name": "prometheus-grafana", + "version": "9.7.2" + } + ``` + +3. Download the desired version of the Prometheus-Grafana Helm charts archive. + + +4. Create a subdirectory called **charts** and copy the downloaded Helm chart archive to this directory. Refer to the relative location of this archive in the pack manifest file, `pack.json` as shown in step 2. + + +5. Create a file called `values.yaml` for configurable chart parameters. This can be a subset of the `values.yaml` file shipped within the chart. Copy the entire file as is, if all chart parameters need to be made configurable. For the Prometheus-Grafana pack, the `values.yaml` could look like this: + + ```yaml + pack: + #The namespace (on the target cluster) to install this chart + #When not found, a new namespace will be created + namespace: "monitoring" + + charts: + prometheus-operator: + + # Default values for prometheus-operator. + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. + + ## Provide a name in place of prometheus-operator for `app:` labels + ## + nameOverride: "" + + ## Provide a name to substitute for the full names of resources + ## + fullnameOverride: "prometheus-operator" + + ## Labels to apply to all resources + ## + commonLabels: {} + # scmhash: abc123 + # myLabel: aakkmd + + ## Create default rules for monitoring the cluster + ## + defaultRules: + create: true + rules: + alertmanager: true + etcd: true + general: true + k8s: true + kubeApiserver: true + kubePrometheusNodeAlerting: true + kubePrometheusNodeRecording: true + kubernetesAbsent: true + kubernetesApps: true + kubernetesResources: true + kubernetesStorage: true + kubernetesSystem: true + kubeScheduler: true + network: true + node: true + prometheus: true + prometheusOperator: true + time: true + + ## Labels for default rules + labels: {} + ## Annotations for default rules + annotations: {} + + ## Provide custom recording or alerting rules to be deployed into the cluster. + ## + additionalPrometheusRules: [] + # - name: my-rule-file + # groups: + # - name: my_group + # rules: + # - record: my_record + # expr: 100 * my_record + + ## + global: + rbac: + create: true + pspEnabled: true + + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + + ## Configuration for alertmanager + ## ref: https://prometheus.io/docs/alerting/alertmanager/ + ## + alertmanager: + + ## Deploy alertmanager + ## + enabled: true + + ## Service account for Alertmanager to use. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + name: "" + + ## Configure pod disruption budgets for Alertmanager + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + ## This configuration is immutable once created and will require the PDB to be deleted to be changed + ## https://github.com/kubernetes/kubernetes/issues/45398 + ## + podDisruptionBudget: + enabled: false + minAvailable: 1 + maxUnavailable: "" + + ## Alertmanager configuration directives + ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file + ## https://prometheus.io/webtools/alerting/routing-tree-editor/ + ## + config: + global: + resolve_timeout: 5m + route: + group_by: ['job'] + group_wait: 30s + group_interval: 5m + repeat_interval: 12h + receiver: 'null' + routes: + - match: + alertname: Watchdog + receiver: 'null' + receivers: + - name: 'null' + + ## Pass the Alertmanager configuration directives through Helm's templating + ## engine. If the Alertmanager configuration contains Alertmanager templates, + ## they'll need to be properly escaped so that they are not interpreted by + ## Helm + ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function + ## https://prometheus.io/docs/alerting/configuration/#%3Ctmpl_string%3E + ## https://prometheus.io/docs/alerting/notifications/ + ## https://prometheus.io/docs/alerting/notification_examples/ + tplConfig: false + + ## Alertmanager template files to format alerts + ## ref: https://prometheus.io/docs/alerting/notifications/ + ## https://prometheus.io/docs/alerting/notification_examples/ + ## + templateFiles: {} + # + ## An example template: + # template_1.tmpl: |- + # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }} + # + # {{ define "slack.myorg.text" }} + # {{- $root := . -}} + # {{ range .Alerts }} + # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}` + # *Cluster:* {{ template "cluster" $root }} + # *Description:* {{ .Annotations.description }} + # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:> + # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:> + # *Details:* + # {{ range .Labels.SortedPairs }} * *{{ .Name }}:* `{{ .Value }}` + # {{ end }} + + ingress: + enabled: false + ... + ``` + +6. Log in to the pack registry using the following command: + + ```bash + spectro registry login [REGISTRY_SERVER] + ``` + +7. Using the Spectro CLI, push the newly built pack to the pack registry: + + ```bash + spectro pack push prometheus-grafana --registry-server [REGISTRY-SERVER] + ``` + +
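+
+Step 3 above asks you to download the Helm chart archive. One way to do that is with the Helm CLI, as sketched below. This is only an illustration - the repository URL, chart name, and version are placeholders that depend on where the chart you need is published. Using `--destination ./charts` writes the archive directly into the **charts** subdirectory described in step 4.
+
+   ```bash
+   # Add the repository that publishes the chart, then download the chart
+   # archive into the pack's charts directory.
+   helm repo add <repo-name> <repo-url>
+   helm repo update
+   helm pull <repo-name>/<chart-name> --version <chart-version> --destination ./charts
+   ```
+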
+ + + +## Manifests + +Add-on packs can be built using Kubernetes manifests as well. These manifests contain deployment specifications for Kubernetes objects like pods, services, deployments, namespaces, or secrets. + +The example below shows how to build the Permission Manager auth pack and push to the pack registry server using the Spectro Cloud CLI. + +1. Create the pack directory named **permission-manager**. + + +2. Create the metadata file named `pack.json`. + + ```json + { + "addonType":"authentication", + "cloudTypes": ["all"], + "displayName": "Permission Manager", + "kubeManifests": [ + "manifests/permission-manager.yaml" + ], + "layer": "addon", + "name": "permission-manager", + "version": "1.1.0" + } + ``` + +3. Create a sub-directory called **manifests**. + + +4. Copy the desired manifest files to the **manifests** directory and reference them in `pack.json` as shown in step 2. If the configurability of the manifest is desired, then the manifest files must be templatized to introduce parameters. For example, _{{.Values.namespace}}_. These parameters are defined with default values in the `values.yaml` file and can be overridden in the cluster profile. + + **permission-manager.yaml (partial)** + ```yaml + apiVersion: v1 + kind: Namespace + metadata: + name: {{ .Values.namespace | quote }} + + --- + + apiVersion: v1 + kind: Secret + metadata: + name: auth-password-secret + namespace: {{ .Values.namespace | quote }} + type: Opaque + stringData: + password: {{ .Values.authPassword }} + + --- + + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: template-namespaced-resources___operator + rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" + + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: template-namespaced-resources___developer + rules: + - apiGroups: + - "*" + resources: + # - "bindings" + - "configmaps" + - "endpoints" + # - "limitranges" + - "persistentvolumeclaims" + - "pods" + - "pods/log" + - "pods/portforward" + - "podtemplates" + - "replicationcontrollers" + - "resourcequotas" + - "secrets" + # - "serviceaccounts" + - "services" + # - "controllerrevisions" + # - "statefulsets" + # - "localsubjectaccessreviews" + # - "horizontalpodautoscalers" + # - "cronjobs" + # - "jobs" + # - "leases" + - "events" + - "daemonsets" + - "deployments" + - "replicasets" + - "ingresses" + - "networkpolicies" + - "poddisruptionbudgets" + # - "rolebindings" + # - "roles" + verbs: + - "*" + + --- + + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: template-namespaced-resources___read-only + rules: + - apiGroups: + - "*" + resources: + - "configmaps" + - "endpoints" + - "persistentvolumeclaims" + - "pods" + - "pods/log" + - "pods/portforward" + - "podtemplates" + - "replicationcontrollers" + - "resourcequotas" + - "secrets" + - "services" + - "statefulsets" + - "cronjobs" + - "jobs" + - "events" + - "daemonsets" + - "deployments" + - "replicasets" + - "ingresses" + - "networkpolicies" + - "poddisruptionbudgets" + verbs: ["get", "list", "watch"] + + --- + ... + ``` + +5. Create a file called `values.yaml` to provide configurable manifest parameters. + + **values.yaml:** + + ```yaml + manifests: + permission-manager: + + #Namespace under which permission-manager will be deployed + namespace: "permission-manager" + + #Log in password for permission-manager + authPassword: "welcome123" + ``` + +6. 
Log in to the pack registry using the following command: + + ```bash + spectro registry login [REGISTRY_SERVER] + ``` + +7. Using Spectro Cloud CLI push the newly built pack to the pack registry: + + ```bash + spectro pack push permission-manager --registry-server [REGISTRY-SERVER] + ``` + diff --git a/docs/docs-content/registries-and-packs/advanced-configuration.md b/docs/docs-content/registries-and-packs/advanced-configuration.md new file mode 100644 index 0000000000..05b9d28ed5 --- /dev/null +++ b/docs/docs-content/registries-and-packs/advanced-configuration.md @@ -0,0 +1,232 @@ +--- +sidebar_label: 'Advanced Configuration' +title: 'Advanced Configuration' +description: 'Learn how to apply advanced concepts by customizing the deployments of the Packs registry.' +icon: '' +hide_table_of_contents: false +sidebar_position: 80 +--- + + +You can modify the deployment of the pack registry by providing a YAML configuration file. You can also override default configuration options through the usage of environment variables. + +The configuration file is divided into keys and values. The following is an example of a YAML configuration. + +
+ +```yaml +version: 0.1 +log: + level: info + fields: + environment: test +``` + + +The key `version` has a number value. The `log` key has a value with multiple keys, which in turn have more keys. + +To override the value of `log.level` you can specify an environment variable named +`REGISTRY_LOG_LEVEL`. + +
+ +```shell +export REGISTRY_LOG_LEVEL=debug +``` + +
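+
+The same convention applies to nested keys: after the `REGISTRY_` prefix, uppercase each key in the path and join the segments with underscores. For example, the `auth.htpasswd.path` setting used later on this page can be overridden as follows.
+
+```shell
+# Overrides auth.htpasswd.path from the configuration file
+export REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd-basic
+```
+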
+ +## Default Configuration + +The docker image for the registry contains the following default configuration values. + +
+
+```yaml
+version: 0.1
+log:
+  fields:
+    service: registry
+storage:
+  cache:
+    blobdescriptor: inmemory
+  filesystem:
+    rootdirectory: /data/.spectro-server
+http:
+  addr: :5000
+  headers:
+    X-Content-Type-Options: [ nosniff ]
+    Strict-Transport-Security: [ max-age=63072000; includeSubdomains; preload ]
+    Content-Security-Policy: [ img-src 'self'; script-src 'self'; style-src 'self' ]
+    X-Frame-Options: [ DENY ]
+    X-XSS-Protection: [ 1; mode=block ]
+    Referrer-Policy: [ same-origin ]
+auth:
+  htpasswd:
+    realm: basic-realm
+    path: /auth/htpasswd-basic
+```
+
+The server is started with the command `registry serve /etc/spectro/config.yml`.
+You can override the default values with specific values through environment
+variables, or you can use your own configuration file.
+
+For example, you can start the Docker container image with environment variables that override the basic auth realm and logging level. In the following example, the `-e` flag is used to provide environment variables to the container.
+
+
+```bash
+docker run -d \
+  --rm \
+  --publish 443:5000 \
+  --name spectro-registry \
+  --volume $(pwd)/spectropaxconfig/:/etc/spectropaxconfig/ \
+  -e REGISTRY_LOG_LEVEL=debug \
+  -e REGISTRY_AUTH=htpasswd \
+  -e REGISTRY_AUTH_HTPASSWD_REALM="My Enterprise Realm" \
+  gcr.io/spectro-images-public/release/spectro-registry:3.4.0
+```
+
+Alternatively, you can start the container by mounting a directory with a new configuration file and pointing the server command to the configuration file.
+
+
+```shell
+docker run -d \
+  --rm \
+  --publish 443:5000 \
+  --name spectro-registry \
+  --volume $(pwd)/myconfig.yml:/etc/spectropaxconfig/myconfig.yml \
+  gcr.io/spectro-images-public/release/spectro-registry:3.4.0 \
+  serve /etc/spectropaxconfig/myconfig.yml
+```
+
+## Storage Backend
+
+The pack registry can store data on a file system through a mounted
+volume, or you can specify object storage such as AWS S3.
+
+The following is an example of a configuration using a file system backend.
+
+```yaml
+storage:
+  cache:
+    blobdescriptor: inmemory
+  filesystem:
+    rootdirectory: /tmp/registry/data/.spectro-server
+```
+
+If you are using S3 storage, ensure you specify the required S3 parameters.
+
+ +```yaml +storage: + cache: + blobdescriptor: inmemory + s3: + region: us-east-1 + bucket: my-bucket + rootdirectory: /registry + encrypt: true|false + secure: false|true + accesskey: SAMPLEACCESSKEY + secretkey: SUPERSECRET + host: OPTIONAL_MINIO_HOST_IF_USING + port: OPTIONAL_MINIO_PORT_IF_USING +``` + +You can also use ephemeral storage. We recommend using ephemeral storage for testing purposes. Production environments should use object storage or a file system. + +
+
+```yaml
+storage: inmemory
+```
+
+## Authentication
+
+You can configure basic HTTP Auth. Basic Auth requires providing the pack registry server with an htpasswd file containing the credentials.
+
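+
+For example, you can generate the credentials file with the `htpasswd` utility (shown below with a placeholder password) and then reference it through the `auth.htpasswd.path` setting in the configuration that follows.
+
+```shell
+# Create an htpasswd file with bcrypt-hashed credentials for the admin user
+htpasswd -Bbn admin "yourPasswordHere" > /auth/htpasswd-basic
+```
+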
+ +```yaml +auth: + htpasswd: + realm: basic-realm + path: /auth/htpasswd-basic +``` + +## HTTP + +The following options are available for modifying HTTP transport: + +### Server and Port + +For serving content on all interfaces on port 5000: + +
+ +```yaml +http: + addr: :5000 +``` +Alternatively, the server can bind to a single IP and different port: + +
+ +```yaml +http: + addr: 192.168.122.77:25000 +``` +### HTTP Headers + +The following headers are the default, and can be overridden: + +
+ +```yaml +http: + headers: + X-Content-Type-Options: [ nosniff ] + Strict-Transport-Security: [ max-age=63072000; includeSubdomains; preload ] + Content-Security-Policy: [ img-src 'self'; script-src 'self'; style-src 'self ] + X-Frame-Options: [ DENY ] + X-XSS-Protection: [ 1; mode=block ] + Referrer-Policy: [ same-origin ] +``` +### TLS + +TLS can be configured using [Let's Encrypt](https://letsencrypt.org) or custom TLS certificates: + +When using Let's Encrypt, your registry server must be assigned to a public IP address accessible for HTTP-based validation by the Let's Encrypt services. Check out the [Deploy Pack Registry Server with Let's Encrypt](adding-a-custom-registry.md#deploy-pack-registry-server-with-lets-encrypt) guide to learn more. + +
+ +```yaml +http: + addr: :5000 + tls: + letsencrypt: + cachefile: le-cache + email: oz@spectrocloud.com + hosts: + - pax-registry.spectrocloud.com +``` + +Let's Encrypt limits the number of free certificates issued for each domain for a set time. +We recommend you mount a volume where the certificates are permanently stored. Use the +option `cachefile` to enable this behavior. + +You can specify custom certificates by providing the file path to the certificate files. + +
+ +```yaml +http: + tls: + certificate: /path/to/x509/certificate/file + key: /pat/to/x509/key/file/which contains/private key/for x509 certificate above + clientcas: /path/to/file/with one or more/CA certificates encoded as PEM + minimumtls: minimum tls version to use +``` diff --git a/docs/docs-content/registries-and-packs/deploy-pack.md b/docs/docs-content/registries-and-packs/deploy-pack.md new file mode 100644 index 0000000000..4c9b54e670 --- /dev/null +++ b/docs/docs-content/registries-and-packs/deploy-pack.md @@ -0,0 +1,953 @@ +--- +sidebar_label: 'Deploy a Custom Pack' +title: 'Deploy a Custom Pack' +description: 'How to create and deploy a custom pack using the manifest files or Helm charts in Spectro Cloud.' +icon: '' +hide_table_of_contents: false +toc_min_heading_level: 2 +toc_max_heading_level: 2 +sidebar_position: 40 +tags: ["packs", "tutorial"] +--- + + +Custom add-on packs allow you to deploy Kubernetes applications in clusters and reuse them in multiple deployments. This ensures uniformity across your clusters. The primary use cases for creating custom packs are: + +- Aggregated configuration and application dependencies simplify deployment and consumption. + +- Open-source contributors can add new Kubernetes applications to a custom add-on pack for the community. + +- Enterprises can add proprietary Kubernetes applications to a custom add-on pack. + +In this tutorial, you will create a custom add-on pack to package a sample Kubernetes application, [Hello Universe](https://github.com/spectrocloud/hello-universe#hello-universe), and deploy that application to a cluster. You will learn to create the pack in two ways, using manifest files and Helm charts. + +After defining the custom pack, you will set up a registry server, publish the pack to that registry, and configure the registry server in Palette. Lastly, you will create a cluster profile that contains your custom pack and apply the profile to a cluster using either Palette or Terraform. + + +## Prerequisites +To complete the tutorial, you will need the following items: +
+ +1. A Spectro Cloud account. Visit [https://console.spectrocloud.com](https://console.spectrocloud.com) to create an account. + + +2. Tenant admin access to Palette for the purpose of adding a new registry server. + + +3. A cloud account, such as AWS, Azure, or GCP, added to your Palette project settings. + + +4. An SSH key created in the region where you will deploy the cluster. + + +5. [Docker Desktop](https://docs.docker.com/get-docker/) installed on your local machine to start the tutorials container. + + +6. Basic knowledge of Docker containers and Kubernetes manifest file attributes. + + + + +## Set Up the Tutorial Environment + +You will work in a Docker container pre-configured with the necessary tools for this tutorial. However, you can practice this tutorial in any `linux/amd64` or `x86_64` environment by installing the [necessary tools](https://github.com/spectrocloud/tutorials/blob/main/docs/docker.md#docker) and cloning the [GitHub repository](https://github.com/spectrocloud/tutorials/) that contains the tutorial files. Here are the steps to start the tutorials container. +
+
+Start Docker Desktop on your local machine and ensure the daemon is available by issuing the following command, which lists the currently active containers.
+
+ +```bash +docker ps +``` + +Download the `ghcr.io/spectrocloud/tutorials:1.0.4` image to your local machine. The Docker image includes the necessary tools. +
+ +```bash +docker pull ghcr.io/spectrocloud/tutorials:1.0.4 +``` + +Next, start the container, and open a bash session into it. +
+ +```bash +docker run --name tutorialContainer --publish 7000:5000 --interactive --tty ghcr.io/spectrocloud/tutorials:1.0.4 bash +``` + +If port 7000 on your local machine is unavailable, you can use any other port of your choice. +
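+
+For example, to use port 7001 instead, change only the host side of the `--publish` flag. The rest of the command stays the same.
+
+```bash
+docker run --name tutorialContainer --publish 7001:5000 --interactive --tty ghcr.io/spectrocloud/tutorials:1.0.4 bash
+```
+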
+ +:::caution + +Wait to exit the container until the tutorial is complete. Otherwise, you may lose your progress. + +::: + + + +### Tools and Starter Code +After opening a bash session in the active container, verify that the tools necessary for this tutorial are installed. +
+ +Check the Spectro CLI version. +
+ +```bash +spectro version +``` + +Check the Spectro registry server version. +
+ +```bash +registry --version +``` + +Check the Terraform version. +
+ +```bash +terraform --version +``` + +In addition to these tools, the tutorials container has other tools, such as `ngrok`, `git`, and `nano`. + +Examine the directories that pertain to the current tutorial in the **root** directory. +
+ +```bash +. +├── packs +│ └── hello-universe-pack # Contains the pack files +└── terraform + └── pack-tf # Contains the .tf files for creating Spectro Cloud resources +``` +The **packs** directory contains the pack files. The **terraform** directory contains the Terraform files used to create Spectro Cloud resources, which you will use later in this tutorial. + + +## Build a Pack + +Building a custom pack requires defining specific files. +As outlined in the [Adding Add-on Packs](adding-add-on-packs.md) guide, you can define a custom pack in two ways: using manifest files or Helm charts. The file structure varies for manifest-based packs and Helm chart-based packs. Below is the reference file structure for each: +
+ + + + + +
+ +```bash +. +├── pack.json # Mandatory +├── values.yaml # Mandatory +├── manifests # Mandatory + ├── manifest-1.yaml + ├── manifest-2.yaml +│ └── manifest-3.yaml +├── logo.png # Mandatory +└── README.md # Optional +``` + +
+ + + +
+ +```bash +. +├── pack.json # Mandatory +├── values.yaml # Mandatory. Pack-level values.yaml file. +├── charts # Mandatory +│ ├── chart-1 # Can have nested charts +│ │ ├── Chart.yaml +│ │ ├── templates +│ │ │ ├── template-1.yaml +│ │ │ └── template-2.yaml +│ │ └── values.yaml # Chart-level values.yaml file. +│ ├── chart-1.tgz +│ ├── chart-2 +│ │ ├── Chart.yaml +│ │ ├── templates +│ │ │ ├── template-1.yaml +│ │ │ └── template-2.yaml +│ │ └── values.yaml # Chart-level values.yaml file. +│ └── chart-2.tgz +├── logo.png # Mandatory +└── README.md # Optional +``` + +
+ +
+ +
+ +To simplify this tutorial, we provide you with the manifest file for the *Hello Universe* application in the **packs/hello-universe-pack** folder. Change the directory to the **packs/hello-universe-pack** folder. +
+ +```bash +cd /packs/hello-universe-pack +``` +Ensure you have the following files in the current directory. +
+ +```bash +. +├── pack.json # Mandatory +├── values.yaml # Mandatory +├── manifests # Mandatory +│ └── hello-universe.yaml +├── logo.png # Mandatory +└── README.md # Optional +``` +
+
+### Pack File Structure
+
+Go ahead and review each of the following five files in the pack.
+
+* **pack.json** - This file contains the pack metadata, such as `addonType`, `cloudTypes`, `layer`, `name`, and `version`, as well as the `kubeManifests` array that lists the manifest files. Refer to the [JSON Schema](add-custom-packs.md#json-schema) for a list of attributes and respective data types. The schema validation will happen when you push a pack to the registry.
+
+
+
+  ```json
+  {
+    "addonType":"app services",
+    "cloudTypes": [ "all" ],
+    "displayName": "Hello Universe",
+    "kubeManifests": [
+      "manifests/hello-universe.yaml"
+    ],
+    "layer": "addon",
+    "name": "hellouniverse",
+    "version": "1.0.0"
+  }
+  ```
+
+ + + +* **values.yaml** - This file contains configurable parameters you can define while adding the current pack to a cluster profile. In the **values.yaml** file for this tutorial, the `pack/namespace` attribute specifies the namespace on the target cluster to deploy the pack. If the **values.yaml** specifies a namespace value, then Palette first checks to see if the namespace has been created. If so, Palette uses the existing namespace. If the namespace has not been created, Palette creates a new one using the value specified in the YAML file. + + If the **values.yaml** does not specify a namespace value, Palette deploys the application to the default namespace. + + The `manifests` section exposes the configurable parameters for each manifest file listed in the **manifests** directory. For example, in the sample code snippet below, the `hello-universe` attribute exposes the `registry`, `repository`, and `tag` parameters. +
+ + ```yaml + pack: + namespace: "hello-universe" + manifests: + hello-universe: + registry: ghcr.io + repository: spectrocloud/hello-universe + tag: 1.0.12 + ``` + +
+ + You can optionally define *presets*, which are predefined values to use in the **values.yaml**. You define presets in a separate **presets.yaml** file. The presets become available when you create the cluster profile. Presets facilitate configuring the profile and avoid errors that can happen by manually editing the **values.yaml** file. Refer [Pack Presets](pack-constraints.md#pack-presets) for details and examples of how to define presets. + + The example below shows the parameters you can configure in the **values.yaml** for the `hello-universe` manifest when you create the cluster profile. + +
+ + ![Screenshot of the configurable parameters in the values.yaml file.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-values-yaml.png ) + +
+ +* **manifests** - This directory contains the manifest files for your Kubernetes application. This tutorial has only one file, **hello-universe.yaml**. Note that the **values.yaml** file has a corresponding `manifests/hello-universe` element with the same name as the YAML file. +
+ +* **logo.png** - This file contains a logo that displays when you create a cluster profile. +
+ + +* **README.md** - This file may contain the pack description, purpose, authors, and other relevant information. The README in the current example introduces the application used in the pack. +
+ + +After finalizing all files in the pack directory, the next step is to set up a registry server and publish the pack to that registry, where you can access it directly from Palette. + +
+ +## Set Up the Registry Server + +The tutorials environment already has the Spectro registry service and other necessary tools available. The following sections will guide you to start the registry server, expose the service to the external world using [Ngrok](https://ngrok.com/) reverse proxy, and log in to the registry server to push your custom add-on pack to it. + +### Start and Expose the Registry Server +Start the registry server by issuing the following command from the bash session you opened into the tutorials container. +
+ +```bash +registry serve /etc/spectro/config.yml > /var/log/registry.log 2>&1 & +``` + +The registry server will start in HTTP mode (not HTTPS). Refer to the [Add a Custom Registry](adding-a-custom-registry.md) guide to learn more about deploying an HTTPS registry server. + + +Next, expose the registry server to the public so that you can configure it later in Palette. Use Ngrok reverse proxy to expose the registry server listening on port 5000 via an HTTP tunnel using the following command. +
+
+```bash
+ngrok http 5000 --log-level debug
+```
+
+The command above keeps the current bash session occupied and displays the status of each HTTP request made to the Ngrok server later in this tutorial. The screenshot below shows the registry server successfully exposed via Ngrok.
+
+ +![Screenshot of registry server exposed via ngrok](/tutorials/deploy-pack/registries-and-packs_deploy-pack_ngrok-start.png ) + +
+ +Verify the registry server is accessible from outside the tutorials container by visiting the `/health` endpoint. Access the *https://Your-URL-Here/health* in your host browser. Replace the base URL with the Ngrok URL output you received. You should receive a `{"status":"UP"}` response. + +
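+
+If you prefer the command line, you can run the same check with curl from your host machine. The URL below is a placeholder - replace it with your own Ngrok URL.
+
+```bash
+curl https://your-ngrok-url.ngrok-free.app/health
+```
+
+The command should return `{"status":"UP"}`.
+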
+ +### Log in to the Registry Server +Once the registry server's `/health` endpoint shows `UP` status, the next step is to log in and then push the pack to it. The pack you will push is in the tutorials container. Open another bash session into the tutorials container from your local terminal. +
+ +```bash +docker exec -it tutorialContainer bash +``` + +Log in to the registry server using Ngrok's public URL assigned to you. Issue the command below, but replace the URL with your Ngrok URL. The command below uses these credentials to log in to the registry server: `{username: admin, password: admin}`. +
+ +```bash +spectro registry login --insecure --default --username admin --password admin \ +f59e-49-36-220-143.ngrok-free.app +``` + +:::caution + +Do not use https:// or http:// keyword in the Ngrok URL. Using either of these keywords will result in an authorization issue. + +::: + + +You will receive a `Login Succeeded` response upon successful login. +
+ +```bash +# Output condensed for readability +WARNING! Your password will be stored unencrypted in /root/.spectro/config.json. +Login Succeeded +``` +
+ + +### Push the Pack to the Registry Server +When you are logged in, push the pack to the registry server using the following command. +
+ +```bash +spectro pack push /packs/hello-universe-pack/ +``` + +You can verify that the pack is in the registry by using the `ls` command. This command lists all packs in the registry. +
+ +```bash +spectro pack ls +``` + +Verify the pack you pushed is listed, as shown in the screenshot below. + +
+ +![Screenshot of spectro pack ls](/tutorials/deploy-pack/registries-and-packs_deploy-pack_pack-push.png) + +
+ +If you need help with the Spectro CLI commands, such as deleting a pack, refer to the [Spectro CLI commands](spectro-cli-reference.md#commands) guide. +
+ +### Configure the Registry Server in Palette +After you push the pack to the registry server, log in to Palette and configure the registry service so that you can access it when you create your cluster profile. + + +Log in to [Palette](https://console.spectrocloud.com), and switch to the tenant admin view. +
+ +![Screenshot of Palette tenant settings.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_tenant-admin.png) + +
+ + + +Navigate to **Tenant Settings** > **Registries** > **Pack Registries** section. Click on the **Add New Pack Registry**. Palette will open a pop-up window asking for the fields to configure a custom pack registry, as highlighted in the screenshot below. + +![A screenshot highlighting the fields to configure a custom pack registry. ](/registries-and-packs_adding-a-custom-registry-tls_certificate.png) + + +Provide the pack registry name, endpoint, and user credentials in the pop-up window. For a consistent experience in this tutorial, we suggest using the name **private-pack-registry**. Use your Ngrok URL as the pack registry endpoint. Ensure to use an "https://" prefix in the pack registry endpoint. + +In the **TLS Configuration** section, select the **Insecure Skip TLS Verify** checkbox. This tutorial does not establish a secure HTTPS connection between Palette and your pack registry server. Therefore, you can skip the TLS verification. Instead, this tutorial uses an unencrypted HTTP connection. However, in a production environment, you can upload your certificate in the **TLS Configuration** section if you need Palette to have a secure HTTPS connection while communicating with the pack registry server. + +Click on **Validate** to ensure the URL and credentials are correct, then click on **Confirm** to finish configuring the registry server. + +
+ +![Screenshot of registry server edit option in Palette tenant settings.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-edit.png) + +
+ + +Palette syncs the registry server periodically. However, you can sync it manually the first time you add a server by clicking the **three-dot Menu** next to the registry server name and selecting **Sync**. + +
+ +![Screenshot of registry server sync in Palette](/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-sync.png) + +
+ + +### Create a Cluster Profile and Deploy a Cluster + +This tutorial guides you to create a cluster profile for AWS. However, you can choose any other cloud service provider, provided you configure the following two items: +
+ +* **Cloud account**: A cloud account added to your Palette project settings. + + The AWS cloud account name in this tutorial example is **spectro-cloud**. You can choose another name if desired. The screenshot below shows how to add and verify the AWS cloud account with your project. Navigate to **Project Settings** > **Cloud Accounts** > **AWS** > **Add AWS Account** in Palette. Check out the [Register and Manage AWS Accounts](../clusters/public-cloud/aws/add-aws-accounts.md) guide for additional help. + +
+ + ![Screenshot of Cloud Accounts in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_palette-cloud-account.png) + +
+ + +* **SSH key**: An SSH key created in the region where you will deploy the cluster. + + This tutorial example will deploy the cluster in the **us-east-2** region, and the SSH key name used in this example is **aws_key_sk_us_east_2**. You must choose the desired region and the available SSH key name from your AWS account. + +
+ +Create a cluster profile and deploy it to a cluster using either Palette or Terraform code. + +- [UI Workflow](#ui-workflow) + +- [Terraform Workflow](#terraform-workflow) + +--- + +
+ +## UI Workflow + + +### Create a Cluster Profile +Switch to the **Default** project scope for creating a cluster profile. +
+ +![Screenshot of the Palette Default scope.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_default-scope.png) + +
+ +Select the **Profile** section in the left **Main Menu** to create a cluster profile that will combine the core infrastructure and add-on layers. Click on the **Add Cluster Profile** button, and provide the details in the wizard that follows. The wizard displays the following sections. +
+ +#### Basic Information +Use the following values in the **Basic Information** section. + +|**Field**|**Value**| +|---|---| +|Name|pack-tutorial-profile| +|Version|`1.0.0`| +|Description|Cluster profile as part of the pack tutorial.| +|Type|Full| +|Tags|`spectro-cloud-education, app:hello-universe, terraform_managed:true`| + +Click on **Next** to continue. +
+ +#### Cloud Type +In the **Cloud Type** section, choose AWS as the infrastructure provider for this tutorial, and click on **Next** at the bottom to move on to the next section. +
+ +:::info + +If you choose a different cloud service provider, the core infrastructure layers options, as outlined in the **Profile Layers** section below, will differ from this tutorial. + +::: + +
+ +#### Profile Layers +In the **Profile Layers** section, add the following core infrastructure layers if you have chosen the AWS cloud service provider. To deploy your resource to Azure or Google Cloud, use the core infrastructure layers outlined in [Cloud Service Provider Configurations](https://github.com/spectrocloud/tutorials/tree/main/terraform/pack-tf/README.md#cloud-service-provider-configurations). + +|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| +|---|---|---|---| +|OS|Public Repo|Ubuntu|`LTS__20.4.x`| +|Kubernetes|Public Repo|Kubernetes|`1.24.x`| +|Network|Public Repo|Calico|`3.25.x`| +|Storage|Public Repo|Amazon EBS CSI|`1.16.x`| + +As you add each layer, click on the **Next layer** button. After you add the **Storage** layer, click on the **Confirm** button to complete the core infrastructure stack. Palette displays the newly created infrastructure profile as a layered diagram. You can select any layer to make further edits or change the version if desired. + +Now you are ready to add the add-on layers. Click the **Add New Pack** button. + +Add the Spectro Proxy pack to enable a reverse proxy to connect to the cluster's API. Adding this pack is *optional*, but it will help connect your local machine to the cluster's API for debugging. +Refer to the [Spectro Proxy](../integrations/frp.md) guide for more details. + +|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| +|---|---|---|---| +|Authentication | Public Repo| Spectro Proxy | `1.3.x`| + +Click on the **Confirm & Create** button to finish adding the Spectro Proxy pack. Also, add the following certificate Subject Alternative Name (SAN) value to the Kubernetes pack under the `apiServer` parameter section to configure the Spectro Proxy pack. +
+ +```yaml +certSANs: + - "cluster-{{ .spectro.system.cluster.uid }}.{{ .spectro.system.reverseproxy.server }}" +``` +
+ +![Screenshot of the certificate Subject Alternative Name.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-certsan.png) + +
+ +Next, add the following **Hello Universe** pack. This is the custom add-on pack you defined and pushed to the **private-pack-registry** earlier in this tutorial. + +|**Pack Type**|**Registry**|**Pack Name**|**Pack Version**| +|---|---|---|---| +|App Services | private-pack-registry | Hello Universe | `1.0.x` | + + +Click on the **Confirm & Create** button to finish adding the Hello Universe pack. + + +If there are no errors or compatibility issues, Palette displays the newly created full cluster profile. Verify the layers you added, and click **Next**. + + +
+ +![Screenshot of the Profile Layers success.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_profile-layer.png) + +

+ + +#### Review +Review once more and click **Finish Configuration** to create the cluster profile. +
+
+### Create a Cluster
+
+From the **Profile** page, click on the newly created cluster profile to view its details page. Palette displays all the layers and allows you to edit any of them.
+
+Click the **Deploy** button to deploy a new cluster. The cluster deployment wizard displays the following sections.
+
+
+#### Basic Information
+
+Use the following values in the first section, **Basic Information**.
+
+|**Field**|**Value**|
+|---|---|
+|Cluster name| pack-tutorial-cluster |
+|Description| Cluster as part of the pack tutorial.|
+|Tags|`spectro-cloud-education, app:hello-universe, terraform_managed:true`|
+|Cloud Account|spectro-cloud|
+
+Note that the AWS cloud account name in this tutorial example is **spectro-cloud**. If you used a different cloud account name, choose the name configured in your Palette project settings.
+
+Click **Next** to continue.
+
+ +#### Parameters + +The **Parameters** section allows you to change the profile configurations. For example, clicking on the **Hello Universe 1.0.x** layer allows you to configure the `registry`, `repository`, and `tag` parameters defined in the **values.yaml** file. +
+ +![Screenshot of the Cluster layers.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-layers.png) + +
+ +Keep the default values and click **Next**. + +
+ +#### Cluster config + +In the **Cluster config** section, ensure the **Static Placement** field is unchecked. If checked, the **Static Placement** will deploy the cluster in an existing VPC, and you will need the [Amazon Resource Names](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html) (ARNs) for the existing subnets, roles, and other resources. For this tutorial, we will use dynamic placement, where Palette creates a new VPC and all other resources needed for the cluster. + +For the **Region** field, select the region of your choice. The tutorial example will deploy the cluster in the **us-east-2** region. For the **SSH Key Pair Name** field, choose the SSH key pair name from the selected region. You must have an SSH key created already in the AWS region where you will deploy the cluster. + +Click **Next** to continue. + +
+
+#### Nodes config
+
+In the **Nodes config** section, provide the details for the master and the worker pools. For this tutorial, you can use the following minimal configuration:
+
+|**Field** | **Value for the master-pool**| **Value for the worker-pool**|
+|---|---|---|
+|Node pool name| master-pool | worker-pool |
+|Number of nodes in the pool| `1` | `1` |
+|Allow worker capability| Checked | Not applicable |
+|Enable Autoscaler | Not applicable | No |
+|Rolling update | Not applicable | Expand First. Launch a new node first, then shut down the old one. |
+
+Keep the **Cloud Configuration** the same for the master and worker pools.
+
+|**Field** | **Value**|
+|---|---|
+|Instance Type | General purpose `m4.xlarge`. A minimum allocation of four CPU cores is required for the master node. |
+|Availability zones | Choose any *one* availability zone. This tutorial example will deploy to the `us-east-2a` availability zone. |
+|Disk size | 60 GiB |
+
+Click **Next** to continue.
+
+
+#### Settings
+
+The **Settings** section displays options for OS patching, scheduled scans, scheduled backups, and cluster role binding. Use the default values, and click on the **Validate** button.
+
+ +#### Review +Review all configurations in this section. The **Review** page displays the cluster name, tags, cloud account name, node pools, layers, and an estimated hourly cost. If everything looks good, click on the **Finish Configuration** button to finish deploying the cluster. Deployment may take up to *20 minutes* to finish. + +While deployment is in progress, Palette displays the cluster status as **Provisioning**. While you wait for the cluster to finish deploying, you can explore the various tabs on the cluster details page, such as **Overview**, **Workloads**, and **Events**. + +
+ + + +## Terraform Workflow + +The [Spectro Cloud Terraform provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) allows you to create and manage Palette resources using Infrastructure as Code (IaC). This offers such advantages as automating infrastructure, facilitating collaboration, documenting infrastructure, and keeping all infrastructure in a single source of truth. + +### Starter Code +Navigate back to your tutorials container bash session to locate the starter Terraform files. If you have closed the terminal session, you can reopen another bash session in the tutorials container using the following command. +
+ +```bash +docker exec -it tutorialContainer bash +``` + +Switch to the **/terraform/pack-tf** directory, which contains the Terraform code for this tutorial. +
+ +```bash +cd /terraform/pack-tf +``` + +### Set Up the Spectro Cloud API Key + +To get started with Terraform code, you need a Spectro Cloud API key to authenticate and interact with the Palette API endpoint. To add a new API key, log in to Palette, click on the user **User Menu** at the top right, and select **My API Keys**, as shown in the screenshot below. + +
+ +![Screenshot of generating an API key in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_generate-api-key.png ) + +
+ +Below are the steps to add and export an API key: + + +1. Fill in the required fields, such as the API key name and expiration date, and confirm your changes. + + + +2. Copy the key value to your clipboard, and switch back to the tutorials container environment. + + + +3. Export the API key as an environment variable in the tutorials container bash session so the Terraform code can authenticate with Palette API. +
+ + ```bash + export SPECTROCLOUD_APIKEY= + ``` +
+ +### Review Terraform Files +Ensure you have the following files in the current working directory. +
+ +```bash +. +├── profile.tf # Resource +├── cluster.tf # Resource +├── data.tf # Spectro Cloud data resources +├── inputs.tf # Input variables +├── terraform.tfvars # Variable definitions file +├── outputs.tf # Output variables +└── provider.tf # Spectro Cloud Terraform provider +``` + +Note that the Terraform code will deploy the resources to **AWS**. + +We recommend you explore all Terraform files. Below is a high-level overview of each file: +
+ +- **profile.tf** - contains the configuration for the `spectrocloud_cluster_profile` resource. Review the core infrastructure layers that make up the `spectrocloud_cluster_profile` resource. + + + +- **cluster.tf** - contains the configuration for the `spectrocloud_cluster_aws` resource. The cluster resource depends upon the `spectrocloud_cluster_profile` resource. + + + +- **data.tf** - contains the configuration for the resources to retrieve data from Palette dynamically. The table below lists the pack details required for each pack layer in order to deploy the `spectrocloud_cluster_profile` resource. + + |**Pack Type**|**Registry**|**Pack Name**|**Tag**| **Version** | + |---|---|---|---| + |OS|Public Repo|`ubuntu-aws`|`LTS__20.4.x`| `20.04`| + |Kubernetes|Public Repo|`kubernetes`|`1.24.x`| `1.24.10` | + |Network|Public Repo|`cni-calico`|`3.25.x`|`3.25.0`| + |Storage|Public Repo|`csi-aws-ebs`|`1.16.x`|`1.16.0`| + + Note that using this Terraform code will deploy the resources to AWS. To deploy your resource to Azure or Google Cloud, use the layer details outlined in [Cloud Service Provider Configurations] (https://github.com/spectrocloud/tutorials/tree/main/terraform/pack-tf/README.md#cloud-service-provider-configurations). + + + +- **inputs.tf** - contains the variables used in the tutorial such as the names of cluster profile, cluster, cloud account, SSH key name, AWS region, pack name, and registry server. + + Some variables have a default value, but you *must* provide the values for `cluster_cloud_account_aws_name`, `aws_region_name`, `ssh_key_name`, and `private_pack_registry` variables. You will find a `#ToDo` tag next to each variable to update. Provide the values for these variables in a separate file, **terraform.tfvars**. Use default values for the remaining variables. + + + +- **terraform.tfvars** - contains the variable definitions. The list of variables is outlined in the code block below. You *must* specify the values for all variables that are marked `"REPLACE ME"`. Read the inline comments below to understand each variable. + + - For example, the value for `cluster_cloud_account_aws_name` will be the name of the cloud account added to your Palette project settings. In this tutorial example, the cloud account name is **spectro-cloud**. + + - For `aws_region_name`, you can choose any [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) for your deployment. This tutorial example uses **us-east-2** region. + + - The value for `ssh_key_name` will be the name of the SSH key available in the region where you will deploy the cluster. The SSH key name used in this example is **aws_key_sk_us_east_2**. + + - Lastly, provide your registry server name for the `private_pack_registry` variable. You can provide the **private-pack-registry** as the value if you have followed the same naming convention as this tutorial. +
+ + ```bash + cluster_cloud_account_aws_name = "REPLACE ME" # Name of the cloud account added to your Palette project settings + aws_region_name = "REPLACE ME" # Use "us-east-2" or any other AWS region + ssh_key_name = "REPLACE ME" # Name of the SSH key available in the region where you will deploy the cluster + private_pack_registry = "REPLACE ME" # Your registry server name. This tutorial uses "private-pack-registry". + ``` + + + +- **outputs.tf** - contains the output variables to expose information. + + + +- **provider.tf** - contains the provider configuration and version. + +
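+
+Before moving on, you may find it helpful to confirm that no placeholder values remain and that the files are consistently formatted. The commands below are an optional check, not part of the tutorial files themselves; they simply search for the `"REPLACE ME"` markers and `#ToDo` tags mentioned above.
+
+```bash
+# List any variables that still contain placeholder values or #ToDo tags.
+grep -nE "REPLACE ME|#ToDo" terraform.tfvars inputs.tf
+
+# Optionally verify that the Terraform files are consistently formatted.
+terraform fmt -check
+```
+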
+ +### Deploy Terraform +After you update the **terraform.tfvars** file and carefully review the other files, initialize the Terraform provider. +
+
+```bash
+terraform init
+```
+
+The `init` command downloads the plugins and providers specified in the **provider.tf** file. Next, preview the resources Terraform will create.
+
+ +```bash +terraform plan +``` + +The output displays the resources Terraform will create in an actual implementation. +
+ +```bash +# Output condensed for readability +Plan: 2 to add, 0 to change, 0 to destroy. +``` + +Finish creating all the resources. +
+ +```bash +terraform apply -auto-approve +``` + +It can take up to 20 minutes to provision the cluster. When cluster provisioning completes, the following message displays. +
+
+```bash
+# Output condensed for readability
+Apply complete! Resources: 2 added, 0 changed, 0 destroyed.
+```
+
+You can observe the cluster deployment progress by navigating back to Palette.
+
+
+
+#### Check the In-Progress Deployment
+Log in to [Palette](https://console.spectrocloud.com/) and navigate to the **Profiles** section in the left **Main Menu**. If the Terraform deployment is successful, the newly created cluster profile is displayed as shown in the screenshot below.
+
+ +![Screenshot of the successful Profile in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_verify-profile.png) + +
+ + + +
+ +## Validate +In Palette, navigate to the left **Main Menu** and select **Clusters**. Next, select your cluster to display the cluster Overview page and monitor cluster provisioning progress. + +
+ +![Screenshot of the cluster health.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-health.png) + +
+
+When the cluster status displays **Running** and **Healthy**, you can access the application from the exposed service URL with the port number displayed. For the Hello Universe application, port 8080 is exposed. Click on the URL to access the application.
+
+ +:::caution + +We recommend waiting to click on the service URL, as it takes one to three minutes for DNS to properly resolve the public load balancer URL. This prevents the browser from caching an unresolved DNS request. + +::: + +
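+
+If you prefer to verify DNS resolution from the command line before opening the URL, you can query the load balancer hostname directly, assuming a DNS lookup tool such as `nslookup` is available in your environment. The hostname below is a placeholder for the service URL shown in Palette.
+
+```bash
+# Replace the hostname with the service URL displayed for your cluster.
+nslookup <LOAD_BALANCER_HOSTNAME>
+```
+
+Once the lookup returns one or more IP addresses, the application URL should load in the browser.
+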
+
+![Screenshot of successfully accessing the application using the load balancer URL.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_success.png)
+
+ +You can also look at real-time metrics, such as CPU and memory consumption, in the cluster's **Overview** tab in Palette. + +
+ +![Screenshot of the cluster metrics.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_cluster-metrics.png) + +
+
+Using your custom pack, you have successfully deployed the Hello Universe application to the cluster.
+
+ +## Cleanup +Delete the cluster, cluster profile, and registry server, and remove the registry service configuration from Palette's settings. + +The following steps will guide you in cleaning up your environment. Follow the steps for Palette if you used Palette to deploy the cluster. Use Terraform commands to delete the cluster if you used Terraform for deployment. + +
+ + + + + +
+ +#### Delete the Cluster and Profile using Palette +Navigate to the **Cluster** section in Palette's left **Main Menu**, and view the details page of the **pack-tutorial-cluster**. To delete the cluster, click on the **Settings** button to expand the **drop-down Menu**, and select the **Delete Cluster** option. Palette prompts you to enter the cluster name and confirm the delete action. Type the cluster name to proceed with the delete step. + +
+ +![Screenshot of deleting the cluster in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_delete-cluster.png) + +
+ +The cluster status displays **Deleting**. Deletion takes up to 10 minutes. +
+ +:::info + +If a cluster remains in the delete phase for over 15 minutes, it becomes eligible for force deletion. Navigate to the cluster's details page and click on **Settings**. Select **Force Delete Cluster**. Palette automatically removes clusters that are stuck in the cluster deletion phase for over 24 hours. + +::: +
+ +After you delete the cluster, go ahead and delete the profile. From the left **Main Menu**, click **Profiles** and select the profile to delete. Choose the **Delete** option in the **three-dot Menu**. + +
+ +![Screenshot of deleting the profile in Palette.](/tutorials/deploy-pack/registries-and-packs_deploy-pack_delete-profile.png) + +
+ +Wait for the resources to clean up and ensure they are successfully deleted. + +
+ +
+ + + +
+ +#### Delete the Cluster and Profile using Terraform +If you've used Terraform to deploy the cluster, switch back to the tutorials container, and issue the following command from within the **/terraform/pack-tf** directory: +
+ +```bash +terraform destroy -auto-approve +``` + +Wait for the resources to clean up. Deleting the Terraform resources may take up to 10 minutes. +
+ +```bash +# Output condensed for readability +Destroy complete! Resources: 2 destroyed. +``` + +
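+
+To double-check that nothing is left behind, you can list the resources Terraform still tracks. An empty result means both the cluster and the cluster profile were removed. This is an optional verification step.
+
+```bash
+# Lists resources remaining in the Terraform state. No output means the state is empty.
+terraform state list
+```
+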
+ +
+ +
+ +
+ +#### Delete the Registry Server +After deleting the cluster and cluster profile, navigate to **Tenant Settings** > **Registries** > **Pack Registries** to delete the registry service configuration from Palette. +
+ +![Screenshot of registry server delete in Palette](/tutorials/deploy-pack/registries-and-packs_deploy-pack_registry-delete.png) + +
+ +Stop the registry server by closing the tutorials container bash session that serves the Ngrok reverse proxy server. At this point, you can close all the bash sessions. To remove the container and the image from the local machine, issue the following commands: +
+ +```bash +docker container rm --force tutorialContainer +docker image rm --force ghcr.io/spectrocloud/tutorials:1.0.3 +``` + +
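+
+If you want to confirm that the container and image were removed, you can list what remains on your local machine. This is an optional check.
+
+```bash
+# Neither command should list the tutorials container or image anymore.
+docker container ls --all --filter "name=tutorialContainer"
+docker image ls ghcr.io/spectrocloud/tutorials
+```
+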
+ + +## Wrap-Up + +In this tutorial, you learned how to create a custom pack using manifest files. You packaged up an application in a custom pack that you pushed to a private registry server and added to Palette. + +Next, you created a cluster profile that included all the core infrastructure layers, such as the OS, Kubernetes distribution, and more. You also added your custom pack to the cluster profile so your application could be deployed to a Kubernetes cluster. + +Packs are the building blocks of cluster profiles, allowing you to customize your Kubernetes clusters. Palette enables you to use different packs to create multiple cluster profiles, each for specific purposes. As a result, you can ensure all Kubernetes deployments contain all the required dependencies and applications without developing complicated deployment scripts. All you need to do is maintain the cluster profiles. + +To learn more about packs in Palette, we encourage you to check out the reference resources below. + + +- [Custom OS Pack](add-custom-packs.md#add-a-custom-pack) + + +- [Add-on Packs](adding-add-on-packs.md) + + +- [Pack Constraints](pack-constraints.md) + diff --git a/docs/docs-content/registries-and-packs/helm-charts.md b/docs/docs-content/registries-and-packs/helm-charts.md new file mode 100644 index 0000000000..e34159d548 --- /dev/null +++ b/docs/docs-content/registries-and-packs/helm-charts.md @@ -0,0 +1,73 @@ +--- +sidebar_label: "Helm Registries" +title: "Helm Registries" +description: "Learn how to add your own Helm Registries to Palette" +hide_table_of_contents: false +sidebar_position: 60 +--- + +Helm Charts are a collection of Kubernetes resource files capable of deploying services of varying complexity. Palette provides some stable default Helm charts in its public Helm Chart Registry. + +Palette also supports creating custom Helm registries. You can add your own public or private Helm registries to Palette's Helm registry. + +The Helm Chart registry synchronizes charts with Palette, so you can use them when you create cluster profiles. + +## Prerequisite +For security, Palette requires Helm OCI version 3.7.0 and higher. + +## Add a Helm Chart Registry to Palette + +To add your private Helm Chart registry to Palette: + +1. Log in to Palette as a Tenant administrator. + +2. From the **Main Menu** navigate to **Tenant Settings > Registries**. + +3. From the **Helm Registries** tab, click **Add New Helm Registry** and type the registry name and endpoint. If the registries list is long, you may need to scroll down to see the Add link. + +4. Type the name of your registry and its endpoint. + +5. Choose **Protected** mode based on whether your network is private or public: + + + * Toggle **Protected** mode to *on* if your Helm registry is deployed in a private network. Palette downloads and deploys charts from protected chart registries instead of scanning a private network for them. + + When your registry is protected: + + * Charts are not synchronized with Palette, and you must type Helm chart names and versions when you create Cluster Profiles. + * The **Last Synced** column in the **Helm Registries** tab displays *n/a*. +
+
+ + * Leave **Protected** mode toggled *off* if your Helm registry is deployed in a public network. We refer to Helm registries with this option disabled as being unprotected. + + When your registry is unprotected: + + * Palette synchronizes Helm charts with the console so you can select charts and versions from drop-down menus. + * The **Last Synced** column in the **Helm Registries** tab displays the date that charts were last synchronized in Palette. +
+ +6. If you haven’t set up credentials for your registry, leave **No Authentication** toggled *on*. + + If your registry has credentials, toggle **No Authentication** to *off* and type the registry **Username** and **Password**. +
+ +7. Confirm your changes. + +Your chart is now deployed in Palette's Helm Chart registry and is ready to model in cluster profiles. + +## Validate + +You can find your Helm registry listed in the **Helm Registries** tab in **Tenant Settings > Registries**. Use charts from Helm registries in your cluster profiles. + +The following applies when adding Helm charts to cluster profiles. + +* When using charts from protected registries, you must type the chart name and version you want to use. These must match the name and version hosted in the Helm registry. +* For unprotected Helm registries, charts are synchronized in Palette, which allows you to select them from lists and dropdowns. + +## Resources + +[Create Cluster Profiles](../cluster-profiles/task-define-profile.md) + +
+
diff --git a/docs/docs-content/registries-and-packs/oci-registry.md b/docs/docs-content/registries-and-packs/oci-registry.md
new file mode 100644
index 0000000000..d8642f2b39
--- /dev/null
+++ b/docs/docs-content/registries-and-packs/oci-registry.md
@@ -0,0 +1,146 @@
+---
+sidebar_label: "OCI Registry"
+title: "Spectro Cloud OCI Registry"
+description: "Creation and usage of OCI registries within Spectro Cloud"
+icon: ""
+hide_table_of_contents: false
+sidebar_position: 70
+---
+
+
+
+Palette supports OCI registries that serve the “filesystem bundle” unpacked on disk as Helm registries. Helm charts hosted in OCI registries can be added to cluster profiles and deployed to Kubernetes clusters. All OCI-compliant registries are supported.
+
+## Set Up an OCI Registry
+
+* Log in as **Tenant Admin**.
+
+
+* Click **Registries** to open **Manage Registries**.
+
+
+* Select the **OCI Registries** tab and click the **Add New OCI Registry** button.
+
+
+* Provide the following information to the Add OCI registry wizard:
+  * Name: A unique registry name
+  * Registry endpoint
+  * OCI Authentication type: Basic or ECR
+  * Authentication details based on the selected authentication type
+
+
+* Click **Confirm** to complete the registry creation process.
+
+
+* Once the registry is created and charts are added, they can be [attached as part of an add-on cluster profile](#use-your-oci-registry).
+
+## Basic Authentication of Azure Container Registry
+
+Palette supports basic authentication for [Azure Container Registry](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-get-started-portal?tabs=azure-cli). Azure Container Registry is a private registry service for building, storing, and managing container images and related artifacts.
+
+### Prerequisites
+
+In the Azure portal:
+
+ * Create an Azure Container Registry.
+
+
+ * Go to the Azure Container Registry, select **Access keys**, and enable **Admin user** to generate the password.
+
+### How to Authenticate
+
+ * Go to the Palette console and create an OCI registry with the following details:
+
+  * Endpoint: The Azure Container Registry login server endpoint
+  * Username: The Azure Container Registry username
+  * Password: The Azure Container Registry password
+  * Authentication type: Basic
+
+## Amazon ECR Authentication
+
+Choose one of the following ECR protection modes:
+* Unprotected Mode: No credentials required.
+
+
+* Protected Mode: Toggle the “protected” button for protected registry creation and authenticate the AWS account using credentials or STS.
+  * For the credentials method of authentication, use the Access Key and Secret Access Key of the role created and validate the credentials.
+  * For STS, use the unique ARN of the AWS role and validate.
+
+:::caution
+To provision ECR-based OCI authentication, make sure that the user's STS role has the ECR policy configured.
+
+::: + +## ECR Policy: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "ecr-public:DescribeRegistries", + "ecr:DescribeImageReplicationStatus", + "ecr:ListTagsForResource", + "ecr:ListImages", + "ecr:DescribeRepositories", + "ecr:BatchCheckLayerAvailability", + "ecr:GetLifecyclePolicy", + "ecr-public:DescribeImageTags", + "ecr-public:DescribeImages", + "ecr:GetRegistryPolicy", + "ecr-public:GetAuthorizationToken", + "ecr:DescribeImageScanFindings", + "ecr:GetLifecyclePolicyPreview", + "ecr:GetDownloadUrlForLayer", + "ecr-public:GetRepositoryCatalogData", + "ecr:DescribeRegistry", + "ecr:GetAuthorizationToken", + "ecr-public:GetRepositoryPolicy", + "ecr-public:DescribeRepositories", + "ecr:BatchGetImage", + "ecr:DescribeImages", + "ecr-public:GetRegistryCatalogData", + "ecr-public:ListTagsForResource", + "ecr-public:BatchCheckLayerAvailability", + "ecr:GetRepositoryPolicy" + ], + "Resource": "*" + } + ] +} +``` +## Multi-Region Support for AWS ECR registries: + +Palette supports the parameterization of AWS ECR registry endpoint to support cross-region replicated registries. For performance considerations, Helm chart content may be replicated across multiple AWS regions and served to the clusters from within the region of cluster deployment. To support this, the variable “{{.spectro.system.aws.region}}” can be used in the registry endpoint. This variable is substituted at the time of cluster deployment with the region selected for deployment. + + +**Region Parameter:** + +```json +{{.spectro.system.aws.region}} +``` +**Endpoint format:** + +```json +.dkr.ecr.{{.spectro.system.aws.region}}.amazonaws.com +``` +**A sample Endpoint:** + +```json +214575254960.dkr.ecr.{{.spectro.system.aws.region}}.amazonaws.com +``` +Specify a default region to fall back to when the deployment region does not contain the requested helm chart. +(Eg:, Default region: us-west-1) + +## Use Your OCI Registry +Charts from the OCI registry can be used in your **Add on** cluster profiles as follows: +* From the Repository menu, select the desired OCI registry. + + +* Key in the required chart name and version. The name and version should exactly match the chart name and version hosted in the OCI registry. + + +* Click done to get your OCI-helm layer added to the cluster profile. + diff --git a/docs/docs-content/registries-and-packs/pack-constraints.md b/docs/docs-content/registries-and-packs/pack-constraints.md new file mode 100644 index 0000000000..f355e34987 --- /dev/null +++ b/docs/docs-content/registries-and-packs/pack-constraints.md @@ -0,0 +1,676 @@ +--- +sidebar_label: "Pack Constraints" +title: "Pack Constraints" +description: "Description of pack constraints and their usages within Spectro Cloud" +icon: "" +hide_table_of_contents: false +sidebar_position: 50 +--- + + + + +Pack constraints are a set of rules defined at the pack level to validate the packs for a Profile or a Cluster *before* it gets created or updated. Packs must be validated before the cluster is submitted to ensure a successful deployment. + +
+ +:::info + +You can find information about the JSON schema for the pack metadata file in the [JSON schema](add-custom-packs.md#json-schema) section of the documentation. + +::: + + +## Pack Values Constraints + +A Spectro Pack currently supports various configurations through a configuration file called `values.yaml`. The values defined in the config file are applied while deploying the Kubernetes cluster. The values defined in the pack are default values and can be overridden in the Cluster Profile or during the Cluster deployment. + +Since the default pack values can be overridden, users may inadvertently set incorrect values leading to cluster deployment failure. These failures can occur at any point during the cluster deployment process. If the system is capable of detecting invalid pack values before the cluster is submitted for deployment, then deployment failures can be overcome to some extent. + +Pack value constraints are additional information provided through a template file called `schema.yaml` in the pack. They define the schema format of the pack values. The pack constraints framework auto-checks for any schema constraints defined in the pack and validates the pack values. This checking occurs while creating or updating Cluster Profiles and Clusters. + +## Schema Constraints + +Every schema constraint consists of a key name and the schema template. The key name must be the complete path of the parameter which is defined in the config file. + +**Required** + +Defines whether the pack value is optional or required. + +```bash +registry.hostname: + schema: '{{ required }}' +``` + +**Readonly** + +The pack value is not editable if marked as readonly. + +```bash +registry.hostname: + schema: '{{ readonly }}' +``` + + +**Format** + +Defines the pack value format: the value is valid only when the value format matches the format defined in the pack. + +**Format Syntax** + +A format template consists of one or more format types along with the optional regex and number range values. + +```bash +registry.hostname: + schema: '{{ required | format "${FORMAT_TYPE:/REGEX/ OR [NUMBER RANGE] OR [LIST_OPTIONS]}" }}' +``` + +:::caution + +The syntax of the regex accepted is the same general syntax used by Perl, Python, and other languages. More precisely, it is the syntax accepted by RE2 and described [here](https://golang.org/s/re2syntax). + +::: + +**Format Types** + + + + + +The string format type checks if the input value is a string and supports the regex in the template. If regex is specified in the template then the input value must match the regex. + +```bash +registry.hostname: + schema: '{{ format "${string}" }}' +registry.hostname: + schema: '{{ format "${string:/^([a-z0-9].*)$/}" }}' +``` + + + + + +The number format type checks if the input value is a number, and supports the regex and the number range in the template. + +```bash +registry.port: + schema: '{{ format "${number}" }}' +registry.port: + schema: '{{ format "${number:[5000-5005]}" }}' +registry.port: + schema: '{{ format "${number:/^(500[0-5])$/}" }}' +``` + + + + + +The bool format type checks if the input value is true or false. + +```bash +registry.private: + schema: '{{ format "${boolean}" }}' +``` + + + + + +The password format is a string type with masked values in the pack parameters of Cluster profiles and Clusters. 
+ +```bash +registry.password: + schema: '{{ format "${password}" }}' +registry.password: + schema: '{{ format "${password:/^([a-z0-9].*)$/}" }}' +``` + + + + + +The list format checks if the input value matches with any of the options specified in the template. + +```bash +registry.type: + schema: '{{ format "${list:[PACK,GIT,CHART]}" }}' +``` + + + + + +The ipv4 format type checks if the input value is a valid ipv4. + +```bash +registry.hostIp: + schema: '{{ format "${ipv4}" }}' +``` + + + + + +The version format type checks if the input value is a semantic version. + +```bash +registry.version: + schema: '{{ required | format "${version}" }}' +``` + + + + + +Hints are optional short descriptions of the parameter. If defined in the schema template, these descriptions are visible in the UI while configuring the pack parameters in the Profile or the Cluster. One or more descriptions can be combined by using the pipe(|) separator. + +```bash +registry.type: + schema: '{{ hints "description A" "description B" }}' +``` + + + + + + + +**Examples** + +Schema constraints can be combined to support multiple validations using a single template. + + + + + +```bash +registry.addresses.$[]: + schema: '{{ required | format "${ipv4} - ${ipv4}" | hints "ip pool range"}}' +``` + +`registry.addresses.$[]` is an array data type in the config file. The schema template defines that the value is required and the format must match - `${ipv4} - ${ipv4}` + +**Examples**: + +10.10.10.10 - 10.10.10.255 → valid + +10.10.10.10 → invalid + +10.10.10.10-10.10.10.255 → invalid + + + + + +```bash +storageType: + schema: '{{ required | format "${string}, ${string:/size=\d+/}" }}' +``` + +**Examples**: + +type-zeroedthick, size=150 → valid + +type-zeroedthick, size=150 → invalid + +type-zeroedthick, size=s → invalid + + + + + +## Pack Dependency Constraints + +Spectro Cloud provides the flexibility to choose any pack of any version in the profile. Clusters are deployed based on the packs selected in the profile. While this works for most of the cases, it is sometimes required to select a minimum or maximum pack version, or to have dependencies between the packs to ensure the Kubernetes cluster is deployed successfully as desired. + +Pack dependency constraints are the rules defined in the pack metadata file `pack.json`. They are used to define the minimum and maximum supported versions, and also to specify which pack is required or not supported. The pack constraints framework auto-checks for any schema constraints defined in the pack and validates the pack values. This checking occurs while creating or updating Cluster Profiles and Clusters. + +## Pack metadata JSON + +Pack dependency constraints must be defined in the `pack.json` file. The sample pack metadata shown below defines the dependencies under `constraints` key. + +```json +{ + "addonType": "system app", + "cloudTypes": [ + "all" + ], + "displayName": "Test Pack", + "kubeManifests": [], + "layer": "addon", + "name": "pack-constraints-test", + "version": "1.0.0", + "constraints": { + "dependencies": [ + { + "packName": "vault", + "minVersion": "0.6.0", + "maxVersion": "", + "type": "optional" + }, + { + "packName": "csi-vsphere-volume", + "minVersion": "1.0.0", + "maxVersion": "", + "type": "notSupported" + }, + { + "packName": "kubernetes", + "minVersion": "1.17.0", + "maxVersion": "1.18.6", + "type": "required" + } + ] + } +} +``` + + + +:::caution + +If the minimum and maximum versions are not mentioned, the validation is skipped. 
+ +::: + +## Pack Dependency Attributes + + + + + +Name of the dependent pack. + +**Example**: In the example, the three dependent packs are identified by unique pack names such as `vault`, `csi-vsphere-volume`, and `kubernetes`. + + + + + +Minimum supported dependent pack version, any version below the minimum version is not valid. + +**Example**: pack `pack-constraints-test` must require pack `vault` of min version `0.6.0`. + + + + + +Maximum supported dependent pack version, any version above the maximum version is not valid. + +**Example**: pack `pack-constraints-test` must require pack `kubernetes` of min version `1.18.6`. + + + + + + + + + +The dependent pack is optional but validates minimum or maximum versions if the pack is selected. + +**Example**: `vault` pack is optional. + + + + + +The dependent pack is mandatory and must contain a version within the minimum or maximum supported versions, if defined. + +**Example**: `kubernetes` pack must be required of min version `1.17.0` and max version `1.18.6`. Any Kubernetes version below `1.17.0` and above `1.18.6` is not valid. + + + + + +Pack versions within the range of the mentioned minimum and maximum (including the minimum and maximum) are not supported. + +**Example**: `csi-vsphere-volume` pack is not supported if the version selected falls within the min and max versions. + +:::info + +If the minimum and maximum versions are not mentioned, the validation is skipped and all versions are allowed. + +::: + + + + + + + + + +## Pack Resource Constraints + +A successful Kubernetes Cluster deployment is possible only when the cluster has sufficient hardware requirements. We consider the CPU, Memory, and Disk size as the hardware requirements. The minimum resource requests can be varied depending on the workload to be deployed in the cluster. Spectro Cloud users are allowed to select the desired instance type, and the disk size while configuring the machine pool in the Cluster deployment procedure. If the user selects the instance type which does not satisfy the minimum CPU or Memory or Disk size requirements, then there is a high probability that the cluster deployment may not succeed due to insufficient CPU or Memory or Disk size. + +Pack Resource Constraints are a set of rules defined in the pack metadata `pack.json` to specify the minimum CPU, Memory, and Disk size requirements. The pack constraints framework auto-checks the resource constraints and validates the user-selected instance type specifications before the cluster is submitted for deployment. The total input resource capacity is evaluated against the machine pool size with the actual hardware specifications of a selected instance type. + +## Pack metadata JSON + +Pack resource constraints must be defined in the `pack.json` file. The sample pack metadata is shown below to define the `resources` under `constraints` key. 
+ +```json +{ + "addonType": "system app", + "cloudTypes": [ + "all" + ], + "displayName": "Test Pack", + "kubeManifests": [], + "layer": "addon", + "name": "pack-constraints-test", + "version": "1.0.0", + "constraints": { + "resources": [ + { + "type": "cpu", + "minLimit": 2000, + "components": [ + { + "resourceRequestParamRef": "requests.cpu", + "replicaCountParamRef": "replicas", + "scheduleType": "all" + } + ] + }, + { + "type": "memory", + "minLimit": 2048, + "components": [ + { + "resourceRequestParamRef": "requests.memory", + "replicaCountParamRef": "replicas", + "scheduleType": "worker" + } + ] + }, + { + "type": "diskSize", + "minLimit": 10 + } + ] + } +} +``` + +## Pack Resources Attributes + + + + + +The type of resource + +* cpu +* memory +* diskSize + + + + + +The minimum limit of the resource will be considered during the machine pool validation. The resource limit value is required to have the below unit depending on the resource type. Any change of unit will cause inaccurate computation of the total minimum requirement. + +* cpu - millicore (m) +* memory - Mibibyte (Mi) +* diskSize - Gigabyte (GB) + + + + + +The minLimit is the minimum resource requirement for each worker pool in the cluster. This value is sufficient for the basic resource validation, but in some cases where the pack contains one or more associated components, then each component can define its CPU or memory resource requests in the config file `values.yaml`. In this case, a single `minLimit` value is not sufficient as the minimum requirements can be different for each component. + +:::info + +If the components are defined then `minLimit` is ignored during resource validation. + +::: + +The `components` field is an array of the component which consists of these attributes. + + + + + +Resource requests and limits can be defined in the pack `values.yaml`. It is required for the pack constraints framework to know the parameter name from where the resource request value can be read during the resource validation. So, the `resourceRequestParamRef` is the configuration parameter name of the resource request defined in the `values.yaml`. + + + + + +The Kubernetes pod can run in one or more replicas based on the replica count configured in the `values.yaml` file. The resource request values defined in `values.yaml` are for one replica, and the requests must be multiplied by the number of replicas which gives the actual minimum requirement. So, the `replicaCountParamRef` is the configuration parameter name of the replica count defined in the `values.yaml` + + + + + +Kubernetes provides a way to schedule the pods on master/worker nodes or both. Pack Constraints framework must know where the pods are scheduled because the resource validation validates only the master machine pool when the pods are scheduled on master nodes. Similarily, if the pods are scheduled on worker nodes, then only the worker machine pool will be validated. In the case of daemon sets, the pods are scheduled in both master and worker nodes, and the framework validates both master and worker machine pool configurations before the cluster is submitted for deployment. + +* master - pods are scheduled only on master nodes +* worker - pods are scheduled only on worker nodes +* all - pods are scheduled on both master and worker nodes + + + + + + + + + +## Pack Presets + +Pack Presets are the predefined values in a file called `presets.yaml` in the pack. 
It contains an array of the presets for the pack, and is visible in the pack parameters of the Cluster profile and the Cluster. Users can select any preset from the available pack presets, and the predefined values of a selected preset are applied automatically by the Spectro Cloud UI. Presets make pack configuration much easier as multiple pack values are updated at a time and the user does not need to understand all the configuration parameters which get changed depending on various factors. + +## Presets Metadata YAML + +This `presets.yaml` shows two presets + +* `privatePackRegistry` +* `publicPackRegistry` + +with a different set of pre-defined values. + +```bash +presets: +- name: "privatePackRegistry" + displayName: "Private Registry" + group: "registry" + remove: ["registry.ingress.host"] + add: | + registry: + private: true + type: "PACK" + host: + ip: "127.0.0.1" + port: 5000 +- name: "publicPackRegistry" + displayName: "Public Registry" + group: "registry" + remove: ["registry.ingress.host"] + add: | + registry: + private: false + type: "PACK" + host: + ip: "13.233.2.255" + port: 80 +``` + +## Preset Attributes + + + + + +*Name of the preset.* It must be unique. + + + + + +*Name of the preset.* It is visible in the parameters configuration + + + + + +*An array of parameter names.* These are removed from the pack values when a preset is selected. + + + + + +*A set of values in YAML format.* These are added/updated in the pack values when a preset is selected. + + + + + +One or more presets can be categorized into a common group, but only one preset can be selected from the same group of presets. + + + + + +## Pack Macros + +Pack macros are the variables defined in the Cluster profile or in Cluster pack values, and these variables are resolved only at the cluster deployment time. + +## Types of Macros + + + + + +System macros are variables defined by the system. Users are allowed to use these variables and the system is capable of resolving all the variables to values at the time of cluster deployment. + + + + + +```bash +user: + name: "{{ .spectro.system.[VARIABLE_NAME] }}" +``` + +### Supported Variables + + +| Macro | Description | +|-------|-------------| +| `{{.spectro.system.user.name}}`| The name of the user currently logged in. | +| `{{.spectro.system.user.uid}}` | The unique identifier of the user currently logged in. | +| `{{.spectro.system.user.email}}` | The email address of the user currently logged in. | +| `{{.spectro.system.tenant.uid}}` | The unique identifier of the current tenant. | +| `{{.spectro.system.project.name}}` | The name of the project. | +| `{{.spectro.system.project.uid}}` | The unique identifier of the project. | +| `{{.spectro.system.clusterprofile.name}}`| The name of the cluster profile associated with the current project. | +| `{{.spectro.system.clusterprofile.uid}}` | The unique identifier of the cluster profile the pack is part of. | +| `{{.spectro.system.clusterprofile.version}}`| The current version of the cluster profile the pack is part of.| +| `{{.spectro.system.cluster.name}}` | The name of the cluster. | +| `{{.spectro.system.cluster.uid}}` | The unique identifier of the cluster. | +| `{{.spectro.system.cloudaccount.name}}` | The name of the cloud account associated with the current project. | +| `{{.spectro.system.cloudaccount.uid}}` | The unique identifier of the cloud account associated with the current project. | +| `{{.spectro.system.kubernetes.version}}` | The version of Kubernetes currently running on the cluster. 
| +| `{{.spectro.system.reverseproxy.server}}` | The hostname of the reverse proxy server. | +| `{{.spectro.system.reverseproxy.port}}` | The port number of the reverse proxy server. | +| `{{.spectro.system.reverseproxy.protocol}}` | The protocol used by the reverse proxy server, either HTTP or HTTPS. | +| `{{.spectro.system.reverseproxy.vhostport}}` | The port number used by the virtual host on the reverse proxy server. | +| `{{.spectro.system.cloud.type }}` | The type of cloud provider being used, such as AWS, GCP, Azure or other providers. | +| `{{.spectro.system.cloud.region }}` | The region where the cloud resources are located. | +| `{{.spectro.system.clusterprofile.infra.name}}` | The name of the cluster profile. | +| `{{.spectro.system.clusterprofile.infra.uid}}` | The unique identifier of the cluster profile. | +| `{{.spectro.system.clusterprofile.infra.version}}` | The version of the cluster profile. | +| `{{.spectro.system.cluster.kubevip}}`| The IP address of the virtual IP (VIP) assigned to the cluster and load balancer for the control plane. This macro is only available for Edge and vSphere cluster deployments. | + + + + + +```bash +user: + name: "{{ .spectro.system.user.name }}" + uid: "{{ .spectro.system.user.uid}}" +``` + + + + + + + + + +Pack reference macros are custom variables that must be defined in a pack and then can be used as a variable in any pack. If the variable is not defined with a value, then the default value is applied, if specified. If the default value is not specified, then the variable will be resolved to an empty value. + + + + + +```bash +k8s: + version: "{{ .spectro.pack.[PACK_NAME].[VARIABLE_NAME] }}" +``` + +`PACK_NAME` - the name of the pack where the variable is defined + +`VARIABLE_NAME` - the fully qualified name of the variable defined in the pack + + + + + +Referencing Kubernetes pack variable version in CentOS pack values: + +centos values.yaml + +```bash +k8s: + version: "{{ .spectro.pack.kubernetes.version }}" +``` + +kubernetes values.yaml + +```bash +version: 1.18.0 +``` + + + + + + + + + +## Additional Capabilities + +### Sprig Template Functions + +Users are allowed to use the [sprig template functions](http://masterminds.github.io/sprig/) to modify the resolved variable value. + +**Examples** + +```bash +user: + name: "{{ .spectro.system.user.name | upper }}" +``` + +`upper` - sprig template function which converts resolved user name to upper case + +### How to set the default value? + +```bash +k8s: + version: "{{ .spectro.pack.kubernetes.version | default \"1.19.0\"}}" +``` + +:::info + +If the variable `version` is not defined in the pack `kubernetes`, then the default value `1.19.0` will be applied at deployment. In case the default value is not specified then the empty value will be applied. + +::: diff --git a/docs/docs-content/registries-and-packs/registries-and-packs.md b/docs/docs-content/registries-and-packs/registries-and-packs.md new file mode 100644 index 0000000000..4ec9d8165b --- /dev/null +++ b/docs/docs-content/registries-and-packs/registries-and-packs.md @@ -0,0 +1,109 @@ +--- +sidebar_label: "Registries and Packs" +title: "Registries and Packs" +description: "Learn about Packs, how to use and combine Packs, and how to create your Pack ." +hide_table_of_contents: false +sidebar_custom_props: + icon: "nodes" +--- + + + + +# Packs + + +A **Cluster Profile** is made up of layers, each of which is created using a pack. 
Packs can be broadly categorized into two types: + +- **Infrastructure** packs - These packs are used to create the core infrastructure layers required to provision a Kubernetes cluster. These packs include the operating system, Kubernetes, the container network interface (CNI), and the container storage interface (CSI) specifications. Spectro Cloud builds and maintains these infrastructure packs for updates. + + +- **Add-on** packs - These packs are used to form the integrations and application layers that exist on top of the infrastructure layers. Examples of applications are system, authentication, security, monitoring, logging, ingress, load balancer, service mesh, or helm charts. + +Both the infrastructure and add-on packs described above are configurable, and you can define new add-on custom packs from scratch as well. The use case for defining new add-on packs is to drive consistency across your profile deployments. + + +## Pack Structure + +Palette provides a rich collection of out-of-the-box packs for various integrations and also offers extensibility through custom-built packs. To configure an existing infrastructure or add-on pack or to define a new add-on custom pack, it is essential to understand the pack structure. + +Each pack is a collection of files such as manifests, helm charts, Ansible roles, configuration files, and more. Ansible roles, if provided, are used to customize cluster VM images, whereas Kubernetes manifests and Helm charts are applied to the Kubernetes clusters after deployment. The following is a typical pack structure: + + +| **Pack Name** |**Requirement** | **Description** | +|-|-|-| +| `pack.json` | mandatory| Pack metadata.| +| `values.yaml`| mandatory| Pack configuration, parameters exposed from the underlying charts, and templated parameters from Ansible roles. | +| `charts/`| mandatory| Mandatory for Helm chart-based packs. Contains the Helm charts to be deployed for the pack. | +| `manifests/`| mandatory| Mandatory for Manifest-based packs. Contains the manifest files to be deployed for the pack. +| `ansible-roles`| optional| Ansible roles used to install the pack.| +| `logo.png`| optional| Contains the pack logo. | +| `README.md`|optional| The pack description. | + + +Let's look at the examples below to better understand pack structure. + + + + + + + +The example shows the structure of a Helm chart-based pack, **istio-1.6.2**, which is made up of two charts: *istio-controlplane* and *istio-operator*. Each chart has its **values.yaml** file. In this example, we have a pack-level **values.yaml** file and individual chart-level **values.yaml** files.

+ +```bash +. +├── charts/ +│   ├── istio-controlplane.tgz +│   ├── istio-controlplane +│   │   ├── Chart.yaml +│   │   ├── templates/ +│   │   └── values.yaml +│   ├── istio-operator.tgz +│   └── istio-operator +│      ├── Chart.yaml +│      ├── templates/ +│      └── values.yaml +├── logo.png +├── pack.json +└── values.yaml +``` + +
+ + + +This example shows the structure of a Manifest-based pack, *kubeflow-1.2.0*, made up of **kubeflow-kfdef.yaml** and **kubeflow-operator.yaml** manifests. + +```bash +. +├── manifests/ +│   ├── kubeflow-kfdef.yaml +│   └── kubeflow-operator.yaml +├── logo.png +├── pack.json +└── values.yaml +``` + + + +
+ +## Registries + + +The pack registry is a server-side application to store and serve packs to its clients. Packs from a pack registry are retrieved and presented as options during the creation of a cluster profile. Palette supports the configuration of multiple registries. + +## Default Registry + +The default pack registry is Spectro Cloud's public pack registry. It consists of several packs that make it easy for a user to quickly create a cluster profile and launch a Kubernetes cluster with their choice of integrations. Palette maintains all packs in this pack registry and takes care of upgrading packs in the pack registry whenever required. + +## Custom Pack Registry + +Users can set up a custom pack registry using a Docker image provided by Spectro Cloud to upload and maintain custom packs. Spectro Cloud provides a CLI tool to interact with and manage pack content in the pack registry. Custom registries offer a mechanism of extending the capabilities of a platform by defining additional integrations. + +## Spectro CLI + +The Spectro Cloud Command Line Interface (CLI) is a tool to interact with a Spectro Cloud pack registry. You can use the CLI to upload and download packs. The CLI must authenticate with the pack registry before executing any CLI commands. Review the [Spectro Cloud CLI](spectro-cli-reference.md) reference page for usage instructions. + +
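+
+As a quick illustration of that workflow, the commands below log in to a registry and push a pack using the CLI syntax documented on the reference page. The server address and pack directory are placeholders taken from the reference examples.
+
+```bash
+# Authenticate once against your pack registry and set it as the default.
+spectro registry login spectro.io:5000 --default
+
+# Upload a custom pack from a local directory to the registry.
+spectro pack push /tmp/packs/nginx-1.16.1
+```
+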
diff --git a/docs/docs-content/registries-and-packs/spectro-cli-reference.md b/docs/docs-content/registries-and-packs/spectro-cli-reference.md new file mode 100644 index 0000000000..f0bd5a7022 --- /dev/null +++ b/docs/docs-content/registries-and-packs/spectro-cli-reference.md @@ -0,0 +1,347 @@ +--- +sidebar_label: "Spectro Cloud CLI Tool" +title: "Spectro Cloud CLI Tool" +description: "A reference sheet for the Spectro Cloud CLI tool" +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +--- + + + + +# Overview + +The Spectro CLI tool is a command-line interface for the Spectro Cloud Pack Registry server to upload or download the packs using commands. + +## Prerequisites + +- A custom pack registry server must be up and running. + +## Installation + +The Spectro CLI tool is currently available for OSX and Linux. + +1. Download the CLI binary file: + + + + + + ```bash + wget https://software.spectrocloud.com/spectro-registry/cli/v4.0.1/osx/spectro + ``` + + + + + + ```bash + wget https://software.spectrocloud.com/spectro-registry/cli/v4.0.1/linux/spectro + ``` + + + + + +2. Provide the executable permission to the CLI spectro. + + ```bash + chmod +x spectro + ``` + +## Global Arguments + +List of Arguments available to all the Spectro CLI commands: + +## Global Flags + +* List of Flags available to all the Spectro CLI commands: + + * h, --help - help for each command + +## Commands + + + + + +## LOGIN + +Authenticate user with Spectro Cloud pack registry by using the login command: + + + + + +```bash + spectro registry login [SERVER] +``` + + + + + +```bash + spectro registry login spectro.io:5000 +``` + +```bash + spectro registry login spectro.io:5000 --insecure --default +``` + + + + + +### Args + +SERVER - Spectro Cloud pack registry server in the format [host:port] + + +### Flags + +-i, --insecure - Insecure is used when the pack registry is installed in HTTP or HTTPS with self-signed certificates. + +-d, --default - Set the server as default Spectro Cloud pack registry for all the CLI commands. + +**Note:** In case of HTTPS, if you have access to the pack registry's CA certificate, there is no need for the insecure flag; simply place the CA certificate at /etc/spectro/certs.d/[SERVER]/ca.crt. + + + + + +## PUSH + +Upload the pack content from the pack source dir to the Spectro Cloud pack registry. + + + + + +```bash + spectro pack push [PACK_SOURCE_DIR] [flags] +``` + + + + + +```bash + spectro pack push /tmp/packs/nginx-1.16.1 +``` + +```bash + spectro pack push /tmp/packs/nginx-1.16.1 --registry-server spectro.io:5000 +``` + +```bash + spectro pack push /tmp/packs/nginx-1.16.1 --force --message "updated nginx pack values" +``` + + + + + +### Args + +PACK_SOURCE_DIR: Directory location where pack content is located. + +### Flags + +-r, --registry-server string - To override the default Spectro Cloud pack registry + +-f, --force - If a pack with the same tag already exists in the registry, then the *force* option can be used to overwrite the pack contents in the registry. + +-m, --message - A short description about the pack changes. It is mandatory to set this flag when the force option is enabled. + +--skip-digest-check - By default, the *force* option can push the pack only if the pack content digest is different than the registry pack digest. So the *skip digest* command can be used to skip the comparison of the digests. 
+ + + + + +## LIST + +List all the packs from the Spectro Cloud pack registry: + + + + + +```bash + spectro pack ls [flags] +``` + + + + + +```bash + spectro pack ls spectro.io:5000 +``` + +```bash + spectro pack ls spectro.io:5000 --name ubuntu --registry-server spectro.io:5000 +``` + + + + + +### Flags + +-n, --name string - packs can be filtered by pack name + +-r, --registry-server string - To override the default Spectro Cloud pack registry + + + + + +## PULL + +Download the packs from the Spectro Cloud pack registry to a pack target location: + + + + + +```bash + spectro pack pull NAME[:TAG|@DIGEST] TARGET_DIR [flags] +``` + + + + + +```bash + spectro pack pull nginx:1.16.1 /tmp/packs +``` + +```bash + spectro pack pull nginx@sha256:5269f073ac8e3c2536270b496ca1cc537e32e44186a5a014b8c48cddca3c6e87 /tmp/packs --registry-server spectro.io:5000 +``` + + + + + +### Args + +PACK_NAME: TAG|@DIGEST - Name of the pack for a particular tag or a sha digest. + +PACK_TARGET_DIR - Directory location where pack content will be pulled. + +### Flags + +-r, --registry-server string - To override the default Spectro Cloud pack registry. + + + + + +## ADD (Add a Tag) + +Create a new tag to a pack which is already pushed to the Spectro Cloud pack registry: + + + + + +```bash + spectro pack tag add SOURCE_PACK:TAG TARGET_LABEL [flags] +``` + + + + + +```bash + spectro pack tag add ubuntu:lts__14.4.3 stable +``` + +```bash + spectro pack tag add ubuntu:lts__14.4.3 14.4.3-beta -g lts -r spectro.io:5000 +``` + + + + + +**Note:** Tag is a combination of label and the group name. The label is mandatory whereas the group is optional. + +tag → <group>__<label> + +Ex. lts___14.4.3 : lts → group, 14.4.3 → label + +### Args + +PACK_NAME: TAG - Name of the pack for a particular tag to which a new tag will be created. + +TARGET_LABEL - Target tag label. + +### Flags + +-g, --group string - Target tag group. + +-r, --registry-server string - To override the default Spectro Cloud pack registry. + + + + + +## DELETE (Delete a tag) + +Delete a tag to a pack that is already pushed to the Spectro Cloud pack registry. + + + + + +```bash + spectro pack tag delete PACK:TAG [flags] +``` + + + + + +```bash + spectro pack tag delete ubuntu:14.4.3 +``` + +```bash + spectro pack tag delete ubuntu:14.4.3 -r spectro.io:5000 +``` + + + + + +**Note:** Parent tags like major version (Ex: 14.x) and minor version (Ex: 14.4.x) can not be deleted as these are auto-generated by the system. So, when no tags are associated with the pack then these are auto-deleted by the system. When a tag (Ex: 14.4.3) is deleted then the major and minor version tags are auto-linked to the remaining tags of a pack. + +### Args + +PACK_NAME: TAG - Pack name and Tag which needs to be deleted. + +### Flags + +-r, --registry-server string - To override the default Spectro Cloud pack registry. + + + + + +## VERSION + +Check the version of the Spectro CLI that is currently installed. + +```bash + spectro version +Spectro ClI Version 4.0.1 linux/amd64 +``` + + + + diff --git a/docs/docs-content/release-notes.md b/docs/docs-content/release-notes.md new file mode 100644 index 0000000000..1f3171e87a --- /dev/null +++ b/docs/docs-content/release-notes.md @@ -0,0 +1,1502 @@ +--- +sidebar_label: "Release Notes" +title: "Release Notes" +description: "Spectro Cloud release notes for Palette and its sub-components." 
+hide_table_of_contents: false +toc_min_heading_level: 2 +toc_max_heading_level: 3 +sidebar_position: 0 +sidebar_custom_props: + icon: "audits" +tags: ["release-notes"] +--- + + +## September 20, 2023 - Release 4.0.13 + +This release contains minor bug fixes. + +### Bug Fixes + +- An issue where `etcd` sometimes retained an incorrect state entry of cluster nodes during scaling operations is now resolved. + +- The issue where Palette does not display Helm Charts in third-party registries that contain charts without a logo URL is now resolved. + +- Longhorn and Cilium Helm Chart-based packs are fixed to deploy with correct values. + +- An issue where Palette was updating clusters when *Pause Platform Updates* was enabled at the project level is fixed. This issue did not cause nodes to repave or for clusters to become unhealthy. + + +## September 1, 2023 - Release 4.0.8 + +This release contains minor bug fixes. + +### Bug Fixes + +- A bug that caused ARM64 Nvidia Jetson cluster deployment to fail has been resolved. + +- The problem with a blank **drop-down Menu** when trying to add a CoxEdge cloud account is resolved with a populated menu. + +## August 31, 2023 - Release 4.0.7 + +This release contains minor bug fixes. + +### Bug Fixes + +- A problem with Palette retaining original pack values in manifests after users apply modifications has been resolved. + + +## August 27, 2023 - Release 4.0.0 {#release-4-0} + +Palette 4.0.0 introduces new features and improvements, including [Palette VerteX](vertex/vertex.md) - a FIPS-compliant edition - and the [Virtual Machine Orchestrator](vm-management/vm-management.md) (VMO) which enables unified management of containerized applications and virtual machines in Kubernetes. Additionally, Palette 4.0.0 introduces a new Pack User Interface (UI) that improves the user experience for finding and installing packs. Check out the [Upgrade Notes](enterprise-version/upgrade.md) and release notes below to learn more about the new features and improvements in Palette 4.0.0. + + +### Palette {#release-4-0-palette} + + +#### Breaking Changes {#release-4-0-palette-breaking-changes} + +- Deploying Virtual Clusters directly into host clusters is no longer supported. Use Cluster Groups to deploy Virtual Clusters in host clusters. For guidance on deploying Virtual Clusters into a Cluster Group, check out the [Add Virtual Clusters to a Cluster Group](clusters/palette-virtual-clusters/deploy-virtual-cluster.md) documentation. + + +#### Features {#release-4-0-palette-features} + + +- The Virtual Machine Orchestrator (VMO) is now available in Palette. You can natively manage virtual machines from Palette. Palette uses kubevirt under the hood to facilitate the management of virtual machines. Review the [VMO](vm-management/vm-management.md) documentation to learn more. + + +- Custom Pack registries now support the ability for you to upload your own SSL Certificate Authority (CA). You can use HTTPS to connect to your private registries by providing your SSL certificate. Refer to the [Configure a Custom Pack Registry in Palette](registries-and-packs/adding-a-custom-registry.md) documentation to learn more. + + +- A new Pack User Interface (UI) is available in Palette. This new UI allows you to search for packs across registries while providing you with important metadata. The new search experience improves the user experience for finding and installing packs. + + +- Pack registries now support the Open Container Initiative (OCI) image format. 
This allows you to use OCI images in your custom pack registries instead of the previous Palette-specific format. + + +- Palette now supports VMware vSphere 8.0. You can now deploy host clusters with VMware vSphere 8.0. + + +- Host clusters deployed to VMware now support [VMware NSX](https://www.vmware.com/products/nsx.html) overlay networking. + + +- Palette's internal message communication between components now uses the gRPC protocol. The previous usage of [NATS](https://nats.io/) has been deprecated and will be removed in a future release. You can review a network diagram of Palette's communication architecture on the [Network Ports](architecture/networking-ports.md) page. If you are using network proxies, we encourage you to review the [gRPC and Proxies](architecture/grps-proxy.md) documentation for potential issues. + + +- Pack deprecated status is now available in the Palette UI. This lets you identify which packs are deprecated and will be removed in future releases. Review the [Maintenance Policy](integrations/maintenance-policy.md) documentation to learn more. + + +- Self-hosted Palette now provides a new installation method using the [Palette CLI](palette-cli/palette-cli.md). You can now install a self-hosted Palette through the Palette CLI. The CLI provides an interactive installation experience allowing you to configure Palette's installation parameters. Check out the [Install Enterprise Cluster](enterprise-version/deploying-an-enterprise-cluster.md) documentation to learn more. The previous installation method using the Palette OVA Installer is deprecated and unavailable in this release. + + +- Private Cloud Gateway (PCG) deployments are now available through the Palette CLI. You can now install a PCG through the Palette CLI. The CLI provides an interactive installation experience allowing you to configure the PCG installation parameters. Check out the Palette CLI [PCG install command](palette-cli/commands.md#pcg) documentation to learn more. The previous installation method using the PCG Docker image is deprecated and unavailable in this release. + + +- You can now specify namespace labels and annotations in a Container Network Interface (CNI), Container Storage Interface (CSI), and Add-on pack's YAML configuration. This allows you to specify labels and annotations that are applied to specific namespaces in the cluster. To learn more about configuring labels and annotations, refer to the [Add-on Profile](cluster-profiles/create-add-on-profile.md#pack-labels-and-annotations) documentation. + + + +#### Improvements {#release-4-0-palette-improvements} + +- You can now download different kubeconfig files for your host clusters in Palette. You can download an admin kubeconfig file or a user kubeconfig file. The admin kubeconfig file allows you to perform all actions on the cluster. In contrast, the user kubeconfig file is only accessible to those with the proper Palette permissions to access the cluster. To learn more, check out the Palette [kubeconfig](clusters/cluster-management/kubeconfig.md) documentation. + + +- You can now install a self-hosted Palette through the Palette CLI. The CLI provides an interactive installation experience allowing you to configure Palette's installation parameters. Learn more about the Palette [EC command](palette-cli/commands.md#ec) documentation. + + +- The login banner message in Palette is now also exposed in the Palette CLI. Users logging in to Palette through the CLI will receive the same message as those logging in through the UI. 
Refer to the [Login Banner](tenant-settings/login-banner.md) documentation to learn more. + + +- You can now configure the logout timer for users in Palette. This allows you to set the time a user can be inactive before they are automatically logged out of Palette. The default value is 240 minutes. + + +- Private Cloud Gateway (PCG) deployments and self-hosted Palette Enterprise Clusters (EC) are now deployed with Kubernetes version 1.25. + + +- Palette now supports Kubernetes 1.27.x. You can deploy host clusters with Kubernetes 1.27.x. + + +- The Cox Edge provider is upgraded to version 0.5.0. + + +- You can now access Palette documentation directly from the Palette UI. This allows you to quickly access the documentation for the page you are currently on. You can find the documentation link in the top right corner of the Palette UI. + + +- Palette now supports configuring the time interval for node repavement. The time interval is the amount of time that Palette waits before it starts the node replacement process on nodes in the cluster. The default time interval is 15 minutes. Refer to the [Node Pool](clusters/cluster-management/node-pool.md) documentation to learn more. + + + +#### Deprecations and Removals {#release-4-0-palette-deprecation-removals} + +- The Palette OVA Installer is deprecated and no longer provided as of this release. Self-hosted Palette now provides a new installation method using the Palette CLI. The CLI provides an interactive installation experience allowing you to configure Palette's installation parameters. Check out the [Install Enterprise Cluster](enterprise-version/deploying-an-enterprise-cluster.md) documentation to learn more. + + + +- The Palette PCG Docker installation method is deprecated and not available in this release. You can now install a PCG through the Palette CLI. The CLI provides an interactive installation experience allowing you to configure Palette's installation parameters. Check out the Palette CLI [PCG install command](palette-cli/commands.md#pcg) documentation to learn more. + + +#### Known Issues {#release-4-0-palette-known-issues} + +- With the deprecation of deploying Virtual Clusters directly into host clusters. The ability to specify an Add-on profile to a Palette Virtual Cluster is currently unavailable. This will be addressed in an upcoming release. + + + + + +### Edge {#release-4-0-edge} + + +#### Features {#release-4-0-edge-features} + + +- Palette Edge now supports ARM64 architecture. This is a preview feature and still active in development. You can deploy Palette Edge on ARM64 architecture, such as Nvidia Jetson (Orin). Review the list of available [ARM64 packs](integrations/integrations.mdx) in Palette before deploying Palette Edge on ARM64 architecture. + + +- Palette Edge now supports the ability for you to configure OIDC Identity Providers (IDP) at the Kubernetes layer of a Cluster Profile. Refer to the Kubernetes distributions [pack documentation](integrations/integrations.mdx) to learn more. + + +#### Improvements {#release-4-0-edge-improvements} + +- You can now assign dynamic tags to your edge hosts by specifying files or invoking a script that returns a JSON payload containing the tag values. This allows you to dynamically assign tags to your Edge hosts based on the host's local environment. Refer to the [Edge Installer Configuration Tags](clusters/edge/edge-configuration/installer-reference.md#tags) documentation to learn more. + + +- You can now skip the auto registration of Edge hosts in Palette. 
This allows you to manually register your Edge hosts in Palette by either using the QR code method or by providing the machine ID in Palette. Set the Edge Installer configuration parameter `disableAutoRegister` to `true` to turn off auto registration. Refer to the [Edge Installer Configuration](clusters/edge/edge-configuration/installer-reference.md) documentation to learn more. + + +- You can configure the node drainage behavior for your Edge hosts. To learn more about configuring node drainage, refer to the [Bring Your Own OS (BYOOS) pack](integrations/byoos.md#parameters) documentation. + + +#### Known Issues {#release-4-0-edge-known-issues} + +- Palette eXtended Kubernetes - Edge (PXKE) and RKE2 cannot be upgraded from version 1.26.4 to 1.27.2 in an active cluster. Create a new cluster profile with the latest version of PXKE or RKE2 to upgrade to version 1.27.2. This will be addressed in an upcoming release. + + + +### Palette Dev Engine (PDE) {#release-4-0-pde} + + +#### Features {#release-4-0-pde-features} + +- A Visual Studio Code (VS Code) extension is now available for Palette Dev Engine (PDE). This extension allows you to deploy and manage virtual clusters directly from VS Code. To learn more, you can review the [Palette PDE Plugin](https://marketplace.visualstudio.com/items?itemName=SpectroCloud.extension-palette) documentation. + + +- The Palette CLI now supports managing App Profiles and Apps in Palette Dev Engine (PDE). You can now create, update, and delete App Profiles and Apps directly from the CLI. Use the `palette pde app-profile` and `palette pde app` commands to manage App Profiles and Apps. Refer to the [Palette CLI](palette-cli/commands.md) documentation or use the `--help` flag to learn more. + + +### Virtual Machine Orchestrator (VMO) {#release-4-0-vmo} + + + +#### Features {#release-4-0-vmo-features} + + +- Host clusters supporting Virtual Machine (VM) workloads can now be placed in host maintenance mode, with the ability to choose which Kubernetes node to place in maintenance mode. When a node is placed in maintenance mode, also known as “cordoned”, the VM workload is automatically migrated to another healthy node in the cluster without any disruption. + + +- VMO now supports importing a VMware OVA template from VMware vSphere into Palette. You can then deploy the imported template as a VM workload in a host cluster. + + +- You can now migrate a VM from VMware vSphere to a host cluster in Palette through the Palette CLI. The CLI provides an interactive migration experience allowing you to configure the VM migration parameters. + + + +### VerteX {#release-4-0-vertex} + + +#### Features {#release-4-0-vertex-features} + +- [Palette VerteX](https://www.spectrocloud.com/news/spectro-cloud-announces-palette-vertex-for-government) is now available and brings FIPS 140-2 cryptographic modules to the Palette management platform and deployed clusters. Palette VerteX is available to all government and private sector organizations that value strong data protection, backed by the Spectro Cloud Government practice, a growing ecosystem of specialist channel partners, and continental US technical support. Refer to the [Palette VerteX](vertex/vertex.md) documentation to learn more. + + +- You can install Palette VerteX in a VMware environment through the Palette CLI. The CLI provides an interactive installation experience allowing you to configure Palette VerteX's installation parameters.
To learn more, refer to the Palette [VMware install instructions](vertex/install-palette-vertex/install-on-vmware/install.md) documentation. You can also install Palette VerteX in a FIPS-certified Kubernetes cluster. Check out the [Kubernetes install instructions](vertex/install-palette-vertex/install-on-kubernetes/install.md) for more details. + + + +### Terraform {#release-4-0-terraform} + +- Version 0.15.0 of the [Spectro Cloud Terraform provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) is available. For more details, refer to the Terraform provider [release page](https://github.com/spectrocloud/terraform-provider-spectrocloud/releases). + + + +### Education {#release-4-0-education-features} + +- A new Edge tutorial is available to learn how to deploy an Edge cluster using Palette with VMware. The [Deploy an Edge Cluster on VMware](clusters/edge/site-deployment/deploy-cluster.md) tutorial walks you through creating Edge artifacts, creating a Cluster Profile, and deploying an Edge cluster on VMware. + + +- The documentation site for Palette now provides a chatbot capable of answering your questions about Palette. The chatbot is available in the bottom right corner of the documentation site. You can ask the chatbot questions about Palette, and it will provide you with relevant answers and documentation links. + + + +### Packs {#release-4-0-packs} + + +#### Pack Notes {#release-4-0-packs-pack-notes} + +- A new community pack repository is available. The Palette Community Repository allows partners and customers to contribute and share their packs. For more details, refer to the Palette Community Repository [README](https://github.com/spectrocloud/pack-central). + +- The Spectro-VM-Dashboard pack is renamed to Virtual Machine Orchestrator. + +- This release introduces the start of a formal maintenance policy for packs. Several packs are now marked as deprecated, disabled, and deleted. A complete list of packs that are deprecated, disabled, and deleted is available in the [Deprecations and Removals](#release-4-0-packs-pack-deprecation-removals) section below. To learn more about the pack maintenance policy, refer to the [Maintenance Policy](integrations/maintenance-policy.md) documentation. + + + + +The following new packs are available in Palette 4.0.0. + +
+ + + +#### Kubernetes {#release-4-0-packs-kubernetes} + +| **Pack** | **New Version** | +|--------------------|----------| +| K3s | 1.27.2 | +| Kubernetes AKS | 1.27.0 | +| Kubernetes Coxedge | 1.25.10 | +| Kubernetes Coxedge | 1.26.5 | +| Kubernetes Coxedge | 1.27.2 | +| Kubernetes EKS | 1.27.0 | +| Kubernetes GKE | 1.24.14 | +| Kubernetes GKE | 1.25.10 | +| Kubernetes GKE | 1.26.5 | +| Kubernetes GKE | 1.27.2 | +| MicroK8s | 1.27.0 | +| Palette eXtended Kubernetes| 1.24.14 | +| Palette eXtended Kubernetes| 1.25.10 | +| Palette eXtended Kubernetes| 1.26.5 | +| Palette eXtended Kubernetes| 1.27.1 | +| Palette eXtended Kubernetes - Edge | 1.27.2 | +| RKE2 | 1.24.6 | +| RKE2 | 1.25.10 | +| RKE2 | 1.26.3 | +| RKE2 | 1.26.5 | +| RKE2 | 1.27.2 | + + + +#### CNI {#release-4-0-packs-cni} + +| **Pack** | **New Version** | +|--------------------|----------| +| AWS VPC CNI | 1.13.0 | +| AWS VPC CNI | 1.17.0 | +| Calico | 3.25.1 | +| Calico | 3.26.0 | +| Cilium OSS | 1.14.0 | +| Flannel | 0.22.0 | + + + + +#### CSI {#release-4-0-packs-csi} + +| **Pack** | **New Version** | +|--------------------|----------| +| AWS EBS CSI | 1.17.0 | +| AWS EBS CSI | 1.20.0 | +| AWS EFS CSI | 1.5.06 | +| Azure Disk CSI | 1.26.3 | +| Longhorn CSI | 1.4.1 | +| Portworx CSI | 3.0.0 | +| Rook Ceph | 1.11.9 | +| vSphere CSI | 3.0.0 | +| vSphere CSI | 3.0.2 | + + + +#### Add-on Packs {#release-4-0-packs-add-on-packs} + +| **Pack** | **New Version** | +|--------------------|----------| +| AWS ALB | 2.5.1 | +| AWS Cluster Autoscaler | 1.26.3 | +| External Secrets Operator | 0.8.1| +| Image Swap | 1.5.2 | +| MetalLB | 0.13.10 | +| Nvidia GPU Operator | 23.3.2 | +| Open Policy Agent | 3.12.0 | +| Prometheus Grafana | 46.4.0 | +| Vault | 0.25.0 | + + + +#### Community Packs {#release-4-0-packs-community-packs} + +| **Pack** | **New Version** | +|--------------------|----------| +| Ngrok Ingress Controller | 0.9.0 | + + +#### FIPS Packs {#release-4-0-packs-fips-packs} + +| **Pack** | **New Version** | +|-------------------------------------------|---------------| +| AWS EBS CSI | 1.17.0 | +| AWS VPC CNI | 1.1.17 | +| Calico | 3.25.1 | +| Calico | 3.4.1 | +| Longhorn CSI | 1.4.1 | +| Palette eXtended Kubernetes | 1.24.10 | +| Palette eXtended Kubernetes | 1.24.13 | +| Palette eXtended Kubernetes | 1.24.14 | +| Palette eXtended Kubernetes | 1.25.6 | +| Palette eXtended Kubernetes | 1.25.9 | +| Palette eXtended Kubernetes | 1.25.10 | +| Palette eXtended Kubernetes | 1.26.3 | +| Palette eXtended Kubernetes | 1.26.4 | +| Palette eXtended Kubernetes | 1.26.5 | +| Palette eXtended Kubernetes | 1.27.1 | +| Palette eXtended Kubernetes | 1.27.2 | +| Palette eXtended Kubernetes - Edge (PXKE) | 1.24.13 | +| Palette eXtended Kubernetes - Edge (PXKE) | 1.25.9 | +| Palette eXtended Kubernetes - Edge (PXKE) | 1.26.4 | +| Palette eXtended Kubernetes - Edge (PXKE) | 1.27.2 | +| RKE2 | 1.24.6 | +| RKE2 | 1.25.0 | +| RKE2 | 1.25.2 | +| RKE2 | 1.25.10 | +| RKE2 | 1.26.4 | +| RKE2 | 1.26.5 | +| RKE2 | 1.27.2 | +| vSphere CSI | 3.0 | + + + +#### Deprecations and Removals {#release-4-0-packs-pack-deprecation-removals} + +The following packs are marked as deprecated, disabled, or deleted. Refer to the [Maintenance Policy](integrations/maintenance-policy.md) for more details on the deprecation and removal process. + +
+ +#### Operating Systems + +| **Pack** | **Version** | **Status** | +|-------------------------------------------|-------------|--------------| +| OpenSuse Leap | 15.4 | Disabled | +| Ubuntu (For Edge) | 20.04 | Disabled | +| Ubuntu (For Edge) | 22.04 | Disabled | + +#### Kubernetes + +| **Pack** | **Version** | **Status** | +|-------------------------------------------|-------------|--------------| +| MicroK8s | 1.23 | Deprecated | +| Konvoy | 1.19.10 | Deleted | +| Konvoy | 1.19.15 | Deleted | +| Konvoy | 1.20.8 | Deleted | +| Konvoy | 1.20.11 | Deleted | +| Konvoy | 1.21.6 | Deleted | +| Kubernetes AKS | 1.22 | Deleted | +| Kubernetes AKS | 1.23 | Deleted | +| Kubernetes AKS | 1.24 | Deleted | +| Kubernetes Coxedge | 1.21.14 | Deprecated | +| Kubernetes Coxedge | 1.22.12 | Deprecated | +| Kubernetes Coxedge | 1.23.9 | Deprecated | +| Kubernetes EKS | 1.17 | Deprecated | +| Kubernetes EKS | 1.18 | Deprecated | +| Kubernetes EKS | 1.18 | Deprecated | +| Kubernetes EKS | 1.19 | Deprecated | +| Kubernetes EKS | 1.20 | Deprecated | +| Kubernetes EKS | 1.21 | Deprecated | +| Kubernetes EKS | 1.22 | Deprecated | +| Kubernetes EKS | 1.23 | Deprecated | +| Kubernetes EKSD | 1.18.9 | Disabled | +| Kubernetes EKSD | 1.19.6 | Disabled | +| Kubernetes EKSD | 1.20.8 | Disabled | +| Kubernetes EKSD | 1.21.6 | Disabled | +| Kubernetes GKE | 1.24.10 | Deleted | +| Kubernetes GKE | 1.25.7 | Deleted | +| Kubernetes GKE | 1.26.4 | Deleted | +| K3s | 1.22.13 | Deprecated | +| K3s | 1.22.15 | Deprecated | +| K3s | 1.23.10 | Deprecated | +| K3s | 1.23.12 | Deprecated | +| K3s | 1.24.6 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.19.0 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.19.4 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.19.5 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.19.6 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.19.7 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.19.8 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.19.9 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.19.10 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.19.11 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.19.12 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.19.13 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.19.14 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.19.15 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.19.16 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.20.0 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.20.1 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.20.2 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.20.4 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.20.5 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.20.6 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.20.7 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.20.8 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.20.9 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.20.10 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.20.11 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.20.12 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.20.14 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.21.0 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.21.1 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.21.2 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.21.3 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 
1.21.5 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.21.6 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.21.8 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.21.10 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.21.14 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.22.7 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.22.12 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.23.4 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.23.9 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.23.16 | Deprecated | +| Palette eXtended Kubernetes (PXK) | 1.23.17 | Deprecated | +| Palette eXtended Kubernetes - Edge (PXKE) | 1.22.15 | Deprecated | +| Palette eXtended Kubernetes - Edge (PXKE) | 1.23.12 | Deprecated | +| Palette eXtended Kubernetes - Edge (PXKE) | 1.24.6 | Deprecated | +| RKE2 | 1.22.12 | Disabled | +| RKE2 | 1.22.13 | Deprecated | +| RKE2 | 1.22.15 | Deprecated | +| RKE2 | 1.23.9 | Disabled | +| RKE2 | 1.23.10 | Deprecated | +| RKE2 | 1.23.12 | Deprecated | +| RKE2 | 1.24.6 | Deprecated | + + + +#### CNI + +| **Pack** | **Version** | **Status** | +|---------------|---------------|--------------| +| Calico | 3.9 | Deprecated | +| Calico | 3.10 | Deprecated | +| Calico | 3.16 | Deprecated | +| Calico | 3.19 | Deprecated | +| Calico | 3.22 | Deprecated | +| Cilium OSS | 1.10.9 | Deprecated | +| Cilium OSS | 1.12.3 | Deprecated | +| Cilium OSS | 1.12.4 | Deprecated | +| Flannel CNI | 0.10.0 | Deprecated | + + + +#### CSI + +| **Pack** | **Version** | **Status** | +|---------------------------------------------|--------------|--------------| +| AWS EBS CSI | 1.0.0 | Deprecated | +| AWS EBS CSI | 1.5.1 | Deprecated | +| AWS EBS CSI | 1.8.0 | Deprecated | +| AWS EBS CSI | 1.10.0 | Deprecated | +| AWS EBS CSI | 1.12.0 | Deprecated | +| AWS EFS CSI | 1.3.6 | Deprecated | +| Azure CSI Driver | 1.20.0 | Deprecated | +| Azure Disk | 1.0.0 | Deprecated | +| GCE Persistent Disk CSI | 1.7.1 | Deprecated | +| GCE Persistent Disk | 1.0.0 | Deprecated | +| Openstack Cinder | 1.18 | Deprecated | +| Openstack Cinder | 1.19 | Deprecated | +| Openstack Cinder | 1.20 | Deprecated | +| Openstack Cinder | 1.21 | Deprecated | +| Openstack Cinder | 1.22 | Deprecated | +| Openstack Cinder | 1.23 | Deprecated | +| Portworx CSI AWS | 2.9.0 | Deprecated | +| Portworx CSI AWS | 2.10 | Deprecated | +| Portworx CSI GCP | 2.6.1 | Deprecated | +| Portworx CSI Generic | 2.11.2 | Deprecated | +| Portworx CSI Generic | 2.11.4 | Deprecated | +| Portworx CSI Vsphere | 2.8.0 | Deprecated | +| Portworx CSI Vsphere | 2.9.0 | Deprecated | +| Portworx CSI Vsphere | 2.10 | Deprecated | +| Rook-Ceph CSI | 1.5.9 | Deprecated | +| VSphere CSI | 1.0.0 | Deprecated | +| VSphere CSI | 2.3.0 | Deprecated | +| VSphere CSI | 2.5.2 | Deprecated | +| VSphere Volume | 1.0.0 | Deprecated | + + + + + +#### Add-on + +| **Pack** | **Version** | **Status** | +|-------------------------------|-----------------|--------------| +| AWS Cluster Autoscaler | 1.0.0 | Deprecated | +| AWS EFS Addon | 1.3.6 | Deprecated | +| Dex | 2.21.0 | Deprecated | +| Dex | 2.25.0 | Deprecated | +| Dex | 2.28.0 | Deprecated | +| External DNS | 0.7.2 | Deprecated | +| External Secrets | 8.5.0 | Deprecated | +| External Secrets Operator | 0.5.6 | Deprecated | +| External Secrets Operator | 0.6.0 | Deprecated | +| Hashicorp Vault | 0.3.1 | Deprecated | +| Hashicorp Vault | 0.6.0 | Deprecated | +| Hashicorp Vault | 0.9.0 | Deprecated | +| Hashicorp Vault | 0.11.0 | Deprecated | +| Hashicorp Vault | 0.17.1 | 
Deprecated | +| Hashicorp Vault | 0.20.1 | Deprecated | +| Image Swap | 1.4.2 | Deprecated | +| Istio | 1.6.2 | Deprecated | +| Kong | 1.4 | Deprecated | +| Kubernetes Dashboard | 2.0.1 | Deprecated | +| Kubernetes Dashboard | 2.1.0 | Deprecated | +| Kubernetes Dashboard | 2.4.0 | Deprecated | +| Kubernetes Dashboard | 2.5.1 | Deprecated | +| MetalLB | 0.8.3 | Deprecated | +| MetalLB | 0.9.5 | Deprecated | +| Nginx | 0.26.1 | Deprecated | +| Nginx | 0.43.0 | Deprecated | +| Nginx | 1.0.4 | Deprecated | +| Nginx | 1.2.1 | Deprecated | +| Nginx | 1.3.0 | Deprecated | +| Nvidia GPU Operator | 1.9.1 | Deprecated | +| Open Policy Agent | 3.5.1 | Deprecated | +| Open Policy Agent | 3.6.0 | Deprecated | +| Palette Upgrader | 3.0.51 | Deprecated | +| Palette Upgrader | 3.0.70 | Deprecated | +| Palette Upgrader | 3.0.95 | Deprecated | +| Palette Upgrader | 3.1.26 | Deprecated | +| Portworx Generic Addon | 2.11.2 | Deprecated | +| Portworx Generic Addon | 2.11.4 | Deprecated | +| Prometheus Operator | 12.3.0 | Deprecated | +| Prometheus Operator | 19.2.3 | Deprecated | +| Prometheus Operator | 30.0.3 | Deprecated | +| Prometheus Operator | 30.2.0 | Deprecated | +| Prometheus Operator | 35.5.1 | Deprecated | +| Prometheus Operator | 37.2.0 | Deprecated | +| Reloader | 0.0.104 | Deprecated | +| Spectro Proxy | 1.0.0 | Deprecated | +| Spectro Proxy | 1.1.0 | Deprecated | + + +## September 21, 2023 - Release 3.4.114 + +This release contains a minor bug fix. + +### Bug Fix + +- The *Throttling* error that reports a failure to create, update, and reconcile Auto Scaling Groups (ASG) tags is fixed. Previously, Palette reconciled tags every two minutes regardless of differences. Palette now reconciles tags only when it detects differences. + + +## May 22, 2023 - Release 3.4.0 + +Palette 3.4.0 has various security upgrades, better support for multiple Kubernetes environments, a new cluster deployment platform, and increased user customization options for Palette, Edge, and Palette Dev Engine. Additionally, it includes updates for several packs and stops supporting Kubernetes 1.23 in Azure Kubernetes Service (AKS). You can find upgrade notes for self-hosted Palette 3.4 in the [Upgrade Notes](enterprise-version/upgrade.md#palette-34) documentation. + + +### Palette + +#### Breaking Changes + +- Installations of self-hosted Palette in a Kubernetes cluster now require [cert-manager](https://cert-manager.io/docs/installation/) to be available before installing Palette. Cert-manager is used to enable Mutual TLS (mTLS) between all of Palette's internal components. Refer to the prerequisites section of [Installing Palette using Helm Charts](enterprise-version/deploying-palette-with-helm.md) guide for more details. + + +- Self-hosted Palette for Kubernetes now installs Palette Ingress resources in a namespace that Palette manages. Prior versions of Palette installed internal components ingress resources in the default namespace. Review the [Upgrade Notes](enterprise-version/upgrade.md#palette-34) to learn more about this change and how to upgrade. + + +#### Features + +- Palette's tenant administrators now have the ability to set up a personalized login banner for both the system and tenant levels. Refer to the [Login Banner](tenant-settings/login-banner.md) reference page to learn more. + + +- You can now access a customized Amazon Machine Image (AMI) in Palette for Amazon Elastic Kubernetes Service (Amazon EKS) with support for AWS Launch Template. 
This allows you to personalize your EKS nodes and EBS root volumes by creating your own custom AMI. + + +- Palette now supports using IAM Roles for Service Accounts (IRSA) for AWS clusters. Enable the Palette managed policy *Controllers EKS Policy* to enable this feature. Refer to the [AWS Required Policies](clusters/public-cloud/aws/required-iam-policies.md) for more information about the managed policy. + + +- You can now deploy clusters in the Google Kubernetes Engine (GKE) environment with Palette. Use the [Create and Manage GCP GKE Cluster](clusters/public-cloud/gcp/create-gcp-gke-cluster.md) guide to learn how to deploy clusters to GKE with Palette. + + + +- Palette now supports image swap to override specific registries, images, or a combination of both. You can add an `imageSwap` configuration to the Kubernetes pack YAML to point to a different registry or image. Check out the [Image Swap](clusters/cluster-management/image-swap.md) reference resource to learn more. + + + +- Deploying a host cluster to AWS with Red Hat Enterprise Linux (RHEL) as the Operating System (OS) is now possible. This can be done by utilizing the *Bring Your Own Operating System* (BYOOS) pack, which allows for the creation of a custom AMI based on RHEL. + + +#### Improvements + +- OpenID Connect (OIDC) identity provider configuration has now moved to the Kubernetes layer. You can now select the desired OIDC setting when selecting a Kubernetes distribution and version during cluster profile creation. A configuration sketch is included at the end of this section. + + +- New macros for gathering attributes about a cluster profile, such as name, uid, and version, are now available. Refer to the [Macros Supported Variables](registries-and-packs/pack-constraints.md#pack-resource-constraints) documentation to learn more. + + +- Cluster profiles can now be filtered by scope, such as tenant and project. + + +- The tenant administrator dashboard now displays the cluster usage and cost information at the tenant scope. + + +- The Cox Edge cluster deployment wizard now populates a Point of Presence (PoP) list to help you select the geographical deployment target. + + +- As a tenant administrator, you can now quickly review all Edge hosts that are deployed in your tenant and quickly identify which project they belong to. + + +- The Cox Edge provider has been updated to support worker nodes' load balancers and customizable volume mounts for virtual machines. + + +- The Metal as a Service (MAAS) provider has been updated to improve the node reconciliation behavior. In scenarios where the machine's Intelligent Platform Management Interface (IPMI) is powered off, the machine is powered on instead of provisioning a new node. + + + +#### Bug Fixes + +- A bug that caused issues with the deletion of a cluster's profile manifest has been fixed. Manifests are now correctly deleted when removed from a cluster profile. + + +- The problem with Palette not removing namespaces when removing a layer from a cluster profile has been resolved. + + + +- You can now configure the behavior of the Palette agent to disable sending workload reports to the Palette control plane. This addresses scenarios where large clusters with many nodes exceed the 1 MB payload threshold, resulting in agent failures. Refer to the [Nodes Troubleshooting](troubleshooting/nodes.md#palette-agents-workload-payload-size-issue) documentation for guidance on disabling the workload report feature.
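+
+The following is a minimal sketch of what the OIDC settings noted in the Improvements above can look like when configured at the Kubernetes layer of a cluster profile. It assumes a kubeadm-based distribution, such as Palette eXtended Kubernetes, whose pack values expose API server flags under `kubeadmconfig.apiServer.extraArgs`; that layout and the issuer, client ID, and claim values shown here are illustrative assumptions, not the exact schema for every distribution.
+
+```yaml
+# Sketch only: OIDC flags passed to the kube-apiserver through a kubeadm-based
+# Kubernetes pack's values. Replace the placeholder values with your IDP details.
+kubeadmconfig:
+  apiServer:
+    extraArgs:
+      oidc-issuer-url: "https://idp.example.com/realms/palette"  # placeholder issuer URL
+      oidc-client-id: "palette-oidc-client"                      # placeholder client ID
+      oidc-username-claim: "email"
+      oidc-groups-claim: "groups"
+```
+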
+ + +### Edge + +#### Breaking Changes + +- To enhance the security of Edge deployments, a tenant [registration token](clusters/edge/site-deployment/site-installation/create-registration-token.md) created by the Tenant administrator is now required for pairing an Edge host with Palette. However, you can continue to use the auto registration, QR code, and manual registration methods available today. Refer to the [Register Edge Host](clusters/edge/site-deployment/site-installation/edge-host-registration.md) documentation to learn more about Edge registration methods. + + +- Prior Edge Installer versions are incompatible with Palette 3.4 and newer versions due to product enhancements and security fixes. New Edge clusters deployed with an earlier Edge Installer version will not operate in Palette. Active Edge clusters in Palette will continue to work as expected. Use the latest version of the [Edge Installer](spectro-downloads.md) when creating Edge artifacts and deploying new Edge clusters. + +#### Features + + + +- You can now assign a static IP address to an Edge host during deployment. Previously, you could only assign a static IP address through the user-data configuration file. You can now set a static IP address by using the user-data configuration file, the Palette API, Terraform, or the Palette dashboard during the Edge host cluster creation wizard. + + +- An Edge host ID can now be sourced directly from system files exposed by the BIOS or firmware via the [Desktop Management Interface](https://www.dmtf.org/standards/dmi) (DMI). Ensure that the system file is not empty and does not contain special characters, and the Edge installer will use the value in the file as the Edge host ID. This is an advanced feature and is not required for setting a device ID. + + + +- To deploy an Edge host device, use the Edge Forge workflow. The workflow allows you to customize the Edge Installer, include a user-data configuration file, preload content bundles, and perform other functions according to your preferences. Visit the [Edge Forge workflow](clusters/edge/edgeforge-workflow/edgeforge-workflow.md) page to learn more. + + +#### Improvements + + +- The events log stream for the Edge host cluster now includes audit messages and critical errors. Logs for an individual Edge host are now also accessible. The Edge host logs are helpful in debugging and monitoring the deployment process of an Edge host. + + +- The upgrade process for Edge clusters has been optimized to avoid extra reboots of the Edge host, whether it's for upgrading the OS or the Kubernetes version. + + + +- The latest Kairos release information is now appended to the **/etc/os-release** file. Unlike previous versions of Palette, the Kairos release information no longer replaces the entire content of the OS's release file. This change prevents any issues that may arise with tools like Nvidia's GPU Operator due to the previous overwrite behavior. + + +- The Palette dashboard now displays Edge host clusters undergoing an upgrade process. + + +### Palette Dev Engine (PDE) + + +#### Features + +- Palette PDE is now available in self-hosted installations of Palette. + + +- PDE now has a Command Line Interface (CLI) that you can use for programmatic access to PDE resources. Users can perform actions such as create, list, delete, resize, pause, and resume virtual clusters. You can also download the kubeconfig file of a virtual cluster with the CLI. Refer to the [Palette CLI](palette-cli/install-palette-cli.md) documentation page to learn more.
+ + +#### Improvements + +- Container applications that expose a service now automatically receive ingress with HTTPS support out of the box. This means exposed service URLs automatically receive dynamic SSL certificates used for HTTPS. + + +- You can now access a [new dashboard](devx/devx.md) to better understand your virtual clusters, app profiles, deployed apps, and resource utilization. The dashboard provides a comprehensive overview of critical metrics and more. + + +- You can now increase or decrease the number of replicated instances of a container service. Check out the [Container Deployment](devx/app-profile/container-deployment.md) guide to learn more about containerized deployments. + + +### Terraform + +- Version 0.14.0 of the [Spectro Cloud Terraform provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) is available. Refer to the Terraform provider [release page](https://github.com/spectrocloud/terraform-provider-spectrocloud/releases) for more details. + +### Packs + +#### Operating System Packs + +| Pack | New Version | +|--------------------|----------| +| COS GKE | 1.0.0 | +| Edge Native BYOI | 1.0.0 | +| SLES Libvirt | 15.4.1 | +| SLES vSphere | 15.4.1 | +| Ubuntu OpenStack | 22.04 | + + +#### Kubernetes Packs + +| Pack | New Version | +|--------------------|----------| +| Edge k3s | 1.25.2 | +| Edge k8s | 1.25.2 | +| Edge microk8s | 1.25 | +| Edge RKE2 | 1.25.2 | +| Kubernetes | 1.26.3 | +| Kubernetes EKS | 1.26 | +| Kubernetes GKE | 1.25.8 | + + +#### CNI Packs + +| Pack | New Version | +|------------------------|----------| +| CNI Calico | 3.25.1 | +| CNI Calico Azure | 3.25.1 | +| CNI Cilium | 1.13.2 | +| CNI VPC Native GKE | 1.0.0 | +| CNI Flannel | 0.21.4 | +| CNI Cilium Tetragon | 0.9.0 | + + +#### CSI Packs + +| Pack | New Version | +|----------------------------|----------| +| CSI AWS EBS | 1.17.0 | +| CSI GCP Persistent Driver | 1.10.1 | +| CSI Longhorn | 1.4.1 | +| CSI OpenStack Cinder | 1.26 | +| CSI Portworx Generic | 2.12.0 | +| GKE CSI GCP Driver | 1.0.0 | +| CSI vSphere | 3.0.0 | + + + +#### Add-on Packs + +| Pack | New Version | +|------------------------|----------| +| Nvidia GPU Operator | 22.9.2 | +| AVI Kubernetes Operator| 1.9.2 | +| Istio | 1.17.2 | +| Cloudanix | 1.0.0 | +| CSI Longhorn | 1.4.1 | +| CSI Topolvm | 11.1.1 | +| External DNS | 0.13.4 | +| Flux CD | 2.6.0 | +| Kong | 2.17.0 | +| Nginx | 1.7.0 | +| Palette Upgrader | 3.3.16 | +| Portworx | 2.13.0 | +| Prometheus Agent | 19.0.2 | +| Prometheus Operator | 45.25.0 | +| Reloader | 1.0.24 | +| Spectro k8s Dashboard | 2.7.1 | +| HashiCorp Vault | 0.24.1 | + + +#### Pack Notes + +- The CNI Calico pack version 3.25.1 is now available and adds support for IPv6 CIDRs. + +- The Nvidia GPU Operator pack is available and can be used to install Nvidia GPU drivers on Nvidia hardware. + +- The AVI Kubernetes Operator (AKO) pack is now available. You can use this pack to provide L4-L7 load balancing for applications deployed in a Kubernetes cluster for north-south network traffic. This pack is only available for VMware vSphere. + +- The vSphere CSI pack version 3.0.0 is available. This version supports the usage of custom images for vSphere pods. + + +- The CNCF Kubernetes pack is renamed to Palette eXtended Kubernetes. + + +- Kubernetes versions before 1.24 are no longer supported for host clusters targeting Azure Kubernetes Service (AKS). This deprecation is due to Azure's Kubernetes support policy.
You can learn more about Azure-supported Kubernetes versions [here](https://learn.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli). + + +### Education + +- Learn how to create a custom pack and deploy it to a Palette registry server with the [Create and Deploy a Custom Add-On Pack](registries-and-packs/deploy-pack.md) tutorial. + + +- An introductory tutorial on deploying a Palette-managed cluster to public cloud providers is now available. Learn to deploy a host cluster with Palette using the Palette user interface or Terraform. Check out the [Deploy a Cluster](clusters/public-cloud/deploy-k8s-cluster.md) tutorial to get started. + + + +## March 19, 2023 - Release 3.3.0 + +This release contains several security fixes and new features for Edge. The Edge installation process has been improved to allow users greater flexibility and more control over the installation process. + +### Palette + +#### Enhancements: + +* Users can now download all the clusters listed when applying a filter to the clusters list. + +### Edge + +#### Features: + +* Edge now supports the ability to load images from an external OCI registry. +* The Edge Installer can now include preloaded content bundles containing packages and artifacts. This is useful for scenarios where you work with limited internet bandwidth or want to optimize the installation process. +* Users can now [create custom Edge Installer images](clusters/edge/edgeforge-workflow/palette-canvos.md) to support advanced scenarios such as Bring Your Own Operating System (BYOOS), installing additional OS packages, preloading content into the installer, and more. +* Support for creating Virtual Machine Disks (VMDK) from the Edge installer ISO is now available. Use this to simplify deployments into VMware-based environments. +* Support for generating random UUID values for the Edge host is now available. This addresses the issue of some devices having the same Universal Unique Identifier (UUID) due to identical device identifiers. + +### Packs + +* CNI Packs: + * Calico CNI 3.25.0 +* CSI Packs: + * EBS CSI 1.16.0 + * vSphere CSI 2.7.0 +* Add-on Packs: + * Flux v2 2.6.0 + * Prometheus Operator 45.4.0 + * MetalLB 0.13.9 + * Spectro Proxy 1.3.0 + +## February 28, 2023 - Release 3.2.0 + +Release 3.2 introduces support for a new public cloud provider, Cox Edge. Other highlights include a streamlined experience for installing the Kubernetes Dashboard in a cluster, a new security scan, auto registration capabilities for edge devices, new [out-of-the-box services](devx/app-profile/services/service-listings/service-listings.mdx), and many other product enhancements. + +### Palette + +#### Features + +* Support for the [Cox Edge](clusters/public-cloud/cox-edge/cox-edge.md) cloud provider is now available in Palette. +* Palette introduces a new user sign-in flow for users who previously created an account through SSO and who are members of different organizations. Palette prompts you to select the organization to log in to. If you do not remember your organization name, you can retrieve it using the “Forgot your organization name?” option. +* Palette now provides a streamlined experience for users when installing [Kubernetes dashboard](integrations/spectro-k8s-dashboard.md). When adding Kubernetes dashboard as a pack, Palette displays relevant configuration items directly in the pack UI. +* Palette now auto-cleans deleted clusters, deployments, cluster profiles, cloud accounts, edge hosts, and other resources.
Users can expect auto cleanup to take approximately 15 minutes. +* Additional filtering options are available to apply to clusters. Users can filter by region and country with pre-populated values based on cluster information and by ‘Unknown’ state. +* Palette now provides a way to search and filter private cloud gateways (PCGs) by resource tag. +* Palette provides the ability to schedule OS patching for enterprise clusters and PCGs. OS patching applies to clusters that have a master pool with multiple nodes. +* Palette provides a **tag.update** permission that can be assigned to user roles that allows modifying resource tags. +* Palette introduces a Software Bill of Materials [(SBOM) scan](clusters/cluster-management/compliance-scan.md#sbom-dependencies--vulnerabilities) capability that can be invoked manually or scheduled to run on tenant clusters. Multiple output formats are available. +* Palette offers two new app services: CockroachDB and HashiCorp Vault. +* Palette provides access to configuration and status [logs for each application](devx/apps/logs.md). +* Palette now allows you to revise the order of layers as you create an app profile. +* Virtual clusters now support the ability to [back up all disk volumes](clusters/cluster-groups/cluster-group-backups.md) within the cluster. +* A system cluster profile named **nginx-ingress** is now available to help users [set up ingress endpoints](clusters/cluster-groups/ingress-cluster-group.md) for cluster groups. + +#### Enhancements + +* [Cluster groups](clusters/cluster-groups/cluster-groups.md) that were previously supported only at the tenant scope are now supported at the project scope. +* Palette has improved the launch time for virtual clusters. +* [Virtual clusters can be resized](devx/palette-virtual-clusters/resize-virtual-clusters.md) from the default to a size that does not exceed the system-level quota for a cluster group like Beehive or the user quota for tenant-level cluster groups. +* Virtual clusters now display a progress status during the creation phase. +* The App profile container service layer contains additional [output variables](devx/app-profile/app-profile-macros.md#container-service-output-variables) to help services connect. Refer to the [service connectivity](devx/app-profile/services/connectivity.md) document for additional guidance. +* We optimized the Spectro Cloud Postman collection to circumvent a nested levels [bug](https://github.com/postmanlabs/postman-app-support/issues/10928) in Postman. + +#### Deprecations + +* Enabling virtual clusters on host clusters is deprecated. Use [cluster groups](clusters/cluster-groups/create-cluster-group.md) to enable virtual clusters moving forward. Cluster groups are also now supported at the [project](projects.md) scope. + +### Edge + +#### Features + +* Palette provides the ability to automatically register edge hosts for a specific project when a host authentication token is specified in **Tenant Settings > Registration Tokens**. + +* Bring Your Own OS (BYOS) support. 
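+
+As a brief illustration of the Edge host auto-registration feature above, the sketch below shows how a host authentication token could be supplied through the Edge Installer user-data file so that the Edge host registers itself with the intended project. The `stylus.site` layout and the `paletteEndpoint` and `edgeHostToken` field names are assumptions drawn from the Edge Installer reference and may differ for your installer version; treat this as a sketch rather than the authoritative schema.
+
+```yaml
+#cloud-config
+# Sketch only: Edge Installer user-data excerpt. Field names are assumptions.
+stylus:
+  site:
+    paletteEndpoint: api.spectrocloud.com     # Palette endpoint the host registers with
+    edgeHostToken: "<registration-token>"     # token from Tenant Settings > Registration Tokens
+```
+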
+ +### Packs + +* OS packs: + * Ubuntu 22.04 on AWS, Azure, GCP +* K8s packs: + * Support for K8s 1.26.1 + * Support for K8s 1.25.6 + * Support for K8s 1.24.10 + * Support for K8s 1.23.16 + * Support for Tencent TKE 1.0.0 on VMware +* CNI Packs: + * Calico CNI 3.24.5 + * Cilium CNI 1.12.6 + * Antrea CNI for VMware 1.9.0 +* CSI Packs: + * EFS CSI 1.4.9 + * Azure Disk CSI 1.25.0 + * GCE Persistent Disk CSI 1.8.2 + * Rook-Ceph CSI 1.10.0 +* Add-on Packs: + * Kong Ingress 2.13.1 + * K8S Dashboard 2.7.0 + * External DNS 0.13.1 + * Open Policy Agent 3.11.0 + * Reloader 0.0.129 + * External 0.7.1 + * Vault 0.23.0 + * Nginx Ingress 1.5.1 + * AWS Application Load Balancer 2.4.6 + * Prometheus Operator 44.3.0 + * Bring Your Own OS (BYOS) pack 1.1.0 + * Spectro Proxy 1.2.0 + +
+ + + + +## December 28, 2022 - Release 3.1.0 + +Palette 3.1 is released with support for AWS GovCloud and FIPS-compliant PXK and PXK-E Kubernetes versions. This release also features Autoscalers for IaaS clusters, FIPS enablement at the scope level, cluster tagging, and the ability to use tags for resource filtering and access control. The Palette Developer Experience (PDE) product also contains several enhancements that improve the user experience, such as the ability to pause and resume clusters, new services for app profiles, and more. + +### Palette + +#### Upgrade Notes: + +* MaaS cluster's initialization configuration has been updated to disable memory swap. This will result in MaaS cluster nodes becoming repaved when applying the new configuration. + +#### Features: + +* Palette supports integration with [AWS GovCloud services](clusters/public-cloud/aws/add-aws-accounts.md#prerequisites) to meet the compliance mandates for safeguarding sensitive data by strengthening identity management, improving cloud visibility, and protecting accounts and workloads to support mission-critical workloads for government customers. +* [Autoscaling](clusters/cluster-management/node-pool.md#worker-node-pool) capabilities for Palette IaaS clusters to ensure better availability and cost management for dynamic workloads. +* Palette now provides FIPS compliance through a [FIPS-compliant](compliance.md#fips-140-2) version of Kubernetes (PXK and PXK-E). Palette FIPS support is extended at the platform level with the tenant and project scopes and at the cluster level with FIPS-compliant infrastructure layer cluster profiles. +* Palette supports tagging and the ability to filter user [access](clusters/cluster-management/cluster-tag-filter/cluster-tag-filter.md) and [visibility](clusters/cluster-management/noc-ui.md#monitor-your-cluster-location) to clusters using tags. You can filter geographically dispersed clusters in the Palette map view and list view using [flexible filters](clusters/cluster-management/noc-ui.md#map-filters) to have a granular view of cluster information. +* Palette supports app profile versioning. Versioning enables users to create multiple [versions of an App Profile](devx/app-profile/versioning-app-profile.md#apply-version-to-a-profile) within the scope of a single profile name. +* Palette supports the [cloning](devx/app-profile/app-profile-cloning.md#clone-an-app-profile) of App Profiles across multiple projects. For example, you can clone an app profile created under a specific project to another project within the same tenant. +* Palette Dev Engine supports the manual and system update of an [App Profile](devx/app-profile/versioning-app-profile.md#apply-version-to-a-profile). You can verify the update notification and apply the changes to the Apps. +* Palette app mode now supports the use of [containers](devx/app-profile/app-profile.md#resources). You can specify containers when creating an app profile. +* Palette leverages [Helm and OCI registries](devx/manage-dev-engine/registries.md) for custom pack management. +* Palette provides [out-of-the-box](devx/app-profile/app-profile.md) support for application services such as Kafka, MySQL, NATS, and more for Palette Dev Engine. These services can be specified when creating an App Profile. +* Palette allows you to [pause and resume](devx/palette-virtual-clusters/pause-restore-virtual-clusters.md) virtual clusters that are not in use.
This adds significant flexibility in managing the operating costs and optimizing resource management for virtual clusters. + +#### Enhancements: + +* [OS patch reboot](clusters/cluster-management/os-patching.md#reboot) allows clusters to reboot to apply system updates if required. + +* Palette Tencent clusters now support using [security groups](clusters/public-cloud/tke.md#deploy-a-tencent-cluster) for network isolation and improved security. + +* Reduced launch time when creating Palette Virtual Clusters. + +* Palette Virtual Clusters now support ephemeral storage. + +#### Deprecations: + +* Deprecated API: `GET /v1/dashboard/projects`, new API: `POST /v1/dashboard/projects` + +* Deprecated API: `POST /v1/dashboard/spectroclusters`, new API: `POST /v1/dashboard/spectroclusters/search` + +#### Known Issues: + +* Palette does not allow scaling of control plane nodes for the Microk8s pack. The workaround is to remove the scaling limit of the control plane. + +* Currently, Microk8s does not support an out-of-box service load balancer. + * Workaround: To avoid this, you can install the [AWS Application Load Balancer](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/guide/service/nlb/) pack. Packs that contain a service of type load balancer will require annotation and `loadBalancerClass` changes. + +### Edge + +#### Features: + +* Palette supports the provisioning of [MicroK8s clusters](integrations/microk8s.md). MicroK8s deployments are quick and ideal when creating disposable Kubernetes clusters. The MicroK8s pack supports automatic updates, security configuration, and the ability to self-update Kubernetes dependencies. + +#### [Spectro Image Updates](spectro-downloads.md): + +* Private Cloud Gateway Installer updated to version 1.4.0. +* On-Prem Installer updated to version 2.4.0. +* Air Gap Repo Appliance updated to version 2.1.0. +* EDGE Installer version 2.2.23. + +#### [Packs and Integrations](integrations/integrations.mdx): + +* csi-longhorn version 1.3.1 +* csi-longhorn-addon version 1.3.1 +* kafka-operator version 0.32.0 +* mysql-operator version 0.6.2 +* nats-operator version 0.18.2 +* palette-upgrader version 3.0.70 +* palette-upgrader version 3.0.51 +* spectro-k8s-dashboard version 2.6.0 + + +## October 24, 2022 - Release 3.0.0 + +Spectro Cloud Palette 3.0.0 is released with [Native Edge](clusters/edge/edge.md), [Palette Dev Engine](devx/devx.md), [NOC-UI](clusters/cluster-management/noc-ui.md), and many more exciting capabilities. + +**Features** + +* A new set of capabilities that improve the [developer experience](devx/devx.md) are introduced in this release: + * Rapid Application deployment with a smooth onboarding experience. + * RBAC with a developer-centric view. + * System scope resource quota. + * System scope cluster groups to host [Palette Virtual Clusters](clusters/palette-virtual-clusters/deploy-virtual-cluster.md). + * Out-of-the-box application profiles and applications deployment with Palette Virtual Clusters. + * Application profiles can consist of Helm charts, manifests, and database services such as MongoDB, Redis, and PostgreSQL. + +* The Palette [Native Edge](clusters/edge/edge.md) architecture is an instance of the Palette Edge Distribution. The Palette Edge instance is based on the desired version of the Operating System and Kubernetes installed natively onto the edge devices. All Day 1 and Day 2 operations, from installation to scaling, upgrades, and reconfiguration, are managed by the Palette Console.
+ +* Palette provides an intuitive, location-based UI to monitor clusters with [NOC-UI](clusters/cluster-management/noc-ui.md). + +* Palette enterprise mode production clusters can be backed up to [Azure Blob storage](clusters/cluster-management/backup-restore/backup-restore.md) for convenient restoration. + +* Palette provisions cluster monitoring with [Kubernetes Dashboard](integrations/spectro-k8s-dashboard.md) exposed to external traffic using the [Spectro Proxy](integrations/frp.md) pack with RBAC authentication. + +**Enhancements** + +* Palette enables the provisioning of private Azure Kubernetes Service (AKS) clusters within Azure Virtual Networks (VNet) for enhanced security by offloading the orchestration to a [Private Cloud Gateway](clusters/public-cloud/azure/gateways.md) deployed within the same account as the private AKS clusters. + +* Operators can now customize the [pod limit](https://learn.microsoft.com/en-us/azure/aks/) for AKS clusters. Customize the pod limit value from the Kubernetes configuration [file](clusters/cluster-management/node-pool.md) at any time by editing the `maxPodPerNode` value. + +* The Kubernetes Packs for [Edge Native](clusters/edge/architecture.md) deployments disable a few items by default to allow users to install those items independently or to avoid duplication. + +* The latest Palette Terraform releases, [Module 0.4.1 and Module 0.10.1](terraform.md), support: + * Native Edge clusters + * Palette Virtual Clusters + * Fixes towards [Enhancements](terraform.md) + +**Packs and Integrations** + +* Dex version 2.35.1 +* Harbor version 1.9.3 +* Istio version 1.14.3 +* Image Swap version 1.5.1 +* Generic-VM Libvirt version 1.0.1 +* Generic VM vSphere version 1.0.3 +* Tekton-chains version 0.12.0 +* Tekton-operator version 0.61.0 +* K3s version 1.24.4 +* Spectro Proxy version 1.1.0 +* External DNS version 0.12.2 +* MetalLB version 0.13.5 +* Reloader version 0.0.118 +* AWS Cluster Autoscaler version 1.22.2 +* Fluentbit version 1.9.6 +* Kubernetes dashboard version 2.6.1 +* Calico version 3.24 +* Cert-Manager version 1.9.1 +* Open Policy Agent version 3.9.0 +* AWS EBS CSI version 1.10.0 + +**Known Issues** + +* While deploying multiple apps in a Palette Virtual Cluster, if the deployment of one of the apps is blocked due to errors, then subsequent apps deployed to the same virtual cluster might also be stuck in deploying state. Apply the following workarounds if you encounter the issue. + + * Delete the stuck App. + * Fix the App with the error. + * Redeploy the App again. +## September 10, 2022 - Release 2.8.0 +Spectro Cloud Palette 2.8.0 is now available with support for Palette Virtual Clusters, Web-Based Kubectl, Import and Export of Profiles, Terraform Releases, and many exciting enhancements. + +**Features** +* Palette now supports lightweight, cost-effective, secure, and resource-efficient [Palette Virtual Clusters](/clusters/palette-virtual-clusters) to rapidly create securely-isolated environments for applications without the infrastructure and operational overhead of additional Kubernetes clusters. +* Palette leverages web-based [Kubectl](/clusters/cluster-management/palette-webctl#overview) for users to deploy applications, inspect and manage cluster resources, and view logs via the Palette terminal without an external terminal.
+* Palette enables the reuse and sharing of large profiles with many add-ons and integrations to be [exported and imported](/cluster-profiles/cluster-profile-import-export#overview) across multiple environments, projects, and tenants. +* Palette customers can now provision the fully conformant Kubernetes distribution [RKE2](/integrations/rke2#rke2overview) focusing on security and compliance. +* The latest Palette Terraform releases, [Module 0.2.3 and Module 0.3.0](/terraform#moduletoprovidercompatibilitymatrix), focus on: + * Cluster resource tagging + * Static placement of AKS clusters + * VMware cloud-type support of Terraform modules + * Image template support + +**Enhancements** +* Palette upgrades the vSphere Private Cloud Gateways and On-Prem cluster specifications to newer versions: + + * K8s version has been upgraded from 1.21 to 1.22.12 (the latest version in 1.22) + + * The storage layer has been upgraded from 2.3 to 2.5.2 to fix volume attachment issues + + * Ubuntu OS has been upgraded from LTS 18.04 to LTS 20.04 + + * The PCG and On-Premise images now have all the latest OS patches updated + +* Palette enables [Cluster Lock](/clusters/cluster-management/palette-lock-cluster#overview) to restrict clusters under a tenant, project, or an individual cluster from receiving cluster management services upgrades when Palette is upgraded. +* Palette surfaces [OS patching details](/clusters/cluster-management/os-patching#monitoring) such as `Last Applied Patch Time`, the date and time of the last OS patch. +* Palette displays the mapping between cluster profiles and clusters on the cluster profile details page, listing the clusters created using a specific cluster profile. +* Palette supports VNet Resource Group filtering for AKS clusters, allowing the VNet to be a part of a different resource group than the AKS resource group. +* Palette enables users to override the [custom folder](/clusters/data-center/vmware#deployingavmwarecluster) for vSphere templates, in addition to the default image template folder, `spectro-templates`, for the vSphere environment. +* [Regex Expression](/workspace#regexfornamespaces) support for mass selection of workspace names for role binding. +* Palette also supports single sign-on using SAML/OIDC integration with [Google Identity](/user-management/saml-sso#oidcbasedsso). +* Palette enables customers to optionally disable the [OIDC associate provider](/clusters/public-cloud/aws/eks) for EKS clusters if the service provider restricts cluster deployment in an OIDC-enabled state. +* Tenant administrators can now set the [Palette resource limits](/user-management/palette-resource-limits#setresourcelimit) through the Palette console. +* Palette supports [infrastructure privacy](/clusters/public-cloud/azure#deployinganazurecluster) for the Azure cloud account. + +**Deprecations** + +* **API Deprecations** + + * Deprecated API: `GET /v1/clusterprofiles`
+ New API: `POST /v1/dashboard/clusterprofiles` with better filter support + * Deprecated API: `GET /v1/projects`
+ New API: `POST /v1/dashboard/projects` with better filter support + * Deprecated API: `GET /v1/spectroclusters`
+ New API: `POST /v1/dashboard/spectroclusters` with better filter support + * Deprecated API: `GET /v1/spectroclusters/{uid}/packs/{packName}/config`.
+ New API: `GET /v1/spectroclusters/{uid}/profiles/{profileUid}/packs/{packName}/config` with support for multiple cluster profiles within a cluster; the profileUid is required to locate the pack uniquely within the cluster + + +* **Pack Deprecations:** + + * Azure Kubernetes Services (AKS) 1.21 + +**Packs and Integrations** + +* Nginx 1.3.0 +* Thanos - 10.5.3 +* EFK - 7.17.3 +* Kubernetes Dashboard - 2.6.0 +* Vault - 0.20.1 +* Calico - 3.23 +* Calico for Azure - 3.23 +* AWS EBS CSI - 1.8.0 +* AWS EFS - 1.4.0 +* AWS EFS -addon - 1.4.0 +* gce-pd-csi-driver-v1.7.1 +* Portworx-generic-addon-v2.11.2 +* Portworx-generic-v2.11.2 +* vSphere_csi_2.5.2 + +**Known Issues** + +* AKS Clusters in v1beta1 environment give an empty report for Kubernetes Conformance Testing (Sonobuoy scan). +* OS patch information is not displayed for clusters with OS patching scheduled on boot. + + + +## July 17, 2022 - Release 2.7.0 +Spectro Cloud Palette 2.7 is released with advanced features supporting Windows Worker Node Pools, Canonical Ubuntu Advantage, Cluster Migration from Private Cloud Gateway, enhanced Workspace, and more. + +**Features:** +* Spectro Cloud Palette has enhanced the import cluster functionality with a ["Read-Only Mode"](/clusters/imported-clusters/cluster-import) and a "Full Permission Mode". Users can start exploring Palette by importing a cluster in a minimal mode without granting the full administrative set of permissions. Over time, users can grant additional permissions to manage Day 2 operations. +* Palette now supports [Windows worker nodes](/clusters/public-cloud/azure) in addition to Linux worker nodes for Azure Kubernetes Services (AKS) clusters. +* Palette provides security and OS patching benefits with a [Canonical Ubuntu Advantage](/integrations/ubuntu#ubuntuadvantage) for Infrastructure subscription with Ubuntu as an OS layer for multiple operating environments. +* Automatically scale the workload resources of your Azure Kubernetes Services (AKS) clusters with the [AKS Autoscaler](/clusters/public-cloud/azure) to meet dynamic user workloads. +* Palette leverages the Container Storage Interface (CSI) and Container Network Interface (CNI) layers using Helm Chart in addition to manifest-based deployment. +* Palette introduces a well-defined [color scheme to monitor](/clusters/cluster-management/pack-monitoring#packmonitoring) the different stages of pack deployment during cluster creation. +* Palette [Edge Clusters](/clusters/edge) can be deployed on remote bare metal or virtual machine appliances, with end-to-end support for deployment, scaling, upgrades, and reconfiguration. + +**Enhancements:** + +* Palette [Azure CNI Pack](/integrations/azure-cni#azurecni) ensures advanced traffic flow control using Calico Policies for AKS clusters. +* Palette supports the [migration of Private Cloud Gateway (PCG)](/enterprise-version/enterprise-cluster-management#palettepcgmigration) traffic from unhealthy to healthy PCG without compromising service availability. +* Palette Workspace is upgraded with: + * [Resource Quota](/workspace/workload-features#workspacequota) allocation for Workspaces, Namespaces, and Clusters. + * [Restricted Container Images](/workspace/workload-features#restrictedcontainerimages) feature to restrict the accidental deployment of a delisted or unwanted container to a specific namespace. + * Enable the collective role binding of namespaces using [regular expressions for namespaces](/workspace#regexfornamespaces) selection.
+ * Selective [Resource Restore](/workspace/workload-features#restoreyourbackup) from Workspace Backup across Cluster resources, Node Ports, and Persistent Volumes. +* Palette provides visibility into [Role Binding and Cluster Role Binding](/clusters/cluster-management/workloads#overview) resources running inside our workload clusters. + +## May 30, 2022 - Release 2.6.0 + +Spectro Cloud Palette 2.6 is released with support for Cluster Profile Versions, EKS Secrets Encryption, CSI StorageClass, and added parameter capabilities. + +**Features:** + +* Palette supports multiple [versions](/cluster-profiles/task-define-profile#clusterprofileversioning) of a single cluster profile under a unique name to allow backward compatibility. + +* Palette leverages AWS Key Management Service (KMS) to provide envelope [encryption](/clusters/public-cloud/aws/eks#eksclustersecretsencryption) of Kubernetes Secrets stored in Amazon Elastic Kubernetes Service (EKS) clusters. + +* Palette covers a long list of [parameters](https://github.com/kubernetes-sigs/aws-ebs-csi-driver#createvolume-parameters) and customization capabilities for the [csi-aws-1.0.0](/integrations/aws-ebs#parametersupportcsi-aws-1.0.0packmanifest) pack manifest. + +**Enhancement:** + +* Palette allows reconciliation of the CSI layer StorageClass for managed clusters of Amazon Elastic Kubernetes Service (EKS). + +**Bug Fixes** + +* We request our users to add the `ec2:ReplaceRoute` permission to the [AWS](/clusters/public-cloud/aws/required-iam-policies) and [EKS-AWS](/clusters/public-cloud/aws/required-iam-policies) cloud account Controller Policy to replace an existing route, within a route table in a Virtual Private Cloud, to facilitate the cluster deletion process. + + +## April 26, 2022 - Release 2.5.0 + +Spectro Cloud Palette 2.5.0 was released with support for Tencent Kubernetes Engine (TKE), Palette free service offerings, many enhancements, and bug fixes. + +**Features:** + +- Palette now supports [Tencent Kubernetes Engine (TKE)](/clusters/public-cloud/tke#overview)—a fully-managed Kubernetes service from Tencent Cloud. Deploy and manage the end-to-end life cycle of TKE clusters effortlessly. +- Palette introduces **Placeholder Variables** as [Macros](/clusters/cluster-management/macros#overview) in our Cluster Profile layers for advanced regression and easier updates of variables across multiple running clusters. +- Palette provides a well-organized [Product Onboarding](/getting-started/onboarding-workflow#paletteonboardingworkflow) process to streamline product adoption and jump-start your Palette journey. +- Palette helps new users in their purchase decision by offering free tier services. + - [Palette Freemium](/getting-started/palette-freemium#trypaletteforfree) to explore Palette's capabilities with free and fixed kilo-Core-hour usage for finite cluster deployments. + - [Free Cloud Credit](/getting-started/palette-freemium) offers access to a free cloud account, with sufficient permissions and credentials, to get a first impression of the product. + +**Enhancements:** + +- Palette users can now manually [Force Delete a Cluster](/clusters/public-cloud/aws#forcedeleteacluster) that is stuck in the **Deletion** state for more than **15 minutes** through the User Interface. +- Palette production clusters can be backed up to object storage of [GCP Buckets](/clusters/cluster-management/backup-restore#configureyourbackupingcpbucket) for convenient restoration.
+ +**Bug Fixes:** + +- We request our users to add the `ec2:DeleteNetworkInterface` permission to their AWS cloud account Controller Policy Permissions to detach and delete the network interface for [AWS](/clusters/public-cloud/aws#awscloudaccountpermissions) and [EKS](/clusters/public-cloud/aws/eks) clusters. + +**Packs and Integrations:** + +- [Ubuntu 20.04](/integrations/ubuntu) - A long-term support release of the highly popular Ubuntu Linux operating system. +- Imageswap-webhook - A mutating webhook integration that intercepts pod requests and changes the container image location. + +## February 26, 2022 - Release 2.3.0 + +Palette 2.3.0 includes the following enhancements: + +- Added support for cluster-centric detailed [**Namespace Management** and granular **RBAC**](/clusters/cluster-management/cluster-rbac). Previously this capability was only available via workspaces. +- Enabled secure and straightforward user authentication with [**API Keys**](/user-management/user-authentication/#apikey) to access the APIs without referring to user credentials. +- Tenant administrators can now get an [**aggregated view**](/clusters/#scope) of clusters across all the projects under their tenant. +- Added support for [**Taints**](/clusters/cluster-management/taints/#overviewontaints) that can be applied to a node pool to prevent pods that do not tolerate them from being scheduled on those nodes. +- Added support for [**Labels**](/clusters/cluster-management/taints/#overviewonlabels) to constrain pods so that they can run on a particular set of nodes. +- Enable multi-cluster [**backup and restore from workspaces**](/clusters/cluster-management/backup-restore/#workspacebackupandrestore). +- New [**workspace user roles**](/clusters/cluster-management/backup-restore#workspaceoperator) that provide granular control to specific actions within a workspace: + - _Workspace Operator_ - Allows only backup and restore capabilities within a workspace + - _Workspace Admin_ - Administrative privileges within a workspace +- Palette will now perform a [**rolling upgrade**](/clusters/#rollingupgrade) on the nodes for any fundamental changes to the cluster config. Palette keeps track of the reason that triggered the rolling upgrade on the nodes in the cluster, and this information is accessible under **Cluster Overview** > **Upgrade details**. +- Enable deployment of a single pack across [**multiple layers**](https://docs-latest.spectrocloud.com/cluster-profiles/task-define-profile/#creatingclusterprofiles) of a cluster profile. +- Palette introduces a VM operator to allow Virtual Machine based applications to be modeled as Cluster Profile layers. + +## January 20, 2022 - Hotfix 2.2.26 + +- Palette Hotfix 2.2.26 supports custom Helm chart registries in private networks. +- [Helm registries](/registries-and-packs/helm-charts) can now be set up in **Protected** mode also. In protected mode, charts are configured in cluster profiles without being synchronized into the management console. +- For the tenant clusters deployed in a private network, these charts from the protected Helm registries are downloaded and deployed by the Palette orchestrator. + +## December 24, 2021 - Release 2.2.0 + +Palette 2.2.0 is released with the beta version of Edge Clusters along with upgraded Cluster API support. + +The 2.2.0 Palette enhancements are: + +- Palette users can now provision and manage their [Kubernetes clusters using edge appliances](/clusters/edge/) in addition to the usual data center or cloud environments.
+- Palette has been upgraded to use a newer version of the CNCF Cluster API for better automation, integration, and efficiency. +- The upgraded Cluster API version used by Palette mandates the following pack updates: + - Kubernetes 1.18.x and below are no longer supported. Please use Kubernetes version 1.19.x or above in the Cluster Profile. + - vSphere CSI storage driver 1.0.x version is no longer supported for new Cluster Provisioning. Please upgrade your CSI Pack to 2.3.x for enhanced performance. +- As part of the Palette upgrade to 2.2.0, control plane node(s) of any existing vSphere cluster will be replaced. + +## November 20, 2021 - Release 2.1.0 + +Palette 2.1.0 is released with the following key improvements: + +- Added support for replicated, cross-region Amazon Elastic Container Registries (ECR) whereby a single OCI registry within Spectro Cloud Palette can serve multiple deployment regions. +- Spectro Cloud users can now join more than one tenant. Users belonging to multiple organizations must choose the desired tenant to log in to. This feature is also supported for SSO-enabled tenants. +- Improved the UI of the Cluster Overview page. Visibility into basic cluster properties as well as cluster management actions such as configuration overrides, machine management, scan and backup policies, and cluster deletion are now arranged under the **Settings** menu on the top right-hand side. + +## November 1, 2021 - Release 2.0.0 + +We are excited to announce the Spectro Cloud platform's new name - "PALETTE". In addition, version 2.0 of our platform brings additional cost visibility, optimization features, enhanced governance, and control with **Workspaces**. + +Our latest list of features includes: + +- **Workspaces** enable the association of relevant namespaces across clusters to manage access, obtain cost visibility, and get workload visibility by applications or teams. +- Cluster health alerts can be integrated with IT service management (ITSM) and collaboration tools such as Slack, ServiceNow, Microsoft Teams, etc. +- Our built-in Spectro Proxy can be leveraged to establish seamless and secured access to the Kubernetes clusters in public and private data center environments. +- Cluster cloud cost calculation for public and private clouds. +- Granular usage cost breakdown by namespaces, workspaces, and projects based on actual resource utilization by pods, jobs, stateful sets, PVCs, etc. +- Detailed visibility of resource utilization by cluster, namespaces, projects, and workspaces. + +## September 14, 2021 - Release 1.14.0 + +Spectro Cloud 1.14 is released with additional health alert conveyances, secured log storage, transparent cost features, and scalable enterprise cluster backup. + +- Spectro Cloud users can now push their audit logs to AWS CloudTrail to enhance continuous monitoring and troubleshooting of the workload clusters. +- Spectro Cloud provides instantaneous and effortless monitoring of cluster cloud cost. +- Spectro Cloud users can now receive real-time alerts on cluster health through external applications hooked into the platform. +- Spectro Cloud enterprise mode production clusters can be backed up to object storage of S3 buckets for convenient restoration. +- A Spectro Proxy authentication pack to provision reverse proxy-aided communication for clusters deployed in a private network belonging to local data centers. +- Spectro Cloud has stepped up to an upgraded and stable API version for better automation, integration, and efficiency.
+ +## August 14, 2021 - Release 1.13.0 + +Spectro Cloud users can now convert their bare-metal servers into flexible, cohesive, and distributed instances of virtual machines with minimal effort using Metal as a Service (MAAS). + +## July 23, 2021 - Release 1.12.0 + +Spectro Cloud 1.12 is released with generic cluster import, OpenID Connect (OIDC) support to handle identity management securely and seamlessly, and support for AKS, a managed Kubernetes service offering from Azure cloud. + +- Now import existing non-Spectro clusters from any cloud platform using our Generic cluster import feature. We support broad operations like scans, backups, etc. on these imported clusters as well as provisioning and lifecycle management of add-ons. +- Spectro Cloud now supports AKS, a fully-managed Kubernetes service from Azure. Deploy and manage the end-to-end lifecycle of AKS clusters. +- Spectro Cloud extends its SSO support by providing integration with OpenID Connect (OIDC). OIDC is the de facto standard for handling application authentication in the modern world. Through this integration, Spectro Cloud enables users to integrate single sign-on, using various identity providers such as Amazon Cognito, Keycloak, etc. +- Kubernetes upgraded to version 1.19 for enterprise clusters. + +## June 28, 2021 - Release 1.11.0 + +Spectro Cloud 1.11 is released with support for OpenStack cloud and support for OIDC based authentication into Kubernetes clusters. + +- Spectro now supports deployment and management of Kubernetes clusters in OpenStack based private data centers. +- Support for OIDC based authentication into Kubernetes clusters and a preconfigured kubeconfig file to easily authenticate when using kubectl. + +## June 1, 2021 - Release 1.10.0 + +Spectro Cloud 1.10 is released with support for Amazon Elastic Kubernetes Service (EKS) and cluster management policies to measure cluster compliance and perform backups and restores. + +- Provision and manage Kubernetes clusters using the Amazon EKS service, including support for advanced configurations like Fargate profiles, OIDC Authentication, etc. +- Scan your Kubernetes clusters to ensure they are conformant and compliant. +- Consensus-driven security scan for the Kubernetes deployment with CIS Kubernetes Benchmarks. +- Perform penetration tests to check for configuration issues that can leave the tenant clusters exposed to attackers. +- Backup your Kubernetes clusters including any persistent volumes. Restore these backups as required on any cluster. + +**Note**: + +The following permissions are additionally required to be granted to the cloud accounts used to launch clusters on AWS. Please update your account to ensure that you have these new permissions included. + +Add the following permissions to the IAM policy called NodePolicy if it was created as documented in Spectro Cloud documentation. + +```json +{ + "Effect": "Allow", + "Action": ["secretsmanager:DeleteSecret", "secretsmanager:GetSecretValue"], + "Resource": ["arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/*"] +}, +{ + "Effect": "Allow", + "Action": [ + "ssm:UpdateInstanceInformation", + "ssmmessages:CreateControlChannel", + "ssmmessages:CreateDataChannel", + "ssmmessages:OpenControlChannel", + "ssmmessages:OpenDataChannel", + "s3:GetEncryptionConfiguration" + ], + "Resource": ["*"] +} +``` + +Add the following permissions to the IAM policy called ControllerPolicy if it was created as documented in Spectro Cloud documentation.
+ +```json +{ + "Effect": "Allow", + "Action": ["eks:AssociateIdentityProviderConfig", "eks:ListIdentityProviderConfigs"], + "Resource": ["arn:aws:eks:*:*:cluster/*"] +}, +{ + "Effect": "Allow", + "Action": ["eks:DisassociateIdentityProviderConfig", "eks:DescribeIdentityProviderConfig"], + "Resource": ["*"] +} +``` + +## May 4, 2021 - Release 1.9.0 + +Spectro Cloud 1.9.0 is released with advanced support for security, availability, and updates. + +- With EKS support, Spectro Cloud enables users to start, run, and scale highly available and secure clusters with automated key tasks such as patching, node provisioning, and updates. +- Now create and gain access to your AWS cloud account by just using a role ARN, without sharing long-term credentials. + +## March 29, 2021 - Release 1.8.0 + +Spectro Cloud 1.8.0 is released with advanced support for deploying & discovering Helm Charts and several usability enhancements! + +Featuring: + +- Set up public and private helm chart registries to leverage the vast database of integrations and add-ons. +- Deploy reliable and secure Kubernetes clusters, without worrying about Kubernetes updates, dependencies, and security patches, using the EKS Distro (EKS-D). +- Accumulate container logs across all cluster nodes to create a support bundle to enable faster troubleshooting. +- Attach multiple supporting manifests to your cluster profile layers in order to deploy integrations end to end without having to use a command line client. +- Add additional BYOM (Bring Your Own Manifest) layers to your cluster profiles to perform ad-hoc customized deployments on the cluster. +- You can now import and manage existing clusters running in your private VMware environment behind a proxy. +- Discover charts deployed on your existing clusters and convert them into a cluster profile to use as a template for future cluster deployments. +- Enhanced cluster profile builder experience with several usability enhancements. + +## February 07, 2021 - Release 1.7.0 + +The following features and enhancements were released as part of 1.7.0: + +- Support for existing Kubernetes clusters that were not deployed by Spectro Cloud to be imported into the Spectro Cloud platform for visibility, management, and additional capabilities such as application lifecycle management. +- Automated as well as on-demand OS updates to keep cluster nodes up-to-date with the latest security fixes and enhancements. +- Modularize cluster profiles as Core Infra, Add-on, and Full profiles; apply multiple add-on profiles to a cluster. +- Optimize AWS cloud cost utilizing spot instance pricing for cluster worker node pools. +- Selectively upgrade the on-premises Spectro Cloud instance to a desired version, as opposed to always having to upgrade to the latest version. + +## December 23, 2020 - Hotfix 1.6.4 + +This release adds a fix for the permissions of vSphere GET folders. + +## December 13, 2020 - Release 1.6.0 + +Our on-premises version gets attention to finer details with this release: + +- The Spectro Cloud database can now be backed up and restored. +- Whereas previous on-premises versions allowed upgrading only to major versions, this release allows upgrading to minor versions of the Spectro Cloud platform. Upgrades to the Spectro Cloud platform are published to the Spectro Cloud repository, and a notification is displayed on the console when new versions are available.
+- Monitoring the installation using the dedicated UI now provides more details when [migrating](/enterprise-version/deploying-an-enterprise-cluster/#migratequickstartmodeclustertoenterprise) from the quick start version to the enterprise version. The platform installer contains a web application called the Supervisor, which provides detailed progress of the installation. +- AWS and GCP clusters can now be provisioned from an on-premises Spectro Cloud system. + +On the VMware front, we have: + +- removed the dependency on the HA Proxy Load balancer for creating clusters via DHCP. +- introduced dynamic folder creation in vCenter. This applies to every cluster and all of the cluster's virtual machines. +- enabled support for DNS mapping in search domains on vSphere. + +Other new features: + +- New customers can now sign up for free trials of Spectro Cloud. When ready, it is easy to upgrade plans and set up automatic payments using credit/debit cards. +- Pack constraints have been enabled to reduce the chances of cluster deployment failures that might occur due to incorrect values being set. Pack constraints are a set of rules defined at the pack level to validate the packs for a Profile or a Cluster before it gets created or updated. Packs must be validated before the cluster is submitted to ensure a successful deployment. +- Compatibility for Portworx version 2.6.1, Calico version 3.16, and newer versions of [Kubernetes](/integrations/kubernetes/). + +## December 03, 2020 - Hotfix 1.5.7 + +In this hotfix, we added: + +- Compatibility for [Calico 3.16](https://www.projectcalico.org/whats-new-in-calico-3-16/). +- The on-premises version now allows specifying [CIDR for pods](/enterprise-version/deploying-the-platform-installer/#deployplatforminstaller) to allocate them an exclusive IP range. +- It also allows allocating an IP range in the CIDR format exclusive to the service clusters. + +The IP ranges for the pods, service clusters, and your IP network must not overlap with one another. This hotfix provides options to prevent node creation errors due to IP conflicts. + +## November 05, 2020 - Hotfixes 1.5.1 through 1.5.6 + +A host of hotfixes were applied for a smoother on-premises operation: + +| Version | Feature | +| ------- | --------------------------------------------------------------------------------------------- | +| 1.5.6 | Added improvements for faster kCh usage calculation. | +| 1.5.5 | Patched the `govc vm.info` command to allow spaces in datacenter names. | +| 1.5.4 | Changes to use client updates instead of patches for _vendorcrd_ installations. | +| 1.5.3 | Improved resource utilization by deleting a machine when a node is not available. | +| 1.5.2 | Updates to keep sessions alive for SOAP and REST clients using the `keepalive` command. | +| 1.5.1 | Fixed a bug that caused a trailing line to be added in the `vsphere.conf` file. | + +## October 23, 2020 - Release 1.5.0 + +The 1.5.0 release of the Spectro Cloud platform consists of the following features and enhancements: + +- On-premises version of the Spectro Cloud platform for deployment into private VMware environments. +- Cloud accounts can now be created at the tenant scope, to allow accounts to be shared across all projects in the tenant. +- Cross-compute cluster deployment of Private Cloud Gateway clusters for high-availability purposes. +- SSH Public Key management to easily select the desired keys and share them across Kubernetes clusters within a project.
+- Improvements to cloud settings interface to simplify the creation of multiple failure domains during cluster provisioning. + +## September 10, 2020 - Release 1.2.0 + +With release 1.2.0, users get more control and added support: + +- Users can now access Kubernetes cluster certificates and renew them. +- For VMware, multi-domain support for private gateways is now available. +- Also for VMware, layout changes have been made to improve usability. + +## August 21, 2020 - Release 1.1.0 + +Release 1.1.0 is all about enhancing the user experience, providing tighter controls on clusters, and important bug fixes. + +- On the UI side, the login has been made faster. Additionally, users can now set up alerts to monitor cluster health. A `Revert to default values` button for cluster profiles is added. +- Clusters are easier to launch with the `Copy from Master` button; bad deployments are now prevented for certain instances; scaling is easier with the `Scale Strategy`. +- Private gateways can now be provisioned on static IPs with greater control on IP allocation using [IP pools](/clusters?clusterType=vmware_cluster#ipaddressmanagement). +- Updates to the CLI tool include more [flags](/registries-and-packs/spectro-cli-reference?cliCommands=cli_push#flags) to the `PUSH` command for forcibly overwriting registry packs. +- Bug Fixes: BET-806 related to SSO login and BET-403 related to validation of dependencies for availability zones have been resolved. + +## July 3, 2020 - Release 1.0.2 + +- Minor bug fixes for release 1.0.1. +- Updates to the [orchestration engine](https://www.spectrocloud.com/webinars/cluster-api-and-the-spectro-cloud-orchestration-engine/) for the new regions. +- Minor updates to the Istio integration. + +## July 3, 2020 - Release 1.0.1 + +- New Regions for AWS > Spectro Cloud is now available for deploying AWS clusters in the European regions. +- Changes to the pricing structures > more usage = lesser price per kCh. + +## June 23, 2020 - Release 1.0 + +The following features are included as part of Spectro Cloud 1.0: + +- Multi cluster deployment and lifecycle management of Kubernetes clusters across multiple cloud environments—AWS, Azure, and VMWare. +- Security-hardened, compliant, and conformant Kubernetes clusters out of the box. +- Cluster construction templates called Cluster Profiles. +- Platform extensibility through custom integration packs. +- Grouping of clusters logically into Projects for governance and control. +- Rich set of enterprise features such as granular RBAC, Single Sign-on, detailed Audit logs, etc. + + +:::info +Spectro Cloud adopts relevant security best practices for operating systems, Kubernetes components, and cloud environments. All Spectro Cloud container images are scanned for CVEs before a release. While Spectro Cloud takes ownership of securing the cluster infrastructure, there may be additional 3rd party integrations installed on the Kubernetes clusters provisioned. Security of such 3rd party integrations, including their container images and associated configurations, is the responsibility of the provider. 
+::: \ No newline at end of file diff --git a/docs/docs-content/security/_category_.json b/docs/docs-content/security/_category_.json new file mode 100644 index 0000000000..adf9e585bb --- /dev/null +++ b/docs/docs-content/security/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 140 +} diff --git a/docs/docs-content/security/core-principles.md b/docs/docs-content/security/core-principles.md new file mode 100644 index 0000000000..a5f1166969 --- /dev/null +++ b/docs/docs-content/security/core-principles.md @@ -0,0 +1,98 @@ +--- +sidebar_label: "Core Principles" +title: "Core Principles" +description: "Learn about Spectro Cloud security principles for Palette." +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["security"] +--- + + + +Security is about controlling who can interact with your information, what they can do with it, and when they can interact with it. + +We use the Confidentiality, Integrity, and Availability (CIA) Triad as the framework that guides the security and controls we provide in our products and services. This framework is often extended with Authentication, Authorization, and Auditing. Components of the CIA Triad are described below. +
+ +- **Confidentiality**: Preserve authorized restrictions on information access and disclosure to protect personal privacy and proprietary data. This includes confirming identity and access rights to resources. + + +- **Integrity**: Protect data against unauthorized modification - either accidental or deliberate. + + +- **Availability**: Protect the services that provide data access. + + +- **Authorization**: Apply access rights through privileges or access levels to secure resources such as data, services, files, applications, and more. + + +- **Authentication**: Confirm the identity of the entity that wants access to a secure system. + + +- **Auditing**: Track implementation-level and domain-level events to ensure certain actions have been performed in the product. + + +# Core Principles + +Our security philosophy is grounded in the following core principles that we apply to our decision-making and product. + +
+ +## Secure by Design + +Your data security is a core business requirement, not just a technical feature. We apply this principle during the design phase of our product feature development lifecycle to dramatically reduce the number of exploitable flaws and prevent them from being introduced in a release. + +
+ +## Secure by Default + +We believe that security should be the default setting for all of our systems and software. Our products are secure to use out-of-the-box with little or no configuration needed and at no additional cost, such as audit logs and access control for sensitive information. Palette also supports Multi-Factor Authentication (MFA) using external Identity Providers (IDP), such as Okta. +
+ +## Never Rely Just on Obscurity + +We believe that using security through obscurity by itself is the absence of a security strategy. While some organizations use this method as their main security method, it puts their network at risk if an attacker gains access to obscure resources. + +Determined attackers use various methods to discover the hidden details of a system, and discovery eventually happens - either accidentally or deliberately. We believe that while obscurity alone is not a robust security strategy, it can be layered with security policies and controls. This is the principle of Defense in Depth. + +
+ +## Defense in Depth + +We believe security should be layered and redundant with multiple defenses in place to protect against different types of attack. The intent is to provide redundancy in the event a security control fails or a vulnerability is exploited. + +
+ +## Least Privilege + +This principle encourages system designers and implementers to allow runtime code with only the permissions needed to complete the required tasks and no more. + +We use the principle of least privilege to ensure that all users have only the necessary access rights to fulfill their job roles. To ensure the security of our users and systems, we use mechanisms such as defined access rights, regular reviews, restricted privileges, and system monitoring. + +
+ +## Secrets Handling + +We use the following methods for secrets handling, which contribute to a robust and resilient security infrastructure. + +
+ +- Secure password manager. + + +- Dynamic secret retrieval, which automates the secret rotation process to reduce the risk of unauthorized access and limit sensitive data exposure. + + +- MFA and Single Sign-On (SSO). + + + +## Continuous Improvement + +We believe security is an ongoing process, and we are committed to constantly improving our security posture through regular assessment and testing. + +We review and audit our internal setup regularly to ensure our employees have access to the tools they need while maintaining strong security standards. + + diff --git a/docs/docs-content/security/lifecycle/_category_.json b/docs/docs-content/security/lifecycle/_category_.json new file mode 100644 index 0000000000..094470741d --- /dev/null +++ b/docs/docs-content/security/lifecycle/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 10 +} diff --git a/docs/docs-content/security/lifecycle/lifecycle.md b/docs/docs-content/security/lifecycle/lifecycle.md new file mode 100644 index 0000000000..daf3539f6e --- /dev/null +++ b/docs/docs-content/security/lifecycle/lifecycle.md @@ -0,0 +1,26 @@ +--- +sidebar_label: "Lifecycle" +title: "Spectro Cloud Secure Development Lifecycle" +description: "Learn how Spectro Cloud applies security throughout its development lifecycle." +icon: "" +hide_table_of_contents: false + +--- + + + + +Our comprehensive approach to security is ingrained in each stage of our development lifecycle. From initial design and coding to testing and deployment, our processes are designed to identify, prevent, and mitigate security risks to ensure we deliver reliable and secure solutions. + +![Secure development flow from feature definition and design to development and release](/security_dev_lifecycle.png) + +Our Security team reviews early in the design process and identifies real and potential issues, which are logged in a ticketing system. Throughout development and before release we conduct more reviews and automated scans for common vulnerabilities. Scans go beyond our own code to include third-party libraries and container images. Should any vulnerabilities be found, we block the release and apply remediations. Our Security team must approve all our releases. + + + +## Resources + +- [Secure Development](secure-development.md) + + +- [Release Process](release-process.md) diff --git a/docs/docs-content/security/lifecycle/release-process.md b/docs/docs-content/security/lifecycle/release-process.md new file mode 100644 index 0000000000..7372545da5 --- /dev/null +++ b/docs/docs-content/security/lifecycle/release-process.md @@ -0,0 +1,45 @@ +--- +sidebar_label: "Release Process" +title: "Release Process" +description: "Learn about Spectro Cloud's release process for Palette." +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +--- + + + + + +# Release Process + +We use semantic versioning for releases, where release versions follow the *Major.Minor.Patch* numbering pattern across all components, and we utilize and maintain *Integration*, *Stage*, and *Production* environments. + +
+ +## Checklist + +Our release process includes a checklist of the features planned for release to ensure their completion or to ensure a completion plan is in place. + +When all pre-deployment checklist items are complete, stakeholders review the checklist to make an informed decision about the state of the release and do the following: + +
+ +- Identify any steps that have not been completed. + + +- Request additional information. + + +- Follow up as needed. + + +## Signoff + +A new version deployment will not proceed until all stakeholders have signed off. + +
+ +## Backup + +We back up the current release before starting a new one. Should a rollback be required and patching is not an option, a rollback request is submitted to the Spectro Cloud DevOps team. The DevOps team will restore from the backup and revert the production SaaS instance to the prior version. diff --git a/docs/docs-content/security/lifecycle/secure-development.md b/docs/docs-content/security/lifecycle/secure-development.md new file mode 100644 index 0000000000..96fdecd38c --- /dev/null +++ b/docs/docs-content/security/lifecycle/secure-development.md @@ -0,0 +1,23 @@ +--- +sidebar_label: "Secure Development" +title: "Secure Development" +description: "Learn about Spectro Cloud's secure product development." +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +--- + + + + + +# Secure Development + +Our proactive *shift left* approach to making security an integral part of our development process ensures we detect and address vulnerabilities and design flaws early in the development cycle. By integrating security measures into the development process, we improve our software quality and reduce vulnerabilities while minimizing manual intervention and potential human error. + +We integrate security measures with our Continuous Integration/Continuous Delivery (CI/CD) pipeline. By regularly monitoring and improving our software to ensure product security, we are able to deliver high-quality solutions more quickly to you. + +We also employ a comprehensive suite of security scans that cover various aspects of our software: containers, container images, Kubernetes, source code, operating systems, and configurations. This extensive coverage enables us to identify and address a wide range of potential security issues before they can impact our end users. + +We connect the results of our security scans directly to our ticketing system. This seamless integration ensures we promptly address any identified vulnerabilities or issues and validate them during our release checklist activities. +In addition, we continually evaluate and adopt new security tools and practices to stay ahead of evolving threats. Our investment in security automation and tools demonstrates our commitment to safeguarding your data and maintaining the highest standards of software quality and security. diff --git a/docs/docs-content/security/product-architecture/_category_.json b/docs/docs-content/security/product-architecture/_category_.json new file mode 100644 index 0000000000..455b8e4969 --- /dev/null +++ b/docs/docs-content/security/product-architecture/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 20 +} diff --git a/docs/docs-content/security/product-architecture/data-encryption.md b/docs/docs-content/security/product-architecture/data-encryption.md new file mode 100644 index 0000000000..d42377cef0 --- /dev/null +++ b/docs/docs-content/security/product-architecture/data-encryption.md @@ -0,0 +1,55 @@ +--- +sidebar_label: "Data Encryption" +title: "Data Encryption" +description: "Learn about Palette security controls for data and communications." +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["security"] +--- + + +Palette has security controls for the operating system, containers, and Kubernetes. Data is protected with secure keys, encryption, and secure communication, standard authentication and authorization, and API security. Audit logs record actions taken on the platform. Review the [Audit Logs](../../audit-logs/audit-logs.md) guide to learn how to access and use them. + + +
+ +## Data At Rest Encryption + +Tenant data is encrypted using a 64-bit cryptographically secure tenant key. A unique tenant key is generated for each tenant. The tenant key is encrypted using the system root key and is stored in the database. The system root key is stored in the cluster’s etcd key-value store. All message communication uses tenant-specific channels. + +The following secure keys are unique and generated for each installation: + +
+ +- **Root Key**: Encrypts the tenant-specific encryption key. + + +- **JSON Web Token (JWT) signature key**: Used to sign the JWT token. + + +- **Hash Salt**: Used to hash the user password and email ID. + + +- **Tenant key**: A 64-bit cryptographically secure tenant key encrypts tenant data stored in the management cluster, such as user account name, user email ID, and tenant cloud credentials. + + +In self-managed deployments, secure keys are generated during installation and stored as secrets in the management cluster’s etcd key-value store. + +
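+
+A minimal sketch of the envelope-encryption pattern described above is shown below: a root key wraps the per-tenant key, and the tenant key encrypts tenant data. The sketch is illustrative only; it uses AES-256-GCM with in-memory keys for brevity, and the actual key sizes, algorithms, and storage that Palette uses may differ.
+
+```go
+package main
+
+import (
+    "crypto/aes"
+    "crypto/cipher"
+    "crypto/rand"
+    "fmt"
+)
+
+// seal encrypts plaintext with a 256-bit key using AES-GCM and prepends the nonce.
+func seal(key, plaintext []byte) []byte {
+    block, _ := aes.NewCipher(key)
+    gcm, _ := cipher.NewGCM(block)
+    nonce := make([]byte, gcm.NonceSize())
+    rand.Read(nonce)
+    return gcm.Seal(nonce, nonce, plaintext, nil)
+}
+
+func main() {
+    rootKey := make([]byte, 32)   // system root key, held by the management cluster
+    tenantKey := make([]byte, 32) // per-tenant data encryption key
+    rand.Read(rootKey)
+    rand.Read(tenantKey)
+
+    // The tenant key is stored only in wrapped (encrypted) form.
+    wrappedTenantKey := seal(rootKey, tenantKey)
+
+    // Tenant data is encrypted with the tenant key before it is persisted.
+    ciphertext := seal(tenantKey, []byte("tenant cloud credentials"))
+
+    fmt.Printf("wrapped key: %d bytes, ciphertext: %d bytes\n", len(wrappedTenantKey), len(ciphertext))
+}
+```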
+ +## Data In Transit Encryption + +Palette secures data in motion using an encrypted Transport Layer Security (TLS) communication channel for all internal and external interactions.

+ +- **End User Communication**: Public certificates are created using a cert-manager for external API/UI communication. In self-hosted deployments, you can import an optional certificate and private key to match the management cluster Fully Qualified Domain Name (FQDN). + + +- **Inter-Service Communication**: Services in the management cluster communicate over HTTPS with self-signed certificates and a Rivest–Shamir–Adleman (RSA) 2048-bit key. + + +- **Database Communication**: The database connection between Palette internal services that are active in the management cluster and MongoDB is protected by TLS with authentication enabled. + + +- **Message Bus**: A NATS message bus is used for asynchronous communication between Palette management clusters and tenant clusters. NATS messages are exchanged using the TLS protocol, and each tenant cluster uses dedicated credentials to connect to the message bus. Authentication and authorization policies are enforced in the NATS deployment to ensure message and data isolation across tenants. + diff --git a/docs/docs-content/security/product-architecture/platform-security.md b/docs/docs-content/security/product-architecture/platform-security.md new file mode 100644 index 0000000000..bce18f27fe --- /dev/null +++ b/docs/docs-content/security/product-architecture/platform-security.md @@ -0,0 +1,34 @@ +--- +sidebar_label: "Platform Security" +title: "Platform Security" +description: "Learn how Palette provides platform infrastructure security." +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["security"] +--- + + +Based on the deployment model, Palette is composed of a multi-layer deployment stack: Cloud > VM > OS > Container Runtime > Kubernetes > Pods. + +To ensure maximum security, we follow defense-in-depth principles that prescribe securing each layer in a multi-layer deployment stack. + +For SaaS deployment models, cloud and Virtual Machine (VM) security are handled by SaaS platform operation security controls. For on-prem deployment models, the customer’s data center infrastructure team typically handles cloud and VM security. + +## Operating Systems + +The operating system that Palette uses for its Kubernetes management cluster is Ubuntu 20.04 LTS. We follow CIS standards to harden the operating system. + +These hardened images are used to launch control planes and worker nodes for the Kubernetes cluster hosting Palette. Additionally, all OS images are scanned for vulnerabilities prior to being published to a repository. + +## Container Security + +Spectro Cloud uses Containerd as its container runtime. Containerd is an industry-standard container runtime that emphasizes simplicity, robustness, and portability in managing the complete container lifecycle. It runs as a daemon on Ubuntu instances. + +Container images for various application services are built using distroless images, which have significantly fewer packages and improve security by reducing the attack surface. + +All container images are scanned for vulnerabilities prior to being published to a repository or deployed to the SaaS platform. + +## Kubernetes Hardening + +We secure Palette's Kubernetes version based on Center for Internet Security (CIS) Kubernetes benchmarks. Several additional rules are also enforced on components such as the API Server, Controller Manager, Scheduler, and Kubelet.
diff --git a/docs/docs-content/security/product-architecture/product-architecture.md b/docs/docs-content/security/product-architecture/product-architecture.md new file mode 100644 index 0000000000..17cb71d158 --- /dev/null +++ b/docs/docs-content/security/product-architecture/product-architecture.md @@ -0,0 +1,89 @@ +--- +sidebar_label: "Secure Product Architecture" +title: "Secure Product Architecture" +description: "Learn about the integrity of Palette's secure architecture." +icon: "" +hide_table_of_contents: false +tags: ["security"] +--- + + +In addition to the security principles we adhere to and our secure development lifecycle, we provide a cohesive security architecture for Palette. + +## Secure Product Architecture + +Palette uses a microservices-based architecture, and we take steps to ensure each service is secured. Product functionality is broken down logically into isolated services within containers. Containers are deployed in a Kubernetes cluster, called a management cluster, which Palette hosts and manages in Software as a Service (SaaS) mode or that users can host and manage in a self-hosted environment. + +Palette supports three architecture models: multi-tenant SaaS, dedicated SaaS, and self-hosted, which includes support for air-gapped environments. These flexible deployment models allow us to adapt to existing requirements in terms of separating responsibilities and network restrictions. + +
+ +- **Multi-tenant SaaS**: The management plane is hosted in AWS across three regions that we manage: us-east-1, us-west-1, and us-west-2. Each customer occupies a tenant in our multi-tenant cloud environment. Our Operations team controls when to upgrade the management plane. + + +- **Dedicated SaaS**: The management plane is hosted in a cloud or region that you specify in our Spectro Cloud cloud account with a dedicated instance that we manage. In this scenario, you decide when to upgrade the management plane. + + +- **Self-hosted**: The management plane is hosted in your environment. It can be on-prem VMware vSphere, OpenStack, bare metal, in a public cloud that manages your compute instances, or a managed Kubernetes cluster such as Amazon Elastic Kubernetes Service (EKS), Azure Kubernetes Service (AKS), and Google Kubernetes Engine (GKE). + + +
+ +![A diagram of Palette deployment models](/architecture_architecture-overview-deployment-models.png) + +
+ + +Palette’s robust security measures safeguard your data and ensure the integrity of our services. We adhere to industry-leading standards and continuously refine our practices to provide the highest level of security. Palette infrastructure safeguards data in your Kubernetes production environment with its zero-trust architecture, granular Role-Based Access Control (RBAC), immutable Linux distributions ([Kairos](https://kairos.io/)), and hardened clusters and Kubernetes packs. + +Palette's security controls ensure data protection in SaaS operation at the management platform level and the [tenant](../../glossary-all.md#tenant) cluster level. To learn more, refer to [SaaS Operation](saas-operation.md). In self-hosted operation, you must ensure security controls in your environment. Find out more about self-hosted deployment in [Self-Hosted Operation](self-hosted-operation.md). + + +## Multi-tenancy + +Palette is a multi-tenant SaaS platform in which every tenant represents a customer. We ensure tenant isolation through the following design principles and techniques: + +
+ +- **Network isolation**: Tenant clusters are created in the tenant’s public cloud accounts or in private data centers. Customers cannot intercept network traffic in other tenant clusters. Access to tenant cluster APIs through the cluster’s kubeconfig file is restricted to the tenant. + + +- **Data isolation**: Palette applies a tenant filter to every operation to ensure users' access is restricted to their own tenant. + + +- **Tenant Data Encryption**: Tenant data is encrypted, and all message communication uses tenant-specific channels. + + +- **Audit Policies**: We record all actions taken on the platform and provide a comprehensive report for tracking purposes. + + +- **Noisy Neighbor Prevention**: In the SaaS deployment model, we use AWS Load Balancers and AWS CloudFront with a web application firewall (WAF) for all our public-facing services. These services benefit from the protections of AWS Shield Standard, which defends against the most common and frequently occurring network and transport layer Distributed Denial-of-Service (DDoS) attacks that target applications. This ensures that excessive calls from a tenant do not adversely affect other tenants' use of the platform. + + +## Palette Authentication & Authorization + +Palette fully supports RBAC and two authentication modes: + +
+ +- *Local authentication* and *password policy*
+ + With local authentication, a user email serves as the ID, and a password is compared with the one-way hash stored in the database to authenticate users to a tenant. The platform administrator can set password policy to control the requirements for password length, rule, and expiration. + + +- *Single Sign-On (SSO)* and *Multi-Factor Authentication (MFA)*
+ + In these modes, the tenant is configured to have Security Assertion Markup Language (SAML) 2.0 Identity Provider (IDP) integrations. If the IDP requires MFA, you are redirected to the IDP’s authentication page. SSO can also automatically map a user to one or more user groups in the tenant. + + +## API Security + +Palette uses JSON Web Token (JWT)-based authentication and authorization for Representational State Transfer (REST) API access over HTTPS. + +The authentication token is valid for a limited time. If the token is about to expire, you can request a token refresh before making other API calls. + +Palette has a common API gateway validation service that ensures there are no incorrect parameter values or potential vulnerabilities, such as Structured Query Language (SQL) injection or cross-site scripting. + +You can use the gateway validation service log to trace APIs with a unique ID, Tenant UID, or Session UID. To avoid revealing unnecessary information, all UIDs are 48-bit random hex strings. + +Refer to the [API Key Authentication](../../user-management/user-authentication.md#api-key) guide for details. diff --git a/docs/docs-content/security/product-architecture/saas-operation.md b/docs/docs-content/security/product-architecture/saas-operation.md new file mode 100644 index 0000000000..150fa82fdb --- /dev/null +++ b/docs/docs-content/security/product-architecture/saas-operation.md @@ -0,0 +1,46 @@ +--- +sidebar_label: "SaaS Operation" +title: "SaaS Operation" +description: "Learn about Palette security in a SaaS deployment." +icon: "" +hide_table_of_contents: false +sidebar_position: 20 +tags: ["security"] +--- + + +Palette can be deployed as a multi-tenant SaaS system in which each tenant represents a customer. Palette SaaS infrastructure is hosted in the public cloud within a logically isolated virtual network that has a private and a public subnet. The [control plane and worker nodes](saas-operation.md#control-plane-and-worker-nodes) for the Kubernetes cluster are launched in the private network.
+ +## Cloud Infrastructure Security + +In public cloud environments such as AWS, Azure, and GCP, Palette interacts directly with a cloud provider’s API endpoint for access using cloud credentials specified in the tenant. The tenant clusters can be deployed in a virtual private cloud (VPC), as described in [Tenant Cluster Security](tenant-cluster.md). + +This allows the SaaS controller to do the following: + +
+ +- Dynamically query cloud resources. + + +- Act as an orchestrator to initiate SaaS controller requests for deployments. + +When a Palette SaaS deployment must connect to an on-prem environment, such as VMware or MAAS, to deploy target Kubernetes clusters, a Private Cloud Gateway (PCG) component is deployed in the self-hosted environment as a virtual appliance (OVA). The PCG is Palette's on-prem component to enable support for isolated, private cloud or data center environments. + +The PCG pairs automatically with a tenant based on a randomly generated pairing code similar to the Bluetooth pairing process and acts as a proxy between Palette SaaS and private cloud endpoints, such as vCenter. The PCG uses an outgoing internet connection to the SaaS platform using static Network Address Translation (NAT) with Transport Layer Security (TLS). Refer to the [System Private Gateway](../../clusters/data-center/maas/architecture.md#system-private-gateway) reference page to learn more. +
+ +## Control Plane and Worker Nodes + +Control plane nodes and worker nodes in the Kubernetes cluster hosting Palette SaaS are launched in private subnets. All ports on the nodes are protected from external access. + +In self-hosted Palette installations, customers manage their own SSH public keys unless an agreement is in place for Spectro Cloud to maintain their environment. + +
+ +## Resources + +[Tenant Cluster Security](tenant-cluster.md) + diff --git a/docs/docs-content/security/product-architecture/self-hosted-operation.md b/docs/docs-content/security/product-architecture/self-hosted-operation.md new file mode 100644 index 0000000000..d1f9102579 --- /dev/null +++ b/docs/docs-content/security/product-architecture/self-hosted-operation.md @@ -0,0 +1,31 @@ +--- +sidebar_label: "Self-Hosted Operation" +title: "Self-Hosted Operation" +description: "Learn about Palette security in a self-Hosted deployment." +icon: "" +hide_table_of_contents: false +sidebar_position: 30 +tags: ["security"] +--- + + + + +# Self-Hosted Operation + +In self-hosted operation, where Palette is typically deployed on-prem behind a firewall, you must ensure your environment has security controls. Palette automatically generates security keys at installation and stores them in the management cluster. You can import an optional certificate and private key to match the Fully Qualified Domain Name (FQDN) management cluster. Palette supports enabling disk encryption policies for management cluster virtual machines (VMs) if required. For information about deploying Palette in a self-hosted environment, review the [Self-Hosted Installation](../../enterprise-version/enterprise-version.md) guide. + +In self-hosted deployments, the Open Virtualization Appliance (OVA) can operate in stand-alone mode for quick Proof of Concept (POC) or in enterprise mode, which launches a three-node High Availability (HA) cluster as the Palette management cluster. The management cluster provides a browser-based web interface that allows you to set up a tenant and provision and manage tenant clusters. You can also deploy Palette to a Kubernetes cluster by using the Palette Helm Chart. To learn more, review the [Install Using Helm Chart](../../enterprise-version/deploying-palette-with-helm.md) guide. + + +The following points apply to self-hosted deployments: + +
+ +- In deployments that require a proxy internet connection, both the Private Cloud Gateway (PCG) component and the management agent support SOCKS5 or HTTPS proxy. + + +- You manage your own SSH public keys unless an agreement is in place for Spectro Cloud to maintain your environment. + + +- Self-hosted Palette does not connect to Palette SaaS or send telemetry or customer data back to the Palette SaaS platform. diff --git a/docs/docs-content/security/product-architecture/tenant-cluster.md b/docs/docs-content/security/product-architecture/tenant-cluster.md new file mode 100644 index 0000000000..88f58c1eae --- /dev/null +++ b/docs/docs-content/security/product-architecture/tenant-cluster.md @@ -0,0 +1,92 @@ +--- +sidebar_label: "Tenant Cluster Security" +title: "Tenant Cluster Security" +description: "Learn about Palette security controls for tenant clusters." +icon: "" +hide_table_of_contents: false +sidebar_position: 40 +tags: ["security"] +--- + + +Tenant clusters are Kubernetes clusters that Palette deploys for customers. Tenant clusters can be launched in the customer's choice of public or private cloud or bare metal environment. Palette offers complete flexibility and control in designing these tenant clusters through a construct called [Cluster Profiles](../../glossary-all.md#cluster-profile). + +[Cluster profiles](../../cluster-profiles/cluster-profiles.md) are cluster construction templates. Palette deploys a Kubernetes cluster based on what the profile specifies. + +A cluster profile consists of core layers, which include an Operating System (OS), a Kubernetes distribution, networking, and storage, and any add-on layers such as monitoring, logging, and more. Palette offers several out-of-the-box choices for each profile layer in the form of packs and the flexibility for you to bring your own pack for use in Palette cluster profiles. + +Palette's flexibility and extensibility make the security of tenant clusters a shared responsibility, as listed in the table. + +|Layer |Out-of-the-box Pack | Custom Pack| +|:---------------|:---------|:--------------| +|Operating System |Spectro Cloud Responsibility|Customer Responsibility| +|Kubernetes|Spectro Cloud Responsibility|Customer Responsibility| +|Storage|Spectro Cloud Responsibility|Customer Responsibility| +|Networking|Spectro Cloud Responsibility|Customer Responsibility| +|Add-Ons|Spectro Cloud & Customer Responsibility|Customer Responsibility| + +We ensure our out-of-the-box core layer packs are secure. You ensure security for custom packs and add-on packs you bring to Palette. Palette provides defaults for its out-of-the-box add-on layers based on third-party best practices. You have the flexibility to tune the configuration to fit your needs, making security a shared responsibility.
+ +## Cloud Infrastructure Security + +In a public cloud, Kubernetes nodes in tenant clusters are deployed within a logically isolated virtual network that has private and public subnets. The control plane and worker nodes for the Kubernetes cluster are launched in a private network. All ports on the nodes are protected from external access. + +Each tenant cluster has a management agent that is deployed as a pod. This agent has an outbound internet connection to Palette using static Network Address Translation (NAT) with Transport Layer Security (TLS) protocol v1.2 or higher and a hardened cipher suite. The agent periodically reports health, heartbeat, and statistics and connects to Palette's public repository over HTTPS for any out-of-the-box integration packs. + +In a self-hosted environment, where Palette is typically deployed on-prem behind a firewall, you must ensure security controls in your environment. Palette automatically generates security keys at installation and stores them in the management cluster. You can import an optional certificate and private key to match the management cluster Fully Qualified Domain Name (FQDN). Palette supports enabling disk encryption policies for management cluster Virtual Machines (VMs) if required. + +
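+
+As a point of reference, the sketch below shows a client connection that requires TLS 1.2 or newer and restricts the cipher suites to a modern subset, similar in spirit to the hardened agent connection described above. The endpoint and the exact cipher selection are assumptions for illustration, not Palette's actual configuration.
+
+```go
+package main
+
+import (
+    "crypto/tls"
+    "fmt"
+    "net/http"
+)
+
+func main() {
+    // Require TLS 1.2+ and a restricted cipher list for TLS 1.2 connections.
+    // TLS 1.3 suites are not configurable in crypto/tls and are used as-is.
+    cfg := &tls.Config{
+        MinVersion: tls.VersionTLS12,
+        CipherSuites: []uint16{
+            tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+            tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+            tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+            tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+        },
+    }
+    client := &http.Client{Transport: &http.Transport{TLSClientConfig: cfg}}
+
+    // api.example.com is a placeholder endpoint, not a real Palette URL.
+    resp, err := client.Get("https://api.example.com/health")
+    if err != nil {
+        fmt.Println("request failed:", err)
+        return
+    }
+    defer resp.Body.Close()
+    fmt.Printf("negotiated TLS version: 0x%x\n", resp.TLS.Version)
+}
+```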
+ +## Hardened Operating System + +Palette provides Ubuntu or CentOS images for supported cloud environments. Images that are hardened according to Center for Internet Security (CIS) Benchmarks are used to launch control planes and worker nodes for the Kubernetes cluster. + +Palette's OS hardening utility performs the following tasks: + +
+ +- Applies the latest available security updates. + + +- Hardens SSH server parameters, network parameters (sysctl), and system files by ensuring proper file permissions are set. + + +- Removes legacy services and Graphical User Interface (GUI) packages. + +Palette allows you to set up OS patching policies. You can patch the base OS when you deploy the cluster. Refer to [OS Patching](/clusters/cluster-management/os-patching) to learn more. + +
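+
+To illustrate the kind of checks this hardening implies, the following sketch reads a few kernel parameters and compares them with commonly recommended CIS-style values. The parameters and expected values are examples only and are not Palette's exact hardening baseline.
+
+```go
+package main
+
+import (
+    "fmt"
+    "os"
+    "strings"
+)
+
+func main() {
+    // Example sysctl settings often required by CIS-style OS hardening.
+    checks := map[string]string{
+        "/proc/sys/net/ipv4/tcp_syncookies":          "1",
+        "/proc/sys/net/ipv4/conf/all/send_redirects": "0",
+        "/proc/sys/kernel/randomize_va_space":        "2",
+    }
+    for path, want := range checks {
+        raw, err := os.ReadFile(path)
+        if err != nil {
+            fmt.Printf("%s: unable to read (%v)\n", path, err)
+            continue
+        }
+        got := strings.TrimSpace(string(raw))
+        if got == want {
+            fmt.Printf("%s: OK (%s)\n", path, got)
+        } else {
+            fmt.Printf("%s: expected %s, found %s\n", path, want, got)
+        }
+    }
+}
+```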
+ +## Hardened Containers + +Container images for various application services are built using distroless images, which have significantly fewer packages and improve security by reducing attack surface. + +All container images are scanned for vulnerability using Palo Alto Networks Prisma Cloud (Twistlock) Defender before being published to a repository or deployed to the SaaS platform. + +
+ +## Hardened Kubernetes + +Spectro Cloud has a fully automated Kubernetes verification system that adds the newest patch version of Kubernetes to its public repository. All Kubernetes packs are hardened according to CIS Benchmarks. + +We assess major Kubernetes versions based on the extent of changes. + +Kubernetes run-time security support is achieved through a variety of add-on packages, such as Sysdig Falco and Twistlock. + +You can set a schedule to start Kubernetes conformance and compliance tests using kube-bench, kube-hunter, and Sonobuoy. These tests ensure tenant clusters are secure, compliant, and up to date. + +
+ +## Kubernetes Authentication & Authorization + +Kubernetes cluster authentication can optionally be integrated with Kubelogin for OpenID Connect (OIDC)-based authentication and authorization against an external Identity Provider (IDP). This enables group membership-based access control on different namespaces within the tenant Kubernetes cluster. Our Terraform provider also supports automatically setting role bindings on namespaces by user or group. +
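+
+For illustration, the sketch below verifies an OIDC ID token and reads its group claims with the community [go-oidc](https://github.com/coreos/go-oidc) library, which is roughly what an OIDC-aware client or webhook does before group-based access rules can be applied. The issuer URL, client ID, and claim names are placeholders and are not values tied to Palette or any specific IDP.
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+    "os"
+
+    "github.com/coreos/go-oidc/v3/oidc"
+)
+
+func main() {
+    ctx := context.Background()
+
+    // Placeholder issuer and client ID; use the values configured for your IDP.
+    provider, err := oidc.NewProvider(ctx, "https://idp.example.com")
+    if err != nil {
+        panic(err)
+    }
+    verifier := provider.Verifier(&oidc.Config{ClientID: "kubernetes"})
+
+    // The raw ID token would normally come from the login flow or kubeconfig plugin.
+    idToken, err := verifier.Verify(ctx, os.Getenv("ID_TOKEN"))
+    if err != nil {
+        panic(err)
+    }
+
+    // The claim names depend on how the IDP is configured.
+    var claims struct {
+        Email  string   `json:"email"`
+        Groups []string `json:"groups"`
+    }
+    if err := idToken.Claims(&claims); err != nil {
+        panic(err)
+    }
+    fmt.Printf("user %s belongs to groups %v\n", claims.Email, claims.Groups)
+}
+```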
+ +## Compliance & Security Scans + +You can initiate multiple scans on tenant clusters. These scans ensure clusters adhere to specific compliance and security standards. The scans also perform penetration tests to detect potential vulnerabilities. + +Palette supports four types of scans: compliance, security, conformance, and Software Bill of Materials (SBOM). Each scan generates reports with scan-specific details. You can initiate multiple scans of each type over time. In addition, Palette keeps a history of previous scans for comparison purposes. \ No newline at end of file diff --git a/docs/docs-content/security/security-bulletins/_category_.json b/docs/docs-content/security/security-bulletins/_category_.json new file mode 100644 index 0000000000..c3460c6dbd --- /dev/null +++ b/docs/docs-content/security/security-bulletins/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 30 +} diff --git a/docs/docs-content/security/security-bulletins/cve-index.md b/docs/docs-content/security/security-bulletins/cve-index.md new file mode 100644 index 0000000000..b828004208 --- /dev/null +++ b/docs/docs-content/security/security-bulletins/cve-index.md @@ -0,0 +1,48 @@ +--- +sidebar_label: "CVE Index" +title: "CVE Index" +description: "Security bulletins for Common Vulnerabilities and Exposures (CVEs) related to Palette" +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +slug: "index" +tags: ["security", "cve"] +--- + +The following is an index of all Palette-related CVEs and their disclosure year. Click on a CVE report to learn more. + +## 2023 + +- [September 01, 2023 - CVE-2023-22809 Sudo Vulnerability - 7.8 CVSS](cve-reports.md#september-01-2023---cve-2023-22809-sudo-vulnerability---78-cvss) + + +- [September 01, 2023 - CVE-2023-38408 OpenSSH Vulnerability - 9.8 CVSS](cve-reports.md#september-01-2023---cve-2023-38408-openssh-vulnerability---98-cvss) + + +- [September 01, 2023 - CVE-2023-29400 - HTML Template Vulnerability Security Advisory - 7.3 CVSS](cve-reports.md#september-01-2023---cve-2023-29400---html-template-vulnerability-security-advisory---73-cvss) + + +- [September 01, 2023 - CVE-2023-24539 - HTML Template Vulnerability Security Advisory - 7.3 CVSS](cve-reports.md#september-01-2023---cve-2023-24539---html-template-vulnerability-security-advisory---73-cvss) + + +- [September 01, 2023 - CVE-2023-24538 - HTML Template Vulnerability - Security Advisory - 9.8 CVSS](cve-reports.md#september-01-2023---cve-2023-24538---html-template-vulnerability---security-advisory---98-cvss) + + +- [September 01, 2023 - CVE-2023-29404 - CGO LDFLAGS Vulnerability Security Advisory - 9.8 CVSS](cve-reports.md#september-01-2023---cve-2023-29404---cgo-ldflags-vulnerability-security-advisory---98-cvss) + + +- [September 01, 2023 - CVE-2023-29402 - Go Modules Vulnerability Security Advisory - 9.8 CVSS](cve-reports.md#september-01-2023---cve-2023-29402---go-modules-vulnerability-security-advisory---98-cvss) + + +- [September 01, 2023 - CVE-2023-29402 - Go get Vulnerability Security Advisory - 9.8 CVSS](cve-reports.md#september-01-2023---cve-2023-29402---go-get-vulnerability-security-advisory---98-cvss) + + +- [September 01, 2023 - CVE-2023-24540 - HTML Template Security Advisory - 9.8 CVSS](cve-reports.md#september-01-2023---cve-2023-24540---html-template-security-advisory---98-cvss) + + +- [March 20, 2023 - CVE-2023-22809 Sudo Vulnerability in Palette - 7.8 CVSS](cve-reports.md#march-20-2023---cve-2023-22809-sudo-vulnerability-in-palette---78-cvss) + + +## 2022 + +- [August 4, 2022 - CVE-2022-1292 c_rehash script 
vulnerability in vSphere CSI pack - 9.8 CVSS](cve-reports.md#august-4-2022---cve-2022-1292-c_rehash-script-vulnerability-in-vsphere-csi-pack---98-cvss)
diff --git a/docs/docs-content/security/security-bulletins/cve-reports.md b/docs/docs-content/security/security-bulletins/cve-reports.md
new file mode 100644
index 0000000000..ba884e6175
--- /dev/null
+++ b/docs/docs-content/security/security-bulletins/cve-reports.md
@@ -0,0 +1,444 @@
+---
+sidebar_label: "CVE Reports"
+title: "CVE Reports"
+description: "Security bulletins for Common Vulnerabilities and Exposures (CVEs) related to Palette"
+icon: ""
+hide_table_of_contents: false
+sidebar_position: 0
+tags: ["security", "cve"]
+---
+
+# Security Bulletins
+
+
+
+
+## September 01, 2023 - CVE-2023-22809 Sudo Vulnerability - 7.8 CVSS
+
+
+The sudo program before version 1.9.12p2 mishandles extra arguments passed in the user-provided environment variables `SUDO_EDITOR`, `VISUAL`, and `EDITOR` when the `sudoedit` command is executed.
+
+The mishandling allows a local attacker to append arbitrary entries to the list of files to process. This can lead to privilege escalation. Affected versions are 1.8.0 through 1.9.12p1. The problem exists because a user-specified editor may contain the `--` argument that defeats a protection mechanism. For example, an attacker may set the editor to a value such as `EDITOR='vim -- /path/to/extra/file'`.
+
+<br />
+ +### Impact + + +This vulnerability affects the following Palette components: + +- Self-hosted Palette instances with versions older than 4.0.0 + + +- Private Cloud Gateways instances with versions older than 4.0.0 + + +- Clusters deployed with Palette versions older than 4.0.0 + +
+ +### Patches + +For self-hosted Palette environments, upgrade to Palette version 4.0.0 or greater. Upgrading Palette will automatically update the Operating System (OS). + +
+ +### Workarounds + +For clusters and Private Cloud Gateways, patch the OS. You can use the on-demand or scheduled features to apply the OS security patches. Refer to the [OS Patching](/clusters/cluster-management/os-patching) documentation for more information. + +
+ +### References + +- [CVE-2023-22809](https://nvd.nist.gov/vuln/detail/cve-2023-22809) + +
+ + + +## September 01, 2023 - CVE-2023-38408 OpenSSH Vulnerability - 9.8 CVSS + + +The PKCS#11 feature in the OpenSSH ssh-agent before version 9.3p2 has an insufficiently trustworthy search path. This may lead to remote code execution if an agent is forwarded to an attacker-controlled system. Code in the folder **/usr/lib** may be unsafe to load into the ssh-agent. This issue exists because of an incomplete fix for [CVE-2016-10009](https://nvd.nist.gov/vuln/detail/cve-2016-10009). + +
+ +### Impact + +This vulnerability affects the following Palette components: + +- Self-hosted Palette instances with versions older than 4.0.0 + + +- Private Cloud Gateways instances with versions older than 4.0.0 + + +- Clusters deployed with Palette versions older than 4.0.0 + +
+ +### Patches + +- For self-hosted Palette environments, upgrade to Palette version 4.0.0 or greater. Upgrading Palette will automatically update the Operating System (OS). + +
+ +### Workarounds + +- For clusters and Private Cloud Gateways, patch the OS. You can use the on-demand or scheduled features to apply the OS security patches. Refer to the [OS Patching](/clusters/cluster-management/os-patching) documentation for more information. + +
+ +### References + +- [CVE-2023-38408](https://nvd.nist.gov/vuln/detail/CVE-2023-38408) + + +## September 01, 2023 - CVE-2023-29400 - HTML Template Vulnerability Security Advisory - 7.3 CVSS + + +When using Go templates with actions in unquoted HTML attributes, such as `attr={{.}}`, unexpected output may occur due to HTML normalization rules if invoked with an empty input. This may allow the injection of arbitrary attributes into tags. + +
+
+### Impact
+
+
+No impact. We use the Go package [html/template](https://pkg.go.dev/html/template) and our HTML templates are static. Our templates do not contain the characters mentioned in the CVE. We also do not accept or parse any provided user data.
+
+<br />
+ +
+ +### Patches + +Not applicable. + +
+ +### Workarounds + +Not applicable. + +
+ +### References + +- [CVE-2023-29400](https://nvd.nist.gov/vuln/detail/CVE-2023-29400) + + +- [GO-2023-1753](https://pkg.go.dev/vuln/GO-2023-1753) + +## September 01, 2023 - CVE-2023-24539 - HTML Template Vulnerability Security Advisory - 7.3 CVSS + +Angle brackets `<>` are not considered dangerous characters when inserted into Cascading Style Sheets (CSS) contexts. Go templates containing multiple actions separated by a `/` character can result in unexpectedly closing the CSS context and allowing for the injection of unexpected HTML if executed with untrusted input. + +
+ +### Impact + +No impact. We use the Go package [html/template](https://pkg.go.dev/html/template) and our HTML templates are static. We also do not accept or parse any provided user data. + +
+ + +
+ +### Patches + +Not applicable. + +
+ + +### Workarounds + +Not applicable. + +
+
+
+### References
+
+- [CVE-2023-24539](https://nvd.nist.gov/vuln/detail/CVE-2023-24539)
+
+
+- [GO-2023-1751](https://pkg.go.dev/vuln/GO-2023-1751)
+
+
+## September 01, 2023 - CVE-2023-24538 - HTML Template Vulnerability - Security Advisory - 9.8 CVSS
+
+Go templates do not consider backticks as a JavaScript string delimiter and, as a result, do not escape them as expected. Backticks have been used since ES6 for JavaScript template literals. If a Go template contains an action within a JavaScript template literal, the action's contents can be used to terminate the literal and potentially inject arbitrary JavaScript code into the Go template.
+
+Go template actions are now disallowed inside JavaScript template literals, for example, `"var a = {{.}}"`, since there is no safe way to allow this behavior. This takes the same approach as github.com/google/safehtml. With this fix, `Template.Parse()` returns an error when it encounters actions inside JavaScript template literals. The ErrorCode has a value of 12. This ErrorCode is currently unexported but will be exported in the release of Go 1.21. Users who rely on the previous behavior can re-enable it using the `GODEBUG` flag `jstmpllitinterp=1`, with the caveat that backticks will now be escaped.
+
+<br />
+ +### Impact + +No impact. We use the Go package [html/template](https://pkg.go.dev/html/template) and our HTML templates are static. We also do not accept or parse any provided user data. + +
+ +### Affected Products + +Not applicable. + +
+ +### Patches + +Not applicable. + +
+ +### Workarounds + +Not applicable. + +
+
+
+### References
+
+- [CVE-2023-24538](https://nvd.nist.gov/vuln/detail/CVE-2023-24538)
+
+
+- [GO-2023-1703](https://pkg.go.dev/vuln/GO-2023-1703)
+
+
+## September 01, 2023 - CVE-2023-29404 - CGO LDFLAGS Vulnerability Security Advisory - 9.8 CVSS
+
+
+The `go` command can execute any code during the build process when using cgo. This can happen when using the `go get` command on a malicious module or any other command that builds untrusted code. It can also be triggered by linker flags specified through the `#cgo LDFLAGS` directive. Because the arguments of several non-optional flags are incorrectly treated as optional, disallowed flags can be smuggled through the LDFLAGS sanitization. This affects both the gc and gccgo compilers.
+
+<br />
+ +### Impact + +No impact. This is not a runtime issue and we do not compile untrusted code. + +
+ +### Affected Products + +Not applicable. + +
+ +### Patches + +Not applicable. + +
+ + +### Workarounds + +Not applicable. + +
+
+### References
+
+- [CVE-2023-29404](https://nvd.nist.gov/vuln/detail/CVE-2023-29404)
+
+
+- [GO-2023-1841](https://pkg.go.dev/vuln/GO-2023-1841)
+
+
+## September 01, 2023 - CVE-2023-29402 - Go Modules Vulnerability Security Advisory - 9.8 CVSS
+
+
+The go command may generate unexpected code at build time when using cgo. Using unexpected code with cgo can cause unexpected behavior in Go programs. This may occur when an untrusted module contains directories with newline characters in their names. Go modules retrieved using the command `go get` are unaffected. Modules retrieved using the legacy `GOPATH` mode, with the environment variable `GO111MODULE=off`, may be affected.
+
+<br />
+ +### Impact + +No impact. This is not a runtime issue and we do not compile untrusted code. + +
+ +### Affected Products + +Not applicable. + +
+ +### Patches + +Not applicable. + +
+ +### Workarounds + +Not applicable. + +
+
+### References
+
+- [CVE-2023-29402](https://nvd.nist.gov/vuln/detail/CVE-2023-29402)
+
+
+- [GO-2023-1839](https://pkg.go.dev/vuln/GO-2023-1839)
+
+
+## September 01, 2023 - CVE-2023-29402 - Go get Vulnerability Security Advisory - 9.8 CVSS
+
+The go command may execute arbitrary code at build time when using cgo. The arbitrary code execution may occur when the command `go get` is issued on a malicious module or when using any other command that builds untrusted code. This can be triggered by linker flags specified via a `#cgo LDFLAGS` directive. Flags containing embedded spaces are mishandled, allowing disallowed flags to be smuggled through the LDFLAGS sanitization by including them in the argument of another flag. This only affects the gccgo compiler.
+
+<br />
+ +### Impact + +No impact. This is not a runtime issue and we do not compile untrusted code. + +
+ +### Affected Products + +Not applicable. + +
+ +### Patches + +Not applicable. + +
+ +### Workarounds + +Not applicable. + +
+ + +### References + +- [CVE-2023-29402](https://nvd.nist.gov/vuln/detail/CVE-2023-29402) + + +- [GO-2023-1842](https://pkg.go.dev/vuln/GO-2023-1842) + + + +## September 01, 2023 - CVE-2023-24540 - HTML Template Security Advisory - 9.8 CVSS + + +Not all valid JavaScript whitespace characters are considered to be whitespace. JavaScript templates containing whitespace characters outside of the character set `\t\n\f\r\u0020\u2028\u2029` may not be properly sanitized during execution. + +
+
+### Impact
+
+No impact. We use the Go package [html/template](https://pkg.go.dev/html/template), and our HTML templates are static. We also do not accept or parse any provided user data.
+
+<br />
+ +### Patches + +Not applicable. + +
+
+### Workarounds
+
+Not applicable.
+
+### References
+
+- [CVE-2023-24540](https://nvd.nist.gov/vuln/detail/CVE-2023-24540)
+
+
+- [GO-2023-1752](https://pkg.go.dev/vuln/GO-2023-1752)
+
+
+
+
+## March 20, 2023 - CVE-2023-22809 Sudo Vulnerability in Palette - 7.8 CVSS
+
+A security vulnerability in the `sudo -e` option (also known as *sudoedit*) allows a malicious user with sudoedit privileges to edit arbitrary files. The Palette container `palette-controller-manager:mold-manager` incorporates a sudo version affected by a sudoers policy bypass when using sudoedit.
+
+All versions of Palette before v2.6.70 are affected.
+
+<br />
+ +#### Impact + +A local user with permission to edit files can use this flaw to change a file not permitted by the security policy, resulting in privilege escalation. + +
+
+#### Resolution
+
+* For Palette SaaS, this has been addressed and requires no user action.
+* For Palette self-hosted deployments, upgrade to version v2.6.70 or later to address the reported vulnerability.
+
+<br />
+ +#### Workarounds + +None. + +
+
+#### References
+
+* [CVE-2023-22809](https://nvd.nist.gov/vuln/detail/cve-2023-22809)
+
+
+## August 4, 2022 - CVE-2022-1292 c_rehash script vulnerability in vSphere CSI pack - 9.8 CVSS
+
+On May 3, 2022, OpenSSL published a security advisory disclosing a command injection vulnerability in the `c_rehash` script included with the OpenSSL library. Some operating systems automatically execute this script as a part of normal operations, which could allow an attacker to execute arbitrary commands with elevated privileges.
+
+Palette is not directly affected by this vulnerability. However, if your cluster profile uses the vSphere CSI pack at version v2.3 or below, it contains a vulnerable version of the `c_rehash` script.
+
+
+<br />
+ +#### Impact + +The `c_rehash` script does not sanitize shell metacharacters properly to prevent command injection. This script is distributed by some operating systems, and by extension, in container images, in a manner where it is automatically executed. On such operating systems, an attacker could execute arbitrary commands with the privileges of the script. + +
+
+#### Resolution
+
+This vulnerability is addressed in vSphere CSI pack version v2.6 and later.
+
+<br />
+ +#### Workarounds + +Update cluster profiles using the vSphere CSI pack to version v2.6 or greater. Apply the updated cluster profile changes to all clusters consuming the cluster profile. + +
+
+#### References
+
+- [CVE-2022-1292](https://nvd.nist.gov/vuln/detail/CVE-2022-1292)
\ No newline at end of file
diff --git a/docs/docs-content/security/security-bulletins/security-bulletins.md b/docs/docs-content/security/security-bulletins/security-bulletins.md
new file mode 100644
index 0000000000..19a1680fb5
--- /dev/null
+++ b/docs/docs-content/security/security-bulletins/security-bulletins.md
@@ -0,0 +1,35 @@
+---
+sidebar_label: "Security Bulletins"
+title: "Security Bulletins"
+description: "Palette Security bulletins for Common Vulnerabilities and Exposures (CVEs)."
+icon: ""
+hide_table_of_contents: false
+tags: ["security", "cve"]
+---
+
+
+The following are security advisories for Palette and other Spectro Cloud-related resources.
+
+Our security advisories follow the [CVSS standards](https://www.first.org/cvss/v3.1/specification-document#Qualitative-Severity-Rating-Scale).
+
+| Rating   | CVSS Score |
+|----------|------------|
+| None     | 0.0        |
+| Low      | 0.1 - 3.9  |
+| Medium   | 4.0 - 6.9  |
+| High     | 7.0 - 8.9  |
+| Critical | 9.0 - 10.0 |
+
+
+You can review Common Vulnerabilities and Exposures (CVE) for Palette in [CVE Reports](cve-reports.md). An index of all Palette-related CVEs is available in the [CVE Index](cve-index.md).
+
+
+## Resources
+
+
+- [CVE Reports](cve-reports.md)
+
+
+- [CVE Index](cve-index.md)
+
+<br />
\ No newline at end of file
diff --git a/docs/docs-content/security/security.md b/docs/docs-content/security/security.md
new file mode 100644
index 0000000000..7b2fa45aee
--- /dev/null
+++ b/docs/docs-content/security/security.md
@@ -0,0 +1,83 @@
+---
+sidebar_label: "Security"
+title: "Spectro Cloud Security"
+description: "Get an overview of Palette's security controls, security-aware culture, and where you can report any security issues."
+hide_table_of_contents: false
+sidebar_custom_props:
+  icon: "lock"
+tags: ["security"]
+---
+
+We view security as more than a feature. It is a fundamental aspect of our business and culture, built on transparency, continuous learning, and a security-first mindset.
+
+By instilling a sense of collective responsibility for security, everyone at Spectro Cloud contributes to our overall security posture. Our dedication to security helps protect your interests and enhances the quality and reliability of our software and services.
+
+<br />
+
+## Security-Aware Culture
+
+All Spectro Cloud employees receive mandatory security training. Our developers, who are the first line of defense through secure coding practices, receive additional training focused on software security. They also complete Open Worldwide Application Security Project (OWASP) Top 10 training to understand, identify, and mitigate the most critical security risks and vulnerabilities that affect web applications.
+
+<br />
+
+## Product Security
+
+Palette uses a microservices-based architecture, and we take steps to ensure each service is secured. Product functionality is broken down logically into isolated services within containers. Containers are deployed in a Kubernetes cluster, called a management cluster, that Palette hosts and manages in SaaS mode or that users can host and manage in a self-hosted environment. Learn more by reviewing [Secure Product Architecture](product-architecture).
+
+<br />
+ +## Compliance & Standards + +We believe adherence to industry standards and regulations is critical to maintaining the highest levels of security for our customers. We ensure our software complies with all relevant laws and regulations, and we continuously evaluate and update our compliance efforts to stay current with emerging regulations and requirements. To learn about our product certifications, check out the [Compliance](/compliance) reference. + +
+
+## Transparency
+
+We list any Common Vulnerabilities and Exposures (CVE) issues that affect Palette or any part of its infrastructure in our [Security Bulletins](security-bulletins), along with the applied fix and any workarounds.
+
+<br />
+ +## Report Security Issues + +Please contact our Security team at security@spectrocloud.com to report any security issues. + + +
+ +## Resources + + +- [Core Principles](core-principles.md) + + +- [Lifecycle](lifecycle/lifecycle.md) + + +- [Secure Development](lifecycle/secure-development.md) + + +- [Release Process](lifecycle/release-process.md) + + +- [Secure Product Architecture](product-architecture/product-architecture.md) + + +- [Platform Security](product-architecture/platform-security.md) + + +- [Data Encryption](product-architecture/data-encryption.md) + + +- [SaaS Operation](product-architecture/saas-operation.md) + + +- [Self-Hosted Operation](product-architecture/self-hosted-operation.md) + + +- [Tenant Cluster Security](product-architecture/tenant-cluster.md) + + +- [Security Bulletins](security-bulletins/security-bulletins.md) + + diff --git a/docs/docs-content/spectro-downloads.md b/docs/docs-content/spectro-downloads.md new file mode 100644 index 0000000000..3a41b65d95 --- /dev/null +++ b/docs/docs-content/spectro-downloads.md @@ -0,0 +1,87 @@ +--- +sidebar_label: "Downloads" +title: "Downloads" +description: "Overview of Palette downloads and their respective URL and checksums." +hide_table_of_contents: false +sidebar_position: 240 +sidebar_custom_props: + icon: "cloud-arrow-down" +tags: ["downloads"] +--- + + +The following Palette downloads are available: + + +## Self-Hosted + +You can deploy a self-hosted Palette to your environment. Refer to the [Self-Hosted Installation](/enterprise-version/) documentation for additional guidance on how to install Palette. Palette VerteX installation guide can be found in the [Palette VerteX install](/vertex/install-palette-vertex) document. + +
+
+:::caution
+
+
+Starting with Palette 4.0.0, the Palette CLI and the Helm Chart are the only supported methods for installing Palette. The Palette OVA installation method is only available for versions 3.4 and earlier. Refer to the [Install Enterprise Cluster](/enterprise-version/deploying-an-enterprise-cluster) or the [Kubernetes Install Helm Chart](/enterprise-version#kubernetesinstallhelmchart) guides for additional guidance on how to install Palette.
+
+:::
+
+<br />
+
+## SaaS - Private Cloud Gateway (PCG)
+
+
+Palette supports on-prem environments through the Private Cloud Gateway (PCG) component. PCG provides support for isolated private cloud or data center environments. When installed on-prem, PCG registers itself with Palette, allowing for secure communication between the SaaS portal and the private cloud environment. The gateway also enables end-to-end lifecycle management of Kubernetes clusters in private cloud environments directly from the SaaS portal.
+
+<br />
+ +:::caution + +Starting with Palette 4.0, the installation of PCG is done through the Palette CLI. Refer to the Palette CLI [PCG command](/palette-cli/commands/#pcg) document for installation guidance. + +::: + +
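+
+Whichever artifact you download from the tables below, it is good practice to verify the file against its published SHA256 checksum before use. The following is a minimal sketch; the URL shown is the v1.8.0 vSphere PCG image from the table below, used purely as an example.
+
+```shell
+# Download the artifact, then print its SHA256 digest.
+curl --location --output gateway-installer-v1.8.0.ova \
+  "https://software.spectrocloud.com/pcg/installer/v1.8.0/gateway-installer-v1.8.0.ova"
+sha256sum gateway-installer-v1.8.0.ova
+# Compare the printed digest with the checksum listed in the matching table row.
+```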
+ +### vSphere PCG Image + +|Version|URL| Checksum (SHA256) | +|---|---|---| +|1.8.0|https://software.spectrocloud.com/pcg/installer/v1.8.0/gateway-installer-v1.8.0.ova| `c860682c8e7dc55c6873ff1c5a0f337f91a74215b8cae92e4fa739b6ddc62720` | +|1.6.0|https://software.spectrocloud.com/pcg/installer/v1.6.0/gateway-installer-v1.6.0.ova| `2cf85c974e00524a2051be514484695ae51065af861bf1eb2c69aeb76816b0ff` | +|1.4.0|https://software.spectrocloud.com/pcg/installer/v1.4.0/gateway-installer-v1.4.0.ova| `67973c6ada136f64d9316dc05cda81d419997487c8007b6d58802bec12fb80dd` | +------ + +### MAAS PCG Image + +|Version|URL| Checksum (SHA256) | +|---|---|---| +|1.0.12|https://gcr.io/spectro-images-public/release/spectro-installer:1.0.12| `a229d2f7593d133a40c559aa0fb45feca8b0cd1b2fcebfe2379d76f60bfe038b`| +--------- + +### OpenStack PCG Image + +|Version|URL| Checksum (SHA256) | +|---|---|---| +|1.0.12|https://gcr.io/spectro-images-public/release/spectro-installer:1.0.12| `a229d2f7593d133a40c559aa0fb45feca8b0cd1b2fcebfe2379d76f60bfe038b`| +------- + + +## Palette CLI + +The Palette Command Line Interface (CLI) is a tool that you can use to interact with Palette programmatically. Check out the [Palette CLI](/palette-cli/install-palette-cli) document for installation guidance. + +|Version| Operating System | Checksum (SHA256) | +|---|---|---| +|4.0.2| [Linux-amd64](https://software.spectrocloud.com/palette-cli/v4.0.2/linux/cli/palette)| `01e6b9c73368319fe2855aedcf073526ab73b4ff635997257f8c10a11efd8f0c` | +|4.0.1| [Linux-amd64](https://software.spectrocloud.com/palette-cli/v4.0.1/linux/cli/palette)| `cd6b8fe35ded298fb5bdd0adcaea05774fcdcb62230430c6c8f915fa8464c49a` | +|4.0.0| [Linux-amd64](https://software.spectrocloud.com/palette-cli/v4.0.0/linux/cli/palette)| `44fe237d2dc8bec04e45878542339cbb5f279ed7374b5dfe6118c4cbe94132b4` | + + + +## Palette Edge CLI + +|Version| Operating System | Checksum (SHA256) | +|-------|---| --- | +|4.0.2 | [Linux-amd64](https://software.spectrocloud.com/stylus/v4.0.2/cli/linux/palette-edge) | `257d868b490979915619969815fd78aa5c7526faba374115f8d7c9d4987ba05d`| + diff --git a/docs/docs-content/system-profile.md b/docs/docs-content/system-profile.md new file mode 100644 index 0000000000..35d4a6b4d7 --- /dev/null +++ b/docs/docs-content/system-profile.md @@ -0,0 +1,76 @@ +--- +sidebar_label: "System Profiles" +title: "Understanding System Profiles" +description: "Understanding the System Profiles Concept and how they make Palette powerful" +hide_table_of_contents: false +sidebar_position: 50 +sidebar_custom_props: + icon: "bundles" +tags: ["system-profiles", "profiles"] +--- + + +System profiles provide a way to bootstrap an edge appliance with an initial set of virtual and containerized applications. Similar to [cluster profiles](/cluster-profiles), system profiles are templates created using one or more layers that are based on packs or helm charts. + +System profiles modeled on Palette UI should be downloaded and provided input to the edge system. Upon bootstrap, when the edge appliance registers back with the SaaS console, it links to the system profile. Any subsequent changes made to the profile after registration are propagated down to the edge appliance. +
+
+## Create a System Profile
+
+Here are the steps to create a system profile:
+
+1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin.
+
+
+2. Go to **Profiles**, open the **System Profile** tab, and click **Add System Profiles**.
+
+
+3. Provide profile information such as the system profile name, description (optional), and tags (optional).
+
+
+4. Add one or more layers using one of the following methods:
+
+    * **Add New Pack** - Add a Palette Pack from a pack registry or a [Helm Chart](registries-and-packs/helm-charts.md) from a chart registry. The public Spectro Cloud Pack registry and a few popular Helm chart repositories are already available out of the box. Additional pack registries or public/private chart registries can be added to Palette.
+    * **Add Manifest** - Layers can be constructed using raw manifests to provide Kubernetes resources unavailable via Palette or charts. In addition, pack manifests provide a pass-through mechanism wherein additional Kubernetes resources can be orchestrated onto a cluster along with the rest of the stack.
+
+
+5. Click the **Confirm and Create** button to save the configuration.
+
+
+6. Click the **Next** button to review the information, and select **Finish** to create and save the system profile.
+<br />
+
+**Note:** Palette enables the [Export](cluster-profiles/cluster-profile-import-export.md#export-cluster-profile) and [Import](cluster-profiles/cluster-profile-import-export.md#import-cluster-profile) of system profiles across multiple environments, projects, and tenants.
+
+## Download System Profile
+
+1. Log in to the Palette management console.
+
+
+2. Go to **Profiles** and open the **System Profile** tab.
+
+
+3. Open an existing system profile.
+
+
+4. Click the **Download System Profile** button at the bottom of the panel to download the profile definition as an archive (with extension `tgz`).
+<br />
+ + +## Sample Snapshots + +### System Profile dashboard + +![system-profile-1.png](/system-profile-1.png) + +### Add a new pack while creating the system profile + +![system-profile-2.png](/system-profile-2.png) + +### Add a new manifest while creating the system profile + +![system-profile-3.png](/system-profile-3.png) + +### Download system profile + +![system-profile-4.png](/system-profile-4.png) diff --git a/docs/docs-content/tenant-settings/_category_.json b/docs/docs-content/tenant-settings/_category_.json new file mode 100644 index 0000000000..e71bd72817 --- /dev/null +++ b/docs/docs-content/tenant-settings/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 170 +} diff --git a/docs/docs-content/tenant-settings/login-banner.md b/docs/docs-content/tenant-settings/login-banner.md new file mode 100644 index 0000000000..87ce02c30d --- /dev/null +++ b/docs/docs-content/tenant-settings/login-banner.md @@ -0,0 +1,85 @@ +--- +sidebar_label: "Login Banner" +title: "Login Banner" +description: "Learn how to set a login banner for your Palette tenant." +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["tenant-administration"] +--- + + +You can set up a login banner message that all users must acknowledge and accept before they log in to Palette. The message is limited to 1300 characters, and only plain text is supported. + +
+ + +:::caution + +The login banner message is only accessible when users attempt to log in to Palette through the tenant URL. Using the default Palette SaaS login URL of `https://console.spectrocloud.com` will not display the login banner message. Users of self-hosted Palette use the tenant URL defined during the Palette installation. + +::: + + + +## Prerequisite + +* Tenant admin access. + + +## Set Up Login Banner + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. Navigate to the left **Main Menu** and select **Tenant Settings**. + + +3. Next, click on **Platform Settings** from the **Tenant Settings Menu**. + + +4. Toggle the **Display Login Banner** button. + + +5. Fill out the text box with the message you want all Palette users in your tenant to acknowledge before a login. + + +
+ + ![A view of the tenant settings platform page with an example login banner message.](/tenant-settings_login-banner_settings-page-view.png) + + +
+ +6. Select **Save Message** to save your changes. + +
+ + + + +## Validate + +You can validate the banner message is set up correctly by using the following steps. + + +1. Log out of [Palette](https://console.spectrocloud.com). + + +2. From your web browser, navigate to the Palette tenant URL for your organization. + +
+ + :::info + + For Palette SaaS, the tenant URL is prefixed with your tenant name. For example, the tenant `spectrodocs` has the URL `spectrodocs-spectrocloud.console.spectrocloud.com`. Users of self-hosted instances of Palette should use the tenant URL defined during the Palette installation. + + ::: + + +3. Acknowledge the login banner message. + +
+ + ![A view of a tenant login banner message](/tenant-settings_login-banner_tenant-banner-view.png) + diff --git a/docs/docs-content/tenant-settings/tenant-settings.md b/docs/docs-content/tenant-settings/tenant-settings.md new file mode 100644 index 0000000000..b0c442c9c7 --- /dev/null +++ b/docs/docs-content/tenant-settings/tenant-settings.md @@ -0,0 +1,33 @@ +--- +sidebar_label: "Tenant Administration" +title: "Tenant Administration" +description: "Familiarize yourself with the available Tenant settings and how you can control the behavior of your tenant." +hide_table_of_contents: false +sidebar_custom_props: + icon: "gears" +tags: ["tenant-administration"] +--- + + + + +In Palette, the *tenant admin* role can access tenant settings. This role is the equivalent of a system administrator. As a tenant admin, you can configure Palette and its features to behave in a manner that best fits your organization and its users. + +
+
+:::info
+
+To learn more about the permissions and privileges available to the tenant role, refer to the [Tenant Scope Roles and Permissions](../user-management/palette-rbac/tenant-scope-roles-permissions.md) reference page.
+
+:::
+
+
+Use the following resources to become familiar with the available tenant settings and how to change them.
+
+
+## Resources
+
+- [Login Banner](login-banner.md)
+
+
+<br />
\ No newline at end of file
diff --git a/docs/docs-content/terraform.md b/docs/docs-content/terraform.md
new file mode 100644
index 0000000000..70041bf2bb
--- /dev/null
+++ b/docs/docs-content/terraform.md
@@ -0,0 +1,52 @@
+---
+sidebar_label: "Palette Terraform Support"
+title: "Palette Terraform Support"
+description: "Understanding, installing and operating Spectro Cloud's Terraform Provider."
+hide_table_of_contents: false
+sidebar_position: 200
+sidebar_custom_props:
+  icon: "terraform"
+tags: ["terraform"]
+---
+
+
+
+Palette supports the open-source Infrastructure as Code (IaC) software tool, [Terraform](https://www.terraform.io/), to provide consistent CLI workflow support to multiple cloud services.
+
+Terraform organizes cloud APIs into declarative configuration files. With Terraform, you can write configuration files, check whether the execution plan for a configuration matches your expectations before deployment, and apply the changes to all the managed resources.
+
+## Spectro Cloud Provider
+
+Spectro Cloud Palette's SaaS and on-premises management APIs can be used with the Spectro Cloud Terraform provider. The provider is available in the HashiCorp Terraform registry as [Spectro Cloud Provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs).
+<br />
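+
+As a quick, hedged illustration of getting started, the commands below write a minimal configuration that pulls the provider from the registry and then initialize it. The `api_key` and `project_name` argument names reflect the provider's registry documentation at the time of writing; verify them there before use, and treat the key value as a placeholder only.
+
+```shell
+# Write a minimal Terraform configuration that requires the Spectro Cloud provider.
+cat > main.tf << 'EOF'
+terraform {
+  required_providers {
+    spectrocloud = {
+      source = "spectrocloud/spectrocloud"
+    }
+  }
+}
+
+provider "spectrocloud" {
+  api_key      = "REPLACE_WITH_PALETTE_API_KEY" # placeholder, never commit real keys
+  project_name = "Default"
+}
+EOF
+
+# Download the provider plugin and check the configuration.
+terraform init
+terraform validate
+```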
+ +### Release Notes +Information about the latest changes in the Spectro Cloud provider can be found in the [release notes](https://github.com/spectrocloud/terraform-provider-spectrocloud/releases). +
+
+### Provider Documentation
+Detailed documentation on supported data sources and resources is available on the Terraform Spectro Cloud Provider [documentation](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) page.
+<br />
+ +## Prerequisites +The Spectro Cloud provider has the following requirements: +* Spectro Cloud Palette account - [Sign up for a free trial account](https://www.spectrocloud.com/free-trial) +* Terraform (minimum version 0.13+) +* Kubernetes/kubectl CLI (minimum version 1.16+) +
+
+## Usage
+
+For an end-to-end cluster provisioning example, check out the [end-to-end examples](https://github.com/spectrocloud/terraform-provider-spectrocloud/tree/main/examples/e2e).
+
+You can find resource examples in the [resource directory](https://registry.terraform.io/providers/spectrocloud/spectrocloud).
+
+
+## Modules
+
+There are two modules available to help you provision Spectro Cloud infrastructure resources.
+
+- [Palette Edge Native Terraform Module](https://registry.terraform.io/modules/spectrocloud/edge/spectrocloud/latest)
+- [Spectro Cloud Terraform Modules](https://registry.terraform.io/modules/spectrocloud/modules/spectrocloud/latest)
+
+Review the [Spectro Cloud modules readme](https://github.com/spectrocloud/terraform-spectrocloud-modules#module-resources--requirements) document to learn more about supported provider versions and other requirements.
diff --git a/docs/docs-content/troubleshooting/_category_.json b/docs/docs-content/troubleshooting/_category_.json
new file mode 100644
index 0000000000..6374007d98
--- /dev/null
+++ b/docs/docs-content/troubleshooting/_category_.json
@@ -0,0 +1,3 @@
+{
+  "position": 210
+}
diff --git a/docs/docs-content/troubleshooting/cluster-deployment.md b/docs/docs-content/troubleshooting/cluster-deployment.md
new file mode 100644
index 0000000000..6ac9a30ce6
--- /dev/null
+++ b/docs/docs-content/troubleshooting/cluster-deployment.md
@@ -0,0 +1,150 @@
+---
+sidebar_label: "Cluster Deployment"
+title: "Troubleshooting steps for errors during a cluster deployment"
+description: "Troubleshooting steps for errors during a cluster deployment."
+icon: ""
+hide_table_of_contents: false
+sidebar_position: 10
+tags: ["troubleshooting", "cluster-deployment"]
+---
+
+
+
+
+
+# Cluster Deployment Error Scenarios
+
+The following steps will help you troubleshoot errors in the event issues arise while deploying a cluster.
+
+
+## Scenario - Instances Continuously Delete Every 30 Minutes
+
+An instance is launched and terminated every 30 minutes prior to completion of its deployment, and the **Events** tab lists errors with the following message:
+
+<br />
+
+```hideClipboard bash
+Failed to update kubeadmControlPlane Connection timeout connecting to Kubernetes Endpoint
+```
+
+This behavior can occur when Kubernetes services for the launched instance fail to start properly.
+Common reasons why a service may fail are:
+
+- The specified image could not be pulled from the image repository.
+- The cloud-init process failed.
+
+### Debug Steps
+
+1. Initiate an SSH session with the Kubernetes instance using the SSH key provided during provisioning, and log in as user `spectro`. If you are initiating an SSH session into an installer instance, log in as user `ubuntu`.
+
+    ```shell
+    ssh -i <_pathToYourSSHkey_> spectro@X.X.X.X
+    ```
+
+2. Elevate the user access.
+
+    ```shell
+    sudo -i
+    ```
+3. Verify the kubelet service is operational.
+    ```shell
+    systemctl status kubelet.service
+    ```
+
+4. If the kubelet service does not work as expected, do the following. If the service operates correctly, you can skip this step.
+     1. Navigate to the **/var/log/** folder.
+     ```shell
+      cd /var/log/
+     ```
+     2. Scan the **cloud-init-output** file for any errors. Take note of any errors and address them.
+     ```shell
+     cat cloud-init-output.log
+     ```
+
+5. If the kubelet service works as expected, do the following.
+   - Export the kubeconfig file.
+
+    ```shell
+    export KUBECONFIG=/etc/kubernetes/admin.conf
+    ```
+   - Connect with the cluster's Kubernetes API.
+
+    ```shell
+    kubectl get pods --all-namespaces
+    ```
+   - When the connection is established, verify the pods are in a *Running* state. Take note of any pods that are not in a *Running* state.
+
+    ```shell
+    kubectl get pods -o wide
+    ```
+
+   - If all the pods are operating correctly, verify their connection with the Palette API.
+      - For clusters using Gateway, verify the connection between the Installer and Gateway instance. Replace the placeholder with the Gateway IP address.
+      ```shell
+       curl -k https://<GATEWAY_IP>:6443
+       ```
+      - For public clouds that do not use Gateway, verify the connection between the public internet and the Kubernetes API endpoint. Replace the placeholder with the endpoint address.
+       ```shell
+       curl -k https://<KUBE_API_ENDPOINT>:6443
+       ```
+
+      :::info
+      You can obtain the URL for the Kubernetes API using the command `kubectl cluster-info`.
+      :::
+
+6. Check stdout for errors. You can also open a support ticket. Visit our [support page](http://support.spectrocloud.io/).
+
+
+## Gateway Installer Registration Failures
+
+There are a couple of reasons the Gateway Installer might fail:
+
+- A bootstrap error might have occurred. When the Gateway Installer VM is powered on, it initiates a bootstrap process and registers itself with the tenant portal. This process typically takes 5 to 10 minutes. If the installer fails to register with the tenant portal during this time, it indicates a bootstrapping error.
+
+  To address the issue, SSH into the Installer virtual machine using the key provided during OVA import and inspect the log file located at **/var/log/cloud-init-output.log**.
+
+  The log file contains error messages about any failures that occur while connecting to the Spectro Cloud management platform portal, authenticating, or downloading installation artifacts.
+
+  A common cause for these errors is that the Spectro Cloud management platform console endpoint or the pairing code is typed incorrectly. Ensure that the tenant portal console endpoint does not have a trailing slash. If these properties were incorrectly specified, power down and delete the installer VM and re-launch with the correct values.
+
+
+- The VM may not have an outbound connection. 
The Gateway Installer VM requires outbound connectivity directly or using a proxy. Adjust proxy settings, if applicable, to fix the connectivity or power down and delete the Installer VM, then relaunch it in a network that enables outbound connections. + +If these steps do not resolve the Gateway Installer issues, copy the following script to the Installer VM to generate a logs archive. Open a support ticket by visiting our [support page](http://support.spectrocloud.io/). Attach the logs archive so the Spectro Cloud Support team can troubleshoot the issue and provide further guidance: + +```bash +#!/bin/bash + +DESTDIR="/tmp/" + +CONTAINER_LOGS_DIR="/var/log/containers/" +CLOUD_INIT_OUTPUT_LOG="/var/log/cloud-init-output.log" +CLOUD_INIT_LOG="/var/log/cloud-init.log" +KERN_LOG="/var/log/kern.log" +KUBELET_LOG="/tmp/kubelet.log" +SYSLOGS="/var/log/syslog*" + +FILENAME=spectro-logs-$(date +%-Y%-m%-d)-$(date +%-HH%-MM%-SS).tgz + + +journalctl -u kubelet > $KUBELET_LOG + +tar --create --gzip -h --file=$DESTDIR$FILENAME $CONTAINER_LOGS_DIR $CLOUD_INIT_LOG $CLOUD_INIT_OUTPUT_LOG $KERN_LOG $KUBELET_LOG $SYSLOGS + +retVal=$? +if [ $retVal -eq 1 ]; then + echo "Error creating spectro logs package" +else + echo "Successfully extracted spectro cloud logs: $DESTDIR$FILENAME" +fi +``` + +## Gateway Cluster Provisioning Failures + +Installation of the Gateway cluster may run into errors or get stuck in the provisioning state for various reasons like lack of infrastructure resources, lack of availability of IP addresses, inability to perform NTP sync, etc. + +While these are the most common failures, some other issues might be related to the underlying VMware environment. The **Cluster Details** page, which you can access by clicking anywhere on the Gateway widget, contains details of every orchestration step, including an indication of the current task. + +Intermittent errors are displayed on the **Cluster Details** page next to the relevant orchestration task. The **Events** tab on this page also provides helpful insights into lower-level operations currently being performed. Suppose you believe the orchestration is stuck or failed due to an invalid selection of infrastructure resources or an intermittent problem with the infrastructure. You may reset the Gateway by clicking on the **Reset** button on the Gateway widget. The Gateway state will transition to Pending. A Gateway in the Pending state allows you to reconfigure the Gateway and start provisioning a new Gateway cluster. If the problem persists, don't hesitate to contact Spectro support via the Service Desk or our [support page](http://support.spectrocloud.io/). + +
\ No newline at end of file diff --git a/docs/docs-content/troubleshooting/edge.mdx b/docs/docs-content/troubleshooting/edge.mdx new file mode 100644 index 0000000000..e52c6b005d --- /dev/null +++ b/docs/docs-content/troubleshooting/edge.mdx @@ -0,0 +1,35 @@ +--- +sidebar_label: "Edge" +title: "Edge" +description: "Troubleshooting steps for common Edge scenarios." +icon: "" +hide_table_of_contents: false +sidebar_position: 40 +tags: ["edge", "troubleshooting"] +--- + +# Edge + +The following are common scenarios that you may encounter when using Edge. + +# Scenario - Override or Reconfigure Read-only File System Stage + +If you need to override or reconfigure the read-only file system, you can do so using the following steps. + +## Debug Steps + +
+
+1. Power on the Edge host.
+
+2. Press the keyboard key `E` after highlighting the menu in `grubmenu`.
+
+3. Type `rd.cos.debugrw` and press `Enter`.
+
+![The grubmenu displays with the command rd.cos.debugrw typed in the terminal.](/troubleshooting_edge_grub-menu.png)
+
+4. Press `Ctrl+X` to boot the system.
+
+5. Make the required changes to the image.
+
+6. Reboot the system to resume the default read-only file system.
diff --git a/docs/docs-content/troubleshooting/kubernetes-tips.md b/docs/docs-content/troubleshooting/kubernetes-tips.md
new file mode 100644
index 0000000000..c9fd5c4828
--- /dev/null
+++ b/docs/docs-content/troubleshooting/kubernetes-tips.md
@@ -0,0 +1,26 @@
+---
+sidebar_label: "Kubernetes Debugging"
+title: "Kubernetes Debugging Tips"
+description: "Learn tips and tricks related to Kubernetes debugging."
+icon: ""
+hide_table_of_contents: false
+sidebar_position: 0
+tags: ["k8s-tips", "troubleshooting"]
+---
+
+
+
+# Kubernetes Debug
+
+Spectro Cloud provisions standard, upstream Kubernetes clusters using `kubeadm` and `cluster-api`. The official Kubernetes documentation on support and troubleshooting is a great resource that you should also consider reviewing. The official Kubernetes [debugging guide](https://kubernetes.io/docs/tasks/debug-application-cluster/debug-cluster) covers cluster troubleshooting and offers excellent advice on how to resolve common issues that may arise.
+
+
+## Log Tips
+
+The table below displays useful Kubernetes log types that can aid you in the debugging process. The [Kubernetes Logging Architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/) page is a good resource you can review to help gain a better understanding of the logging architecture.
+
+| **Log Type** | **Access Method** |
+|----------|---------------|
+| Kubelet | `journalctl -u kubelet` |
+| Container | `kubectl logs` OR `/var/log/containers` and `/var/log/pods` |
+| Previous Container | `kubectl logs --previous` |
\ No newline at end of file
diff --git a/docs/docs-content/troubleshooting/nodes.md b/docs/docs-content/troubleshooting/nodes.md
new file mode 100644
index 0000000000..82a20d0a9c
--- /dev/null
+++ b/docs/docs-content/troubleshooting/nodes.md
@@ -0,0 +1,94 @@
+---
+sidebar_label: "Nodes & Clusters"
+title: "Nodes and Clusters"
+description: "Troubleshooting steps for Kubernetes nodes and clusters when managed by Palette."
+icon: ""
+hide_table_of_contents: false
+sidebar_position: 20
+tags: ["troubleshooting", "nodes", "clusters"]
+---
+
+This page covers common debugging scenarios for nodes and clusters after they have been deployed.
+
+## Nodes
+
+## Scenario - Repaved Nodes
+
+Palette performs a rolling upgrade on nodes when it detects a change in the `kubeadm` config. Below are some actions that cause the `kubeadm` configuration to change and result in nodes being upgraded:
+
+* OS layer changes
+* Kubernetes layer changes
+* Kubernetes version upgrade
+* Kubernetes control plane upsize
+* Machine pool updates for disk size
+* Changes in availability zones
+* Changes in instance types
+* Certificate renewal
+
+Logs are provided in Palette for traceability. However, these logs may be lost when the pods are relaunched. To ensure that the cause and context are persisted across repaving, refer to the `status.upgrades: []` field in the `SpectroCluster` object in the `/v1/dashboard/spectroclusters/:uid/overview` API. 
+ +The following example shows the `status.upgrades` field for a cluster that had Kubernetes configuration changes that resulted in a node repave. The API payload is incomplete for brevity. + +```json hideClipboard +"upgrades": [ + { + "reason": [ + "{v1beta1.KubeadmConfigSpec}.ClusterConfiguration.APIServer.ControlPlaneComponent.ExtraArgs[\"oidc-client-id\"] changed from to xxxxxxxxxxx", + "{v1beta1.KubeadmConfigSpec}.ClusterConfiguration.APIServer.ControlPlaneComponent.ExtraArgs[\"oidc-groups-claim\"] changed from to groups", + "{v1beta1.KubeadmConfigSpec}.ClusterConfiguration.APIServer.ControlPlaneComponent.ExtraArgs[\"oidc-issuer-url\"] changed from to https://console.spectrocloud.com/v1/oidc/tenant/XXXXXXXXXXXX", + "{v1beta1.KubeadmConfigSpec}.ClusterConfiguration.APIServer.ControlPlaneComponent.ExtraArgs[\"oidc-username-claim\"] changed from to email" + ], + "timestamp": "2023-09-18T19:49:33.000Z" + } +] +``` + + +For detailed information, review the cluster upgrades [page](../clusters/clusters.md). + +
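+
+If you want to pull this history programmatically, a hedged sketch using `curl` is shown below. The `ApiKey` header and the placeholder cluster UID are assumptions based on general Palette API conventions; substitute your own endpoint, API key, and cluster UID, and verify the header name against the Palette API documentation.
+
+```shell
+# Query the cluster overview endpoint and print the upgrade history.
+# <CLUSTER_UID> and the API key value are placeholders.
+curl --silent \
+  --header "ApiKey: <YOUR_PALETTE_API_KEY>" \
+  "https://api.spectrocloud.com/v1/dashboard/spectroclusters/<CLUSTER_UID>/overview" \
+  | jq '.status.upgrades'   # jq path assumes the field sits under .status
+```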
+
+## Clusters
+
+## Scenario - vSphere Cluster and Stale ARP Table
+
+Sometimes vSphere clusters encounter issues where nodes without the assigned Virtual IP Address (VIP) cannot contact the node that holds the VIP. The problem is caused by Address Resolution Protocol (ARP) entries becoming stale on non-VIP nodes.
+
+To minimize this situation, vSphere clusters deployed through Palette now have a daemon set that cleans the ARP entry cache every five minutes. The cleaning process forces the nodes to periodically re-request an ARP entry of the VIP node. This is done automatically without any user action.
+
+You can verify the cleaning process by issuing the following command on non-VIP nodes and observing that the ARP cache is never older than 300 seconds.
+
+<br />
+
+```shell
+watch ip -statistics neighbour
+```
+
+
+## EKS Cluster Worker Pool Failures
+
+If your EKS cluster worker pool ends up in a `Failed`, `Create Failed`, or `Error nodes failed to join` state, refer to the Amazon EKS [Runbook](https://docs.aws.amazon.com/systems-manager-automation-runbooks/latest/userguide/automation-awssupport-troubleshooteksworkernode.html) for troubleshooting guidance.
+
+<br />
+
+## Palette Agents Workload Payload Size Issue
+
+
+A cluster composed of many nodes can create a situation where the workload report data the agent sends to Palette exceeds the 1 MB threshold and fails to deliver the messages. If the agent encounters too many failed workload report deliveries, the agent container may transition into a *CrashLoopBackOff* state.
+
+If you encounter this scenario, you can configure the cluster to stop sending workload reports to Palette. To disable the workload report feature, create a *configMap* with the following configuration. Use a cluster profile manifest layer to create the configMap.
+
+<br />
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: palette-agent-config
+  namespace: "cluster-{{ .spectro.system.cluster.uid }}"
+data:
+  feature.workloads: disable
+```
+
+<br />
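+
+After the manifest layer is applied, you can confirm the ConfigMap landed in the expected namespace. This is a hedged sketch; it assumes the templated `cluster-{{ .spectro.system.cluster.uid }}` value above resolves to `cluster-<CLUSTER_UID>` for your cluster.
+
+```shell
+# List the agent ConfigMap. Replace <CLUSTER_UID> with your cluster's UID.
+kubectl get configmap palette-agent-config --namespace cluster-<CLUSTER_UID> --output yaml
+```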
\ No newline at end of file
diff --git a/docs/docs-content/troubleshooting/pack-issues.md b/docs/docs-content/troubleshooting/pack-issues.md
new file mode 100644
index 0000000000..468e5457d9
--- /dev/null
+++ b/docs/docs-content/troubleshooting/pack-issues.md
@@ -0,0 +1,18 @@
+---
+sidebar_label: "Packs"
+title: "Troubleshooting steps for errors during a cluster deployment"
+description: "Troubleshooting steps for errors during a cluster deployment."
+icon: ""
+hide_table_of_contents: false
+sidebar_position: 30
+tags: ["troubleshooting", "packs"]
+---
+
+
+
+
+# Packs
+
+Each pack's documentation contains usage guidance and other related information, such as troubleshooting steps. If you encounter an issue with a pack, visit the respective pack documentation for troubleshooting steps.
+
+<br />
diff --git a/docs/docs-content/troubleshooting/palette-dev-engine.md b/docs/docs-content/troubleshooting/palette-dev-engine.md new file mode 100644 index 0000000000..6ad23ed3c7 --- /dev/null +++ b/docs/docs-content/troubleshooting/palette-dev-engine.md @@ -0,0 +1,38 @@ +--- +sidebar_label: "Palette Dev Engine" +title: "Palette Dev Engine" +description: "Troubleshooting steps for errors encountered with Palette Dev Engine." +icon: "" +hide_table_of_contents: false +sidebar_position: 40 +tags: ["troubleshooting", "pde", "app mode"] +--- + + + + +# Palette Dev Engine (PDE) + +Use the following content to help you troubleshoot issues you may encounter when using Palette Dev Engine (PDE). + +
+ + +## Resource Requests + +All [Cluster Groups](../clusters/cluster-groups/cluster-groups.md) are configured with a default [*LimitRange*](https://kubernetes.io/docs/concepts/policy/limit-range/). The LimitRange configuration is in the Cluster Group's Virtual Cluster configuration section. Packs deployed to a virtual cluster should have the `resources:` section defined in the **values.yaml** file. Pack authors must specify the `requests` and `limits` or omit the section entirely to let the system manage the resources. + + +If you specify `requests` but not `limits`, the default limits imposed by the LimitRange will likely be lower than the requests, causing the following error. + +
+ +```hideClipboard shell +Invalid value: "300m": must be less than or equal to CPU limit spec.containers[0].resources.requests: Invalid value: "512Mi": must be less than or equal to memory limit +``` +
+ +The workaround is to define both the `requests` and `limits`. + + +
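+
+If you want to see exactly which default limits the virtual cluster enforces, you can inspect the LimitRange object directly. This is a hedged sketch; the namespace is a placeholder for wherever your workload is deployed.
+
+```shell
+# Show the default requests and limits enforced in the target namespace.
+kubectl describe limitrange --namespace <TARGET_NAMESPACE>
+```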
\ No newline at end of file diff --git a/docs/docs-content/troubleshooting/palette-upgrade.md b/docs/docs-content/troubleshooting/palette-upgrade.md new file mode 100644 index 0000000000..9c35d2a4cb --- /dev/null +++ b/docs/docs-content/troubleshooting/palette-upgrade.md @@ -0,0 +1,57 @@ +--- +sidebar_label: "Palette Upgrade" +title: "Palette Upgrade" +description: "Troubleshooting steps for errors encountered with upgrade actions." +icon: "" +hide_table_of_contents: false +sidebar_position: 60 +tags: ["troubleshooting", "palette-upgrade"] +--- + + + + +# Palette Upgrades + +We recommend you review the [Release Notes](../release-notes.md) and the [Upgrade Notes](../enterprise-version/upgrade.md) before attempting to upgrade Palette. Use this information to address common issues that may occur during an upgrade. + + + +## Ingress Errors + +If you receive the following error message when attempting to upgrade to Palette versions greater than Palette 3.4.X in a Kubernetes environment, use the debugging steps to address the issue. + +
+ +```hideClipboard text +Error: UPGRADE FAILED: failed to create resource: admission webhook "validate.nginx.ingress.kubernetes.io" denied the request: host "_" and path "/v1/oidc" is already defined in ingress default/hubble-auth-oidc-ingress-resource +``` + + +## Debug Steps + +1. Connect to the cluster using the cluster's kubeconfig file. Refer to the [Access Cluster with CLI](../clusters/cluster-management/palette-webctl.md) for additional guidance. + + + +2. Identify all Ingress resources that belong to *Hubble* - an internal Palette component. + +
+ + ```shell + kubectl get ingress --namespace default + ``` + +3. Remove each Ingress resource listed in the output that starts with the name Hubble. Use the following command to delete an Ingress resource. Replace `REPLACE_ME` with the name of the Ingress resource you are removing. + +
+
+    ```shell
+    kubectl delete ingress REPLACE_ME --namespace default
+    ```
+
+
+4. Restart the upgrade process.
+
+<br />
+ diff --git a/docs/docs-content/troubleshooting/pcg.md b/docs/docs-content/troubleshooting/pcg.md new file mode 100644 index 0000000000..d9f6d75120 --- /dev/null +++ b/docs/docs-content/troubleshooting/pcg.md @@ -0,0 +1,401 @@ +--- +sidebar_label: "Private Cloud Gateway" +title: "Private Cloud Gateway" +description: "Troubleshooting steps for deploying a Private Cloud Gateway." +icon: "" +hide_table_of_contents: false +sidebar_position: 50 +tags: ["troubleshooting", "pcg"] +--- + + +# Private Cloud Gateway + +When you deploy a Kubernetes cluster in a private data center environment, you must already have a Private Cloud Gateway (PCG) cluster deployed in the data center environment. A PCG enables secure communication between Palette and the private data center environment. + +The following are the high-level steps of deploying a PCG in a private data center environment: +
+ +1. Initiate the installation in Palette. In this step, you get a pairing code and an installer image. +2. Deploy the PCG installer in the data center environment. +3. Configure the cloud gateway in Palette, and launch the PCG cluster. + +While deploying a PCG, you may encounter one of the following scenarios during the above-mentioned steps. Some scenarios below apply to all data center environments, whereas others apply to a specific data center environment, such as VMware. Each scenario covers a specific problem, including an overview, possible causes, and debugging steps. +
+ +## Scenario - Jet CrashLoopBackOff + +After you finish configuring the PCG in Palette, Palette starts provisioning the PCG cluster. During the provisioning, one of the internal Palette components may undergo a *CrashLoopBackOff* state. + +The internal component, *Jet*, will transition to a healthy state once the PCG cluster is successfully registered with Palette. +
+
+## Scenario - Jet CrashLoopBackOff
+
+After you finish configuring the PCG in Palette, Palette starts provisioning the PCG cluster. During the provisioning, one of the internal Palette components may enter a *CrashLoopBackOff* state.
+
+The internal component, *Jet*, will transition to a healthy state once the PCG cluster is successfully registered with Palette.
+<br />
+ +## Scenario - PCG Installer VM Unable to Register With Palette + +When deploying the PCG installer in VMware vSphere, you use an OVF template and then power on the PCG installer Virtual Machine (VM). After powering it on, the PCG installer goes through a bootstrap process and attempts to register with Palette. This process typically takes between five to ten minutes. + +If the installer fails to register with Palette within the expected timeframe, it could indicate a bootstrapping error. The error can occur due to network connectivity issues, incorrect pairing code, or an incorrect endpoint configuration for Palette in the PCG installer template settings. +
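+
+If you have access to the PCG cluster's kubeconfig, you can watch the component recover rather than waiting blindly. This is a hedged sketch; it assumes the affected pod's name contains `jet`, and the exact namespace may differ in your installation.
+
+```shell
+# Watch for the pod to leave the CrashLoopBackOff state and report Running.
+kubectl get pods --all-namespaces | grep -i jet
+```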
+
+## Scenario - PCG Installer VM Unable to Register With Palette
+
+When deploying the PCG installer in VMware vSphere, you use an OVF template and then power on the PCG installer Virtual Machine (VM). After powering it on, the PCG installer goes through a bootstrap process and attempts to register with Palette. This process typically takes between five and ten minutes.
+
+If the installer fails to register with Palette within the expected timeframe, it could indicate a bootstrapping error. The error can occur due to network connectivity issues, an incorrect pairing code, or an incorrect endpoint configuration for Palette in the PCG installer template settings.
+<br />
+ +1. SSH into the PCG installer VM using the username `ubuntu` and the SSH key you provided during the OVA import. + + +2. Inspect the log file located at **/var/log/cloud-init-output.log**. + +
+ + ```bash + cat /var/log/cloud-init-output.log + ``` + + The **cloud-init-output.log** file will contain error messages if there are failures with connecting to Palette, authenticating, or downloading installation artifacts. A common cause for these errors is incorrect values provided to the OVF template deployment wizard, such as the Palette endpoint or a mistyped pairing code. + + The screenshot below highlights the OVF template properties you must carefully configure and verify before deploying a PCG installer VM. + + ![A screenshot displaying the OVF template properties you configure while deploying the PCG installer VM](/troubleshooting-pcg-template_properties.png) + + +3. Double-check the accuracy of the pairing code used for the PCG installer VM. A pairing code is a unique authentication code Palette generates for each PCG installer instance. Confirm that it matches the value you copied from Palette. + + +4. Ensure the Palette endpoint is correct and has no trailing slash `/`. If you use Palette SaaS, the default endpoint is `https://console.spectrocloud.com`. If you are using a self-hosted Palette instance, use the domain name as applicable to you. If the Palette endpoint is incorrectly specified, relaunch a new PCG installer VM with the correct values. + + +5. Another potential issue may be a lack of outbound connectivity from the PCG installer VM to Palette. The installer VM needs to have outbound connectivity directly or via a proxy to download the installation artifacts from Spectro Cloud. Check for any network restrictions or firewall rules in the network settings that may block communication. Adjust the proxy settings, if applicable, to fix the connectivity. Alternatively, you can relaunch a new PCG installer VM in a network that supports outbound connections to Palette. + + +6. If the problem persists, issue the following command in the PCG installer VM to create a script to generate a log bundle. +
+ + ``` bash + cat > pcg-debug.sh << 'EOF' + #!/bin/bash + DESTDIR="/tmp/" + CONTAINER_LOGS_DIR="/var/log/containers/" + CLOUD_INIT_OUTPUT_LOG="/var/log/cloud-init-output.log" + CLOUD_INIT_LOG="/var/log/cloud-init.log" + KERN_LOG="/var/log/kern.log" + KUBELET_LOG="/tmp/kubelet.log" + SYSLOGS="/var/log/syslog*" + FILENAME=spectro-logs-$(date +%-Y%-m%-d)-$(date +%-HH%-MM%-SS).tgz + journalctl -u kubelet > $KUBELET_LOG + tar --create --gzip -h --file=$DESTDIR$FILENAME $CONTAINER_LOGS_DIR $CLOUD_INIT_LOG $CLOUD_INIT_OUTPUT_LOG $KERN_LOG $KUBELET_LOG $SYSLOGS + retVal=$? + if [ $retVal -eq 1 ]; then + echo "Error creating spectro logs package" + else + echo "Successfully extracted spectro cloud logs: $DESTDIR$FILENAME" + fi + EOF + ``` + + + +7. Start the script to generate a log archive. By default, the script places the log archive in the **/tmp/** folder. The log archive file name starts with the prefix **spectro-logs-** followed by a timestamp value. +
+ + ```shell + chmod +x pcg-debug.sh && ./pcg-debug.sh + ``` + + +8. Contact our support team by emailing [support@spectrocloud.com](mailto:support@spectrocloud.com) and attach the logs archive to the ticket so the support team can troubleshoot the issue and provide you with further guidance. + +
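+If you suspect the endpoint or connectivity problems described in steps 4 and 5, the following minimal sketch shows one way to verify DNS resolution and outbound HTTPS reachability from the PCG installer VM. It assumes the default Palette SaaS endpoint and that `nslookup` and `curl` are available on the VM; the SSH key path and VM address are placeholders for your environment.
+
+```bash
+# Example values - replace the key path and VM address with your own.
+ssh -i ~/.ssh/pcg-installer-key ubuntu@10.10.100.20
+
+# From inside the installer VM, confirm DNS resolution of the Palette endpoint.
+nslookup console.spectrocloud.com
+
+# Confirm outbound HTTPS reachability. Any HTTP response proves connectivity,
+# while a timeout or resolution error points to a DNS, firewall, proxy, or routing problem.
+curl --insecure --head https://console.spectrocloud.com
+```
+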
+
+## Scenario - PCG Installer VM IP Address Assignment Error
+
+When deploying the PCG installer in VMware vSphere, you use an OVF template and then power on the PCG installer VM. After powering it on, the PCG installer VM may fail to get an IP address.
+
+If the PCG installer VM fails to get an IP address assigned, it implies a networking error or an incomplete cloud-init process. The IP allocation scheme you selected in the network settings of the PCG installer OVF template determines how the VM receives its IP address, using either a static IP or DHCP. Start troubleshooting by checking which allocation scheme you selected.
+
+ +## Debug Steps +
+
+1. If you chose the static IP allocation scheme, ensure you have correctly provided the values for the gateway IP address, DNS addresses, and static IP subnet prefix. Check that the subnet prefix you provided allows the creation of an IP pool with sufficient IP addresses to allocate to the new PCG installer VM.
+
+
+2. If you chose the DHCP allocation scheme, check that the DHCP service is available on the DHCP server. Restart the service if it's not in an active state.
+
+
+3. If the DHCP server is active, recheck the DHCP scope and the DHCP reservations. The DHCP scope defines the range of IP addresses that the DHCP server allocates on the selected network. You must have sufficient IP addresses from the DHCP scope for dynamic allocation.
+
+
+4. If you chose the DHCP allocation scheme, ensure Dynamic DNS is enabled in the DHCP server. Dynamic DNS is only required if you are using DHCP and is not required for a static IP allocation scheme.
+
+
+5. If there are no network-related issues, SSH into the PCG installer VM using the username `ubuntu` and the SSH public key you provided during the OVA import step. Alternatively, you can open the web console of the PCG installer VM.
+
+
+6. Inspect the log files in the **/var/log** directory.
+
+
+7. Examine the cloud-init logs for potential errors or warnings related to the IP address assignment. The sketch after this list shows a few commands you can start with.
+
+
+8. If the problem persists, email the log files to our support team at [support@spectrocloud.com](mailto:support@spectrocloud.com).
+
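+A minimal sketch of the checks mentioned in steps 5 through 7, assuming standard Ubuntu tooling (`ip`, `grep`, and `journalctl`) is available on the installer VM:
+
+```bash
+# Check whether any interface received an address and whether a default route exists.
+ip addr show
+ip route show
+
+# Search the cloud-init output for network and DHCP related messages.
+sudo grep -iE 'dhcp|static|gateway|dns' /var/log/cloud-init-output.log
+
+# Review recent journal entries for lease failures or timeouts.
+sudo journalctl --no-pager | grep -iE 'dhcp|lease' | tail -n 50
+```
+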
+
+## Scenario - PCG Installer Deployment Failed
+
+When deploying the PCG installer in VMware, you deploy the OVF template and power on the PCG installer VM. If the VM instance is supposed to receive a public IP address and the deployment fails, you cannot configure the cloud gateway in Palette.
+
+The PCG installer deployment can fail due to a lack of internet connectivity or internal misconfigurations, such as an incorrect pairing code.
+
+ +## Debug Steps + +If the PCG installer VM has a public IP address assigned, you can access the PCG installer's deployment status and system logs from the monitoring console. Follow the steps below to review the deployment status and logs. +
+
+1. Open a web browser on your local machine and visit the `https://[IP-ADDRESS]:5080` URL. Replace the `[IP-ADDRESS]` placeholder with your PCG installer VM's public IP address. If the browser cannot reach the page, see the reachability check after this list.
+
+
+2. Provide the username and password when prompted. You can use the default installation credentials:
+   - username: admin
+   - password: admin
+
+
+3. Once you are logged in, review the PCG installer's deployment status, system logs, and diagnostic tasks, as highlighted in the screenshot below. The monitoring console allows you to check the high-level status and download the individual log files.
+
+   ![A screenshot of the monitoring console of the PCG installer.](/troubleshooting-pcg-monitoring_console.png)
+
+
+4. If any of the statuses is not **Done** after waiting for a while, download the corresponding logs. The screenshot below displays the **Logs** tab in the monitoring console.
+
+   ![A screenshot of the logs in the monitoring console of the PCG installer.](/troubleshooting-pcg-monitoring_logs.png)
+
+
+5. Examine the log files for potential errors and root causes.
+
+
+6. Check if the deployment failed due to a lack of outbound internet connectivity from the PCG installer VM. Use the following steps to check outbound internet connectivity:
+   - SSH into the PCG installer VM using the username `ubuntu` and the public SSH key you provided during the OVA import.
+   - Use the ping command to check if the VM can reach a public IP address. For example, ping well-known public IPs like Google's public DNS server (8.8.8.8) or any other public IP address.
+ + ```bash + ping 8.8.8.8 + ``` + - If you receive responses from the ping requests, it indicates that the VM has outbound internet connectivity. + - Suppose you do not receive a response from the ping requests. In that case, go to the next step for further troubleshooting steps. + + +7. Check for any network restrictions or firewall rules in the data center's network settings that may block communication. Adjust the proxy settings, if applicable, to fix the connectivity. Alternatively, you can power down and delete the PCG installer VM and relaunch a new one in a network that supports outbound internet connections. + + +8. If the problem persists, email the log files to our support team at [support@spectrocloud.com](mailto:support@spectrocloud.com). +
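+As a quick check for step 1 above, you can confirm that the monitoring console port is reachable from your workstation before opening it in a browser. This is a minimal sketch that assumes `curl` is available on your local machine; replace the placeholder with the installer VM's public IP address.
+
+```bash
+# The console uses a self-signed certificate, so skip certificate verification.
+# Any HTTP response means the port is reachable; a timeout suggests a firewall
+# or routing issue between your workstation and the VM.
+curl --insecure --head https://[IP-ADDRESS]:5080
+```
+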
+ +## Scenario - PCG Cluster Provisioning Stalled or Failed + +After you finish configuring the cloud gateway in Palette, the PCG cluster provisioning process may take up to 15 minutes to finish the PCG cluster deployment. + +However, if the PCG cluster provisioning gets stuck, it could hint at incorrect cloud gateway configurations, unavailable IP addresses for the worker nodes, or the inability to perform a Network Time Protocol (NTP) sync. +
+ +## Debug Steps +
+ +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the left **Main Menu** and select **Tenant Settings**. From the **Tenant settings** menu, select **Private Cloud Gateways**. + + +3. Click on the newly provisioned PCG cluster to review its details. + + +4. Click on the **Events** tab. + + +5. Examine all events in the **Events** tab to identify specific errors or issues. Each event will have a status, timestamp, associated service name, and orchestration details. + + +6. If you encounter one of the following error events - `Failed to deploy image: Failed to create govomiClient` or `No route to host`, refer to the remediation steps outlined in the [Scenario - Failed to Deploy Image](#scenario---failed-to-deploy-image) or the [Scenario - No Route to the Kubernetes API Server](#scenario---no-route-to-the-kubernetes-api-server) section, respectively. + + +7. If you encounter errors other than the ones mentioned in the previous step, it is possible that the cluster configuration or the DNS settings are not set correctly. You can review and edit the cluster configuration in the cluster settings. The screenshot below highlights the cluster configuration section in the cluster settings blade. + ![A screenshot highlighting the cluster configuration section in the cluster settings blade.](/troubleshooting-pcg-cluster_settings.png) + + +8. If the cluster settings look correct, ensure the search domain is correctly defined in the fault domain's DNS settings. The screenshot below highlights how you can review and edit the DNS mapping of an existing PCG cluster. + ![A screenshot highlighting the DNS mapping settings.](/troubleshooting-pcg-dns.png) + + +9. If the problem persists, download the cluster logs from Palette. The screenshot below will help you locate the button to download logs from the cluster details page. + + ![A screenshot highlighting how to download the cluster logs from Palette.](/troubleshooting-pcg-download_logs.png) + + +10. Share the logs with our support team at [support@spectrocloud.com](mailto:support@spectrocloud.com). +
+ + +## Scenario - No Progress After Creating the Container Manager +After you finish configuring the cloud gateway in Palette, Palette starts provisioning the PCG cluster. The PCG cluster provisioning process may take up to 15 minutes to finish the PCG cluster deployment. You can navigate to the cluster details page and review the progressive events in the **Events** tab while the cluster is provisioning. Suppose the PCG events display no progress after the specific event, `Created container manager`. + +This issue can occur when the PCG installer VM fails to connect to the Palette API endpoint and download the installation artifacts. Another potential reason is that the PCG installer may not have the required permissions to store the installation artifacts in the **spectro-templates** folder. The installer downloads the images for the worker nodes and stores them in the **spectro-templates** folder during the cluster provisioning. +
+ +## Debug Steps +
+ +1. Check the outbound internet connectivity from the PCG installer VM. Internet connectivity is needed to communicate with the Palette API endpoint, `https://api.spectrocloud.com`, or your self-hosted Palette's API endpoint. Use the following steps to check the outbound internet connectivity: + - SSH into the PCG installer VM using the username `ubuntu` and the public SSH key you provided during the OVA import. + - Use the ping command to check if the VM can reach a public IP address. For example, ping well-known public IPs like Google's public DNS server (8.8.8.8) or any other public IP address. +
+ + ```bash + ping 8.8.8.8 + ``` + - If you receive responses from the ping requests, it indicates that the VM has outbound internet connectivity. + - Suppose you do not receive a response from the ping requests. In that case, it indicates the machine does not have outbound internet connectivity. Go to the next step for further troubleshooting steps. + + +2. Check for any network restrictions or firewall rules in the data center's network settings that may block communication. Adjust the proxy settings, if applicable, to fix the connectivity. Alternatively, you can power down and delete the PCG installer VM and relaunch a new one in a network that supports outbound internet connections. + + +3. Ensure you have the necessary write permissions for the **spectro-templates** folder in the data center environment. +
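+Beyond the ping test in step 1, you can verify HTTPS reachability of the Palette API endpoint from the PCG installer VM, including through a proxy if your network requires one. This is a minimal sketch that assumes `curl` is available on the VM; the proxy address is a placeholder, and you should substitute your self-hosted endpoint if you do not use Palette SaaS.
+
+```bash
+# Direct check against the Palette API endpoint.
+curl --insecure --head https://api.spectrocloud.com
+
+# If outbound traffic must go through a proxy, repeat the check through it.
+curl --insecure --head --proxy http://[PROXY-ADDRESS]:3128 https://api.spectrocloud.com
+```
+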
+ +## Scenario - Failed to Deploy Image + +After you finish configuring the cloud gateway in Palette, Palette starts provisioning the PCG cluster. The PCG cluster provisioning process may take up to 15 minutes to finish the PCG cluster deployment. You can navigate to the cluster details page and review the progressive events in the **Events** tab while the cluster is provisioning. Suppose one of the events displays the `Failed to deploy image: Failed to create govomiClient` error. + +The error can occur if there is a preceding "https://" or "http://" string in the vCenter server URL or if the PCG installer VM lacks outbound internet connectivity. +
+ +## Debug Steps +
+ +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to the **Tenant Settings** > **Private Cloud Gateways** page. + + +3. Click on the newly provisioned PCG cluster to review its details. + + +4. Click on the **Events** tab. + + +5. In the **Events** tab, search the `Failed to deploy image: Failed to create govomiClient` error. If the error has occurred due to a preceding "https://" or "http://" string in the vCenter server URL, the error details will mention "https://" twice, as highlighted in the screenshot below. + + ![A screenshot highlighting the "https://" prepended twice to the data center server URL.](/troubleshooting-pcg-http_error.png) + +6. Palette does not allow you to edit the vCenter server URL you used for authentication. Therefore, you must redeploy the PCG cluster with the following considerations: + + - Check the VMware vCenter server field. The field expects a URL or an IP address for authentication. If you use a URL, ensure the URL does not include the preceding "http://" or "https://" string. Also, select the **Use Qualified Network Name** checkbox if you use a URL. The screenshot below displays the vCenter server field you configure in Palette. + ![A screenshot displaying the vCenter server field you configure in Palette](/troubleshooting-pcg-cluster_config_1.png) + + - Ensure the VMware cloud properties are specified correctly in the cloud gateway configuration. You must use the vSphere data center and the folder where you have permission to create resources. + + - If you choose the DHCP option, enable the Dynamic DNS in your DNS server. The screenshot below displays the VMware cloud properties you configure in Palette. + ![A screenshot displaying the VMware cloud properties you configure in Palette](/troubleshooting-pcg-cluster_config_2.png) + + +7. If the steps above do not resolve the issue, check if the deployment failed due to a lack of outbound internet connectivity from the PCG installer VM. Use the following steps to check outbound internet connectivity: + - SSH into the PCG installer VM using the username `ubuntu` and the public SSH key you provided during the OVA import. + - Use the ping command to check if the VM can reach a public IP address. For example, ping well-known public IPs like Google's public DNS server (8.8.8.8) or any other public IP address. +
+ + ```bash + ping 8.8.8.8 + ``` + - If you receive responses from the ping requests, it indicates that the VM has outbound internet connectivity. + - Suppose you do not receive a response from the ping requests. In that case, it indicates the machine does not have outbound internet connectivity. Go to the next step for further troubleshooting steps. + + +8. Check for any network restrictions or firewall rules in the data center's network settings that may block communication. Adjust the proxy settings, if applicable, to fix the connectivity. Alternatively, you can power down and delete the PCG installer VM and relaunch a new one in a network that supports outbound internet connections. +
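+To illustrate the URL format issue described in step 6, the snippet below contrasts an incorrect and a correct value for the vCenter server field and shows a basic reachability check. The `vcenter.example.com` hostname is hypothetical; replace it with your vCenter server, and note that the check assumes `nslookup` and `curl` are available on a machine in the PCG network.
+
+```bash
+# Incorrect - the scheme is included, so Palette ends up with "https://" twice:
+#   https://vcenter.example.com
+# Correct - provide only the hostname or IP address, and select
+# "Use Qualified Network Name" when you provide a hostname:
+#   vcenter.example.com
+
+# Optionally confirm that the hostname resolves and that HTTPS is reachable.
+nslookup vcenter.example.com
+curl --insecure --head https://vcenter.example.com
+```
+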
+ + +## Scenario - No Route to the Kubernetes API Server + +After you finish configuring the cloud gateway in Palette, Palette starts provisioning the PCG cluster. The PCG cluster provisioning process may take up to 15 minutes to finish the PCG cluster deployment. You can navigate to the cluster details page and review the progressive events in the **Events** tab while the cluster is provisioning. Suppose one of the events displays the `No route to host.` error. + +The error indicates an issue with the PCG cluster nodes attempting to connect to the cluster's Kubernetes API server. This issue can occur due to improper networking configuration or an error in the cloud-init process. +
+ +## Debug Steps +
+ +1. Check the data center network settings. Ensure no network restrictions, firewalls, or security groups block communication between the nodes and the API server. + + +2. If you use the DHCP allocation scheme, check that the DHCP service is available on the DHCP server. Restart the service if it's not in an active state. + + +3. If you use the DHCP allocation scheme, ensure Dynamic DNS is enabled in the DHCP server. A Dynamic DNS is only required if you are using DHCP. Dynamic DNS is not required for a static IP allocation scheme. + + +4. Check the Kubernetes API server status. The Kubernetes API server must be active and healthy on the control plane node. Use the following steps to check the status. + + - Switch to [Palette](https://console.spectrocloud.com). + + - Navigate to the **Tenant Settings** > **Private Cloud Gateways** page. + + - Click on the newly provisioned PCG cluster to review its details. + + - Download the PCG cluster's kubeconfig file from the **Overview** tab. Click on the kubeconfig file name to download it to your local machine, as highlighted in the screenshot below. + + ![A screenshot highlighting the kubeconfig file to download from Palette.](/troubleshooting-pcg-download_kubeconfig.png) + + - After you download the PCG cluster's kubeconfig file, use the following commands to make a GET request to one of the [Kubernetes API server endpoints](https://kubernetes.io/docs/reference/using-api/health-checks/#api-endpoints-for-health), `/readyz` or `'/livez'`. Replace `[path_to_kubeconfig]` placeholder with the path to the kubeconfig file you downloaded in the previous step. A status code `ok` or `200` indicates the Kubernetes API server is healthy. +
+ + ```bash + kubectl --kubeconfig [path_to_kubeconfig] get --raw='/readyz' + ``` + + - If the previous command does not return an `ok`, use the command below to make a verbose GET request by specifying the `verbose` parameter. The output will display the individual health checks so you can decide on further debugging steps based on the failed checks. +
+ + ```bash + kubectl --kubeconfig [path_to_kubeconfig] get --raw='/readyz?verbose' + ``` +
+ + +5. If the PCG installer VM has a public IP address assigned, SSH into the VM using the username `ubuntu` and the public SSH key you provided during the OVA import. + + +6. Navigate to the **/var/log** directory containing the log files. + + +7. Examine the cloud-init and system logs for potential errors or warnings. +
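+For steps 6 and 7, the following sketch shows a few commands you might run on the node to surface errors related to the bootstrap process and the connection to the Kubernetes API server. It assumes standard Ubuntu tooling and that the kubelet runs as a systemd unit on the node.
+
+```bash
+# Look for errors and failures recorded during the cloud-init bootstrap.
+sudo grep -iE 'error|fail' /var/log/cloud-init-output.log /var/log/cloud-init.log
+
+# Review recent kubelet messages for connection problems to the API server,
+# for example "connection refused" or "no route to host".
+sudo journalctl -u kubelet --no-pager | tail -n 100
+```
+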
+ +## Scenario - Permission Denied to Provision + +After you finish configuring the cloud gateway in Palette, Palette starts provisioning the PCG cluster. The PCG cluster provisioning process may take up to 15 minutes to finish the PCG cluster deployment. You can navigate to the cluster details page and review the progressive events in the **Events** tab while the cluster is provisioning. Suppose one of the events displays the `Permission to perform this operation denied` error. + + +You must have the necessary permissions to provision a PCG cluster in the VMware environment. If you do not have adequate permissions, the PCG cluster provisioning will fail, and you will get the above-mentioned error in the events log. +
+ +## Debug Steps +
+ +1. Ensure you have all the permissions listed in the [VMware Privileges](../clusters/data-center/vmware.md#vmware-privileges) section before proceeding to provision a PCG cluster. + + +2. Contact your VMware administrator if you are missing any of the required permissions. + + +3. Delete the existing PCG cluster and redeploy a new one so that the new permissions take effect. + +
\ No newline at end of file diff --git a/docs/docs-content/troubleshooting/troubleshooting.md b/docs/docs-content/troubleshooting/troubleshooting.md new file mode 100644 index 0000000000..d799b2b26a --- /dev/null +++ b/docs/docs-content/troubleshooting/troubleshooting.md @@ -0,0 +1,74 @@ +--- +sidebar_label: "Troubleshooting" +title: "Common issues and their solutions" +description: "Common issues and their solutions in the deployment of Spectro Cloud Clusters" +hide_table_of_contents: false +sidebar_custom_props: + icon: "screwdriver-wrench" +tags: ["troubleshooting"] +--- + + + + +# Troubleshooting + +Use the following troubleshooting resources to help you address issues that may arise. You can also reach out to our support team by opening up a ticket through our [support page](http://support.spectrocloud.io/). + +
+
+- [Kubernetes Debugging](kubernetes-tips.md)
+
+
+- [Cluster Deployment](cluster-deployment.md)
+
+
+- [Nodes & Clusters](nodes.md)
+
+
+- [Packs](pack-issues.md)
+
+
+- [Palette Dev Engine](palette-dev-engine.md)
+
+
+- [Edge](edge.mdx)
+
+
+- [Private Cloud Gateway](pcg.md)
+
+
+- [Palette Upgrade](palette-upgrade.md)
+
+
+
+## Download Cluster Logs
+
+At times, you might need to work with the Spectro Cloud support team to troubleshoot an issue. Spectro Cloud provides the ability to aggregate logs from the clusters it manages. Problems that occur during the orchestration life cycle may require access to the various container, node, and Kube system logs. Spectro Cloud automates this log collection process and provides an easy download option from the Spectro Cloud UI console. This reduces the burden on the operator to log in to various cluster nodes individually and fetch these logs.
+
+Follow the link for more details: [Download Cluster Logs](../clusters/clusters.md#download-cluster-logs)
+
+## Event Stream
+
+Spectro Cloud maintains an event stream with low-level details of the various orchestration tasks being performed. This event stream is a good source for identifying issues if an operation does not complete for a long time.
+
+ +:::caution + + Due to Spectro Cloud’s reconciliation logic, intermittent errors show up in the event stream. As an example, after launching a node, errors might show up in the event stream regarding being unable to reach the node. However, the errors clear up once the node comes up.

 Error messages that persist over a long time or errors indicating issues with underlying infrastructure are an indication of a real problem.
+
+:::
+
+
+## Lifecycle Behaviors
+
+Typically, when a cluster life cycle action such as provisioning, upgrade, or deletion runs into a failure, it does not result in an outright error on the cluster. The Spectro Cloud orchestration engine follows the reconciliation pattern, wherein the system repeatedly tries to perform various orchestration tasks to bring the cluster to its desired state until it succeeds. Initial cluster provisioning or subsequent updates can run into a variety of issues related to cloud infrastructure availability, lack of resources, networking issues, and so on.
+
+## Cluster Conditions
+
+Spectro Cloud maintains specific milestones in a life cycle and presents them as “conditions”. Examples include: Creating Infrastructure, Adding Control Plane Node, Customizing Image, etc. The active condition indicates what task Spectro Cloud’s orchestration system is trying to perform. If a task results in failures, the condition is marked as failed, with relevant error messages. Reconciliation, however, continues behind the scenes, and continuous attempts are made to perform the task. Failed conditions are a great source of information for troubleshooting provisioning issues.
+
+For example, a failure to create a virtual machine in AWS because the vCPU limit was exceeded causes the corresponding error to be shown to end users. They could choose to bring down some workloads in the AWS cloud to free up capacity. The next time the VM creation task is attempted, it would succeed, and the condition would be marked as a success.
+
diff --git a/docs/docs-content/user-management/_category_.json b/docs/docs-content/user-management/_category_.json new file mode 100644 index 0000000000..16dd4f29d1 --- /dev/null +++ b/docs/docs-content/user-management/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 120 +} diff --git a/docs/docs-content/user-management/new-user.md b/docs/docs-content/user-management/new-user.md new file mode 100644 index 0000000000..3b643a1714 --- /dev/null +++ b/docs/docs-content/user-management/new-user.md @@ -0,0 +1,82 @@ +--- +sidebar_label: "Create a New User" +title: "Create a New User in Palette " +description: "Create a new user in Palette " +hide_table_of_contents: false +sidebar_position: 10 +tags: ["user-management"] +--- + + + +The section guides you on how to create a user in Palette. + +
+
+## Prerequisites
+
+- A [Palette account](https://console.spectrocloud.com).
+- Tenant Admin access.
+
+
+## Create a New User
+
+To create a new user in Palette:
+
+1. Log in to Palette as a Tenant Admin.
+
+
+2. Select **Users and Teams** from the left **Main Menu** and click on the **+Create User** button.
+
+
+3. Provide the following information to the **Create User** wizard:
+   * First Name
+   * Last Name
+   * Email
+   * Team(s)
+
+
+4. Click on the **Confirm** button to complete the wizard.
+
+
+## Validate
+
+* A display message will pop up confirming the user creation.
+
+* The validation can also be done from the Tenant console. Go to **Tenant Settings** from the left **Main Menu**.
+
+* Click the **Users & Teams** tab from the left **Main Menu**. This page will list all the users under the Tenant scope.
+
+
+## Create Custom Role
+
+Use the following steps to create a custom resource role:
+
+1. Log in to Palette as Tenant Admin and select **Roles** from the left **Main Menu**.
+
+
+2. Go to the **Resource Roles** tab from the top menu and click on the **+Create Resource Role** button to open the **Add New Role (Resource)** wizard. Fill out the following inputs:
+   * Name of the role.
+   * Assign permissions and operations.
+
+
+3. Once the required permissions are selected, click the **Save** button.
+
+
+4. To edit or delete a role, go to the role listing page and click the role. This opens the role details page.
+
+
+5. Click on the **Edit Role** or **Delete Role** button to edit or delete the role, respectively.
+
+
+## Validate
+
+* A display message will pop up confirming the role creation.
+
+* The validation can also be done from the **Tenant console**. Go to **Tenant Settings** from the left **Main Menu**.
+
+* Click the **Roles** tab from the left **Main Menu** and click on the **Resource Roles** tab to find the new role name listed.
+
+* To **Reset Password** or **Delete** the user, click the user name and go to the **User Details** page. Then, click on **Delete** or **Reset Password** to perform the corresponding operation.
+
diff --git a/docs/docs-content/user-management/palette-rbac/_category_.json b/docs/docs-content/user-management/palette-rbac/_category_.json
new file mode 100644
index 0000000000..c3460c6dbd
--- /dev/null
+++ b/docs/docs-content/user-management/palette-rbac/_category_.json
@@ -0,0 +1,3 @@
+{
+  "position": 30
+}
diff --git a/docs/docs-content/user-management/palette-rbac/palette-rbac.md b/docs/docs-content/user-management/palette-rbac/palette-rbac.md
new file mode 100644
index 0000000000..604650507f
--- /dev/null
+++ b/docs/docs-content/user-management/palette-rbac/palette-rbac.md
@@ -0,0 +1,389 @@
+---
+sidebar_label: "Palette RBAC"
+title: "Palette User Access using RBAC"
+description: "Palette User Access control using RBAC"
+icon: ""
+hide_table_of_contents: false
+tags: ["user-management", "rbac"]
+---
+
+
+RBAC stands for Role-Based Access Control. RBAC allows a single user to have different types of access control based on the resource being accessed. With RBAC, the Tenant Admin can grant full and unrestricted access to some parts of the system while withholding it for others.
+
+Palette enforces a well-structured RBAC design to grant granular access to resources and their operations within the management console. We maintain precise Roles and Resource Access Control Lists.
Role-based access control primarily focuses on assigning permissions to roles instead of individual users and then assigning these roles to users. Multiple roles can be assigned to a user, which defines the permitted actions on the resource. This module lists and enumerates all the roles available within the Palette console within specific scopes. + +Palette enables: + +* A role can have multiple permissions. We encourage custom role creation, coupling the wide range of Palette permissions. + +* Multiple roles can be assigned to a single user, defining the permitted actions on a Palette resource. + +## Palette RBAC Model + +The Palette RBAC Model, is based on the following three components: + + +* Scopes +* Permissions +* Roles + +### Scopes + +A Scope defines the resources on which the role has coverage. The scope will be either `Tenant` or `Project`. For example, a role within the scope project can operate within the projects. The combination of user and roles indicates the totality of the accessibility available to that user. Scopes are structured in a parent-child relationship. Each level of hierarchy makes the Scope more specific. The roles are assigned at any of these levels of Scope. The level you select determines how widely the role is applied. Lower levels inherit role permissions from higher levels. +![palette-rbac-scope.png](/palette-rbac-scope.png) + +The following are the major properties of Palette driven Scopes: + + +* Scopes control the visibility of the resource. + + +* Resource created in the higher scope will be visible in the lower scope as read-only. The cluster profiles created by a tenant will be available to all the projects created by that tenant. + + +* Resource Isolation: Resources within the same scope will be restricted to the respective scope entity. + * Cluster Profile created in project-1 will not be available in project-2 of the same tenant + + +* Resource with the same name can co-exist across scopes and will be distinguished with scope prefix (icon) + * A profile with the same name can be created in tenant and project scope. The resource will have the scope information, which helps to distinguish them. + + + +Palette resources can be allocated to roles under **Three Scopes**: + + + +* **System** (The system admin internal to Palette) + + +* **Tenant** + + +* **Project** + + +
+ + +![A diagram of Palette's RBAC model](/user-management_palette-rback_palette-rbac-model.png) + +
+ +### Permissions + + +Permissions determine the type of operations allowed on a resource. Permissions can be defined in the following format: + +`resourceKey.operation` + +Examples: + +* `cluster.create` +* `cluster.edit` +* `cluster.delete` + +Each permission has a defined scope. The role creation is based on scope, type and permissions. + +
+
+#### Palette Permissions
+
+
+Palette has a wide range of permissions, and these permissions can be combined as needed to create a role. If the Palette built-in roles do not meet the specific needs of your organization, custom roles can be created using different combinations of these permissions. Just like built-in roles, you can assign custom roles to users or teams within a specific scope (Tenant or Project). Refer to the available set of permissions in the [Palette Resource Scope Matrix](#resource-scope-matrix).
+
+
+ +### Roles +A Role is a collection of permissions. When a role is assigned to a user, it means all the permissions the role contains are assigned to that user. The Role will have a **Scope**. The Type signifies the creator's scope and the Scope signifies the role visibility. The permissions will be restricted to the permission's scope list based on the role's scope. The ProfileEditor will be visible under Tenant, but neither the Tenant nor the Project admins are allowed to modify the Project Scopes. + +
+
+## Access Modes
+
+* Tenant
+* Project
+
+### Tenant
+
+A tenant is an isolated workspace within Palette. `Users` and `Teams` with specific `Roles` can be associated with the Tenant(s) you create. Palette provides a [wide set of permissions](tenant-scope-roles-permissions.md) under the scope of a Tenant. Every person who accesses the tenant is a user, and at least one user must have the Tenant Admin privilege to control the product operations.
+ +### Project + +The Global Project Scope holds a group of resources, in a logical grouping, to a specific project. The project acts as a namespace for resource management. Users and Teams with specific roles can be associated with the project, cluster, or cluster profile you create. Users are members of a tenant who are assigned [project scope roles](project-scope-roles-permissions.md#global-project-scope) that control their access within the platform. +
+ +## Palette Specific (Default) Roles: + +Palette RBAC has several built-in roles that can be assigned to users and teams. Role assignments are the way you control access to Palette resources. +
+ +### Tenant Scope Default Roles: + +The Global Tenant Scope holds all the tenant resources of Palette. The list of `Role` types within the `Tenant Scope` are as follows: +
+
+1. [Tenant Administrator Role](tenant-scope-roles-permissions.md#tenant-admin)
+
+
+2. [Tenant Viewer Role](tenant-scope-roles-permissions.md#tenant-viewer)
+
+
+3. [Tenant Project Admin Role](tenant-scope-roles-permissions.md#tenant-project-admin)
+
+
+4. [Tenant Cluster Profile Admin Role](tenant-scope-roles-permissions.md#tenant-cluster-group-admin)
+
+
+5. [Tenant Role Admin Role](tenant-scope-roles-permissions.md#tenant-team)
+
+
+6. [Tenant Team Admin Role](tenant-scope-roles-permissions.md#tenant-admin)
+
+
+7. [Tenant User Admin Role](tenant-scope-roles-permissions.md#tenant-user)
+
+
+ +### Project Scope Default Roles: + +The Global Project Scope holds a group of resources in a logical grouping. Users and Teams with specific Roles can be associated with the Project(s) you create. Below is a list of Role types within the Project Scope built in to the Palette console. These Roles can neither be deleted nor edited. + +
+
+1. [Project Administrator Role](project-scope-roles-permissions.md#project-admin)
+
+
+2. [Project Editor Role](project-scope-roles-permissions.md#project-editor)
+
+
+3. [Project Viewer Role](project-scope-roles-permissions.md#project-viewer)
+
+
+4. [Cluster Profile Admin Role](project-scope-roles-permissions.md#cluster-profile-admin)
+
+
+5. [Cluster Profile Editor Role](project-scope-roles-permissions.md#cluster-profile-editor)
+
+
+6. [Cluster Profile Viewer Role](project-scope-roles-permissions.md#cluster-profile-viewer)
+
+
+7. [Cluster Admin Role](project-scope-roles-permissions.md#cluster-admin)
+
+
+8. [Cluster Editor Role](project-scope-roles-permissions.md#cluster-editor)
+
+
+9. [Cluster Viewer Role](project-scope-roles-permissions.md#cluster-viewer)
+
+
+10. [Cluster Account Admin Role](project-scope-roles-permissions.md#cluster-account-admin)
+
+
+11. [Cluster Account Editor Role](project-scope-roles-permissions.md#cluster-account-editor)
+
+
+12. [Cluster Account Viewer Role](project-scope-roles-permissions.md#cluster-account-viewer)
+
+
+13. [Workspace Admin Role](project-scope-roles-permissions.md#workspace-admin)
+
+
+14. [Workspace Operator Role](project-scope-roles-permissions.md#workspace-operator)
+
+
+
+## Assign Palette Specific Roles to Users
+
+The default (built-in) roles of Palette can be directly assigned to a user. Roles need to be assigned based on who needs the access, and they can be assigned to `Users` or `Teams`. Select the appropriate role from the list of built-in roles. If the built-in roles do not meet the specific needs of your organization, you can [create your own custom roles](#custom-roles-in-palette).
+
+
+1. Log in to the Palette console as a `Tenant Admin`.
+
+
+2. Select **Users and Teams** from the left **Main Menu** to list the [created users](../user-management.md#user-management).
+
+
+3. From the list of users, **select the user** to be assigned a role. This opens the role addition wizard.
+
+
+4. Choose the role category from the top tabs:
+   * Project Role
+   * Tenant Role
+   * Workspace Role
+
+
+5. Once you have chosen the category, click on **+ New Role**.
+
+
+6. In the **Add Roles to User-name** wizard, select the project name from the drop-down menu and select the roles from the list.
+
+
+7. Confirm to complete the wizard.
+
+
+8. The role-user association can be edited and deleted from the left **Main Menu**.
+
+
+## Custom Roles in Palette
+
+Palette enables users to create custom roles. These custom roles can be created under either the Tenant scope or the Project scope, but not both. These roles need to have unique names for identification. The names are case-insensitive. To create a custom role in Palette, you need to understand the components and operations in the platform, enumerated in the `Resource Scope Matrix` below:
+
+
+## Resource Scope Matrix
+
+|Component|Resource Key|Operations|Scope|Usage|
+|---------|------------|----------|-----|-----|
+|API Key|apiKey|create, get, list, update, delete|Tenant|API Key related operations|
+|Appliance|edgehost|create, get, list, update, delete|Project|Edge appliance deployment and management|
+|Audit|audit|get, list|Tenant Project|Audit log access|
+|Cloud Account|cloudaccount|create, get, list, update, delete|Tenant Project|Cloud account creation and management|
+|Cloud Config|cloudconfig|create, update, delete, get, list|Project|Cluster level cloud configuration|
+|Cluster|cluster|create, get, list, update, delete|Project|Creation and management of Palette workload clusters|
+|Cluster Profile|clusterProfile|update, publish, delete, create, get, list|Tenant Project|Creation and management of Palette cluster profiles|
+|DNS Mapping|dnsMapping|create, get, list, update, delete|Project|Domain Name Server mapping services creation and management|
+|Location|location|create, get, list, update, delete|Tenant Project|Location services related to backup and restore|
+|Macro|macro|create, get, list, update, delete|Tenant Project|Key value management for Palette resources|
+|Machine|machine|create, get, list, delete, update|Project|Palette node pool management|
+|Private Gateway|privateGateway|create, get, list, update, delete|Tenant|PCG creation and maintenance|
+|Registry|packRegistry|create, get, list, update, delete|Tenant|Creation and management of registries|
+|Role|role|create, update, delete, get, list|Tenant|Creation and management of Palette roles|
+|Project|project|create, get, list, delete, update|Project|Creation and management of Palette projects|
+|Workspace|workspace|create, list, update, delete, backup, restore, get|Project|Workspace operations including backup and restore|
+|Team|team|create, list, update, delete, get|Tenant|Creation and management of user teams in Palette|
+|User|user|create, update, delete, get, list|Tenant|Creation and management of users in Palette|
+
+
+
+## Create Custom Role in Palette
+
+To create a custom role, log in to the Palette console as a `Tenant Admin`:
+
+
+1. Go to **Roles** from the left **Main Menu**.
+
+
+2. Click **Create Role** to open the `Add New Role` wizard.
+
+
+3. Provide a `Role Name` of your choice.
+
+
+4. Clicking on a `Role Name` will show the permissions available under this role. `Default Roles` (built into the Palette system) cannot be edited or deleted. Select the scope from the available options:
+
+   * Tenant
+   * Project
+
+
+5. Make your choice of **Permissions** and **Operations** to create a custom Palette role. After entering the `Role Name`, use the checkboxes to select the permissions. The checkbox list can be expanded to fine-tune the required permissions.
+
+
+6. The created role can be viewed under the `Global Roles` list.
+
+
+7. Click on the name of the role to:
+
+   * `View`
+   * `Edit Role`
+   * `Delete Role`
+
+ +**Example:** + +If the user is creating a role under the Tenant scope for API Key operations, select the `API Key Permissions` and then from the drop-down menu of that permission, check (tick) the required API operations listed under API Key permissions. Similarly, several permissions can be combined to create a **Custom Role**. [The created role can be assigned to an existing or new user.](/user-management#rolesandpermissions) + +
+
+
+### Assign Custom Roles to Users
+
+1. Log in to the Palette console as a `Tenant Admin`.
+
+
+2. Select **Users and Teams** from the left **Main Menu** to list the [created users](../user-management.md#user-management).
+
+
+3. From the list of users, **select the user** to be assigned a role. This opens the role addition wizard.
+
+
+4. Choose the role category from the top tabs:
+   * Project Role
+   * Tenant Role
+   * Workspace Role
+
+
+5. Once you have chosen the category, click on **+ New Role**.
+
+
+6. In the **Add Roles to User-name** wizard, select the project name from the drop-down menu and select the roles from the list.
+
+
+7. Confirm to complete the wizard.
+
+
+8. The role-user association can be edited and deleted from the `kebab menu`.
+
+## Example Scenario:
+
+Palette has a number of permissions that you can potentially include in your custom role. Here is an example scenario enumerating the minimum permissions required for a user to **Create a Cluster** in the Palette platform.
+
+
+#### 1. Decide the actions, scopes, and permissions required by the user to Create a Cluster.
+
+Role creation is done from the `Tenant Admin` console. For the above scenario, two roles need to be created under the `Project` and `Tenant` scopes and attached to the user.
+
+ +#### 2. Identify the Permissions required under `Project Scope`: + + * Add the minimum `Project` management permissions + * project.list + * project.get + + + * Add the minimum permissions required for `Cloud Account` creation + * cloudaccount.create + * cloudaccount.get + * cloudaccount.list + + + * Add the `ClusterProfile` permissions + * clusterProfile.create + * clusterProfile.delete + * clusterProfile.get + * clusterProfile.list + * clusterProfile.publish + * clusterProfile.update + + + * Add the `Cluster` permissions (for creating and listing the cluster) + * cluster.create + * cluster.list + * cluster.get + + + * Add the `Location` permission. + * location.list + + + * Add the `Cloud Configuration` permissions for node pool management + * cloudconfig.create + + +#### 3. Identify the Permissions required under `Tenant Scope`: + +To attach the Packs and Integrations from Palette public repository, add the `Registry Permissions`. +The minimum permission required in this scenario is: + + * packRegistry.get + + +#### 4. Attach Roles to the User and Create the Cluster + +* Once both the roles are created with the above scopes, attach them to the user. + +* Login to Palette console using the user credentials to create the cluster profile and the cluster. + diff --git a/docs/docs-content/user-management/palette-rbac/project-scope-roles-permissions.md b/docs/docs-content/user-management/palette-rbac/project-scope-roles-permissions.md new file mode 100644 index 0000000000..2802332271 --- /dev/null +++ b/docs/docs-content/user-management/palette-rbac/project-scope-roles-permissions.md @@ -0,0 +1,828 @@ +--- +sidebar_label: "Project Scope Roles and Permissions" +title: "Project Roles" +description: "The list of Global Project Roles under Project Scope" +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["user-management", "rbac"] +--- + + + + + + +# Global Project Scope + +The Global Project Scope holds a group of resources, in a logical grouping, to a specific project. Users and Teams with specific Roles can be associated with the Project, Cluster, or Cluster Profile you create. + +Palette has adopted the security principle of least privilege. Each user is assigned Roles and Permissions to the Scopes, Resources, and Components. The Permissions format is `resourceKey.operation`, where **resourceKey** refers to a resource or the API functionality, and *operation* refers to the action or activity allowed. + +To view a list of the predefined roles and permissions, go to **Tenant Settings** > **Roles**, and you will find the list of **Global Roles**. If you need to extend your permissions, use the **Create Role** option. + +Below is the predefined list of Roles and Permissions for the Global Project Scope: + +
+ + +## App Deployment +-------------------------------- + +|Role Name | Description | +|---|---| +|App Deployment Admin |Provides administrative privilege to perform all the App operations on App resources. | +|App Deployment Editor|Allows the user to perform edit operations on an App but not to create or delete an App.| +|App Deployment Viewer|Allows the user to view all the App resources but not to make modifications.| + +
+
+ + + +
+ +## App Deployment Admin + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **appDeployment** | √ | √ | √ | √ | √ | | | | | +| **appProfile** | | | √ | √ | | | | | | +| **cloudaccount** | | | √ | √ | | | | | | +| **clusterGroup** | | | √ | √ | | | | | | +| **location** | √ | √ | √ | √ | √ | | | | | +| **machine** | | | √ | √ | | | | | | +| **macro** | √ | √ | √ | √ | √ | | | | | +| **packRegistry** | | | √ | √ | | | | | | +| **project** | | | √ | √ | | | | | | +| **sshKey** | √ | √ | √ | √ | √ | | | | | +| **tag** | | | | | √ | | | | | +| **virtualCloudconfig**| √ | √ | √ | √ | √ | | | | | +| **virtualCluster** | √ | √ | √ | √ | √ | | | | | + + + +
+ + +
+ +## App Deployment Editor + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **appDeployment** | | | √ | √ | √ | | | | | +| **appProfile** | | | √ | √ | | | | | | +| **cloudaccount** | | | √ | √ | | | | | | +| **clusterGroup** | | | √ | √ | | | | | | +| **location** | | | √ | √ | √ | | | | | +| **machine** | | | √ | √ | | | | | | +| **macro** | | | √ | √ | | | | | | +| **packRegistry** | | | √ | √ | | | | | | +| **project** | | | √ | √ | | | | | | +| **sshKey** | | | √ | √ | √ | | | | | +| **tag** | | | | | √ | | | | | +| **virtualCloudconfig**| | | √ | √ | √ | | | | | +| **virtualCluster** | | | √ | √ | √ | | | | | + +
+ + +
+ + +
+ +## App Deployment Viewer + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **appDeployment** | | | √ | √ | | | | | | +| **appProfile** | | | √ | √ | | | | | | +| **cloudaccount** | | | √ | √ | | | | | | +| **clusterGroup** | | | √ | √ | | | | | | +| **location** | | | √ | √ | | | | | | +| **machine** | | | √ | √ | | | | | | +| **macro** | | | √ | √ | | | | | | +| **packRegistry** | | | √ | √ | | | | | | +| **project** | | | √ | √ | | | | | | +| **sshKey** | | | √ | √ | | | | | | +| **virtualCloudconfig**| | | √ | √ | | | | | | +| **virtualCluster** | | | √ | √ | | | | | | + + +
+
+ +
+ + +## App Profile +-------------------------------- + +|Role Names | Description | +|---|---| +|App Profile Admin |Provides administrative privilege to perform all the App operations on App profile resources. | +|App Profile Editor|Allows the user to perform edit operations on App profiles but not to create or delete an App profile.| +|App Profile Viewer|Allows the user to view all the App profile resources but not to modify them.| + +
+
+ + + +
+ +## App Profile Admin + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **appProfile** | √ | √ | √ | √ | √ | | | | | +| **macro** | √ | √ | √ | √ | √ | | | | | +| **packRegistry** | | | √ | √ | | | | | | +| **project** | | | √ | √ | | | | | | + +
+ + +
+ +## App Profile Editor + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **appProfile** | | | √ | √ | √ | | | | | +| **macro** | | | √ | √ | √ | | | | | +| **packRegistry** | | | √ | √ | | | | | | +| **project** | | | √ | √ | | | | | | + +
+ + +
+ + +
+ +## App Profile Viewer + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **appProfile** | | | √ | √ | | | | | | +| **macro** | | | √ | √ | | | | | | +| **packRegistry** | | | √ | √ | | | | | | +| **project** | | | √ | √ | | | | | | + + +
+
+ +
+
+|Role Names | Description |
+|---|---|
+|Project Admin |The Project Admin role covers all the project operations. It is an administrative privilege for the project resources. |
+|Project Editor|The Project Editor role can perform edit operations within a project, but the user is not able to create or delete a project.|
+|Project Viewer|The Project Viewer can view all the resources within a project but is not privileged to make modifications.|
+
+
+ + + +
+ +## Project Admin + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **audit** | | | √ | √ | | | | | | +| **cloudaccount** | √ | √ | √ | √ | √ | | | | | +| **cloudconfig** | √ | √ | √ | √ | √ | | | | | +| **cluster** | √ | √ | √ | √ | √ | √ | | | | +| **clusterProfile** | √ | √ | √ | √ | √ | | √ | | | +| **clusterRbac** | √ | √ | √ | √ | √ | | | | | +| **dnsMapping** | √ | √ | √ | √ | √ | | | | | +| **edgehost** | √ | √ | √ | √ | √ | | | | | +| **location** | √ | √ | √ | √ | √ | | | | | +| **machine** | √ | √ | √ | √ | √ | | | | | +| **macro** | √ | √ | √ | √ | √ | | | | | +| **packRegistry** | | | √ | √ | | | | | | +| **privateGateway** | √ | √ | √ | √ | √ | | | | | +| **project** | | | √ | √ | √ | | | | | +| **sshKey** | √ | √ | √ | √ | √ | | | | | +| **tag** | | | | | √ | | | | | +| **workspace** | √ | √ | √ | √ | √ | | | √ | √ | + +
+ + +
+ +## Project Editor + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **audit** | | | √ | √ | | | | | | +| **cloudaccount** | | | √ | √ | √ | | | | | +| **cloudconfig** | √ | | √ | √ | √ | | | | | +| **cluster** | | | √ | √ | √ | | | | | +| **clusterProfile** | | | √ | √ | √ | | √ | | | +| **clusterRbac** | | | √ | √ | √ | | | | | +| **dnsMapping** | | | √ | √ | √ | | | | | +| **edgehost** | | | √ | √ | √ | | | | | +| **location** | | | √ | √ | √ | | | | | +| **machine** | | √ | √ | √ | √ | | | | | +| **macro** | | | √ | √ | √ | | | | | +| **packRegistry** | | | √ | √ | | | | | | +| **privateGateway** | | | √ | √ | √ | | | | | +| **project** | | | √ | √ | √ | | | | | +| **sshKey** | | | √ | √ | √ | | | | | +| **tag** | | | | | √ | | | | | +| **workspace** | | | √ | √ | √ | | | √ | √ | + +
+ + +
+ + +
+ +## Project Viewer + +
+ + + + + +
resourceKeysOperations
+
+
+| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** |
+| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- |
+| **audit**          |            |            | √       | √        |            |            |             |            |             |
+| **cloudaccount**   |            |            | √       | √        |            |            |             |            |             |
+| **cloudconfig**    |            |            | √       | √        |            |            |             |            |             |
+| **cluster**        |            |            | √       | √        |            |            |             |            |             |
+| **clusterProfile** |            |            | √       | √        |            |            |             |            |             |
+| **dnsMapping**     |            |            | √       | √        |            |            |             |            |             |
+| **edgehost**       |            |            | √       | √        |            |            |             |            |             |
+| **location**       |            |            | √       | √        |            |            |             |            |             |
+| **machine**        |            |            | √       | √        |            |            |             |            |             |
+| **macro**          |            |            | √       | √        |            |            |             |            |             |
+| **packRegistry**   |            |            | √       | √        |            |            |             |            |             |
+| **privateGateway** |            |            | √       | √        |            |            |             |            |             |
+| **project**        |            |            | √       | √        |            |            |             |            |             |
+| **sshKey**         |            |            | √       | √        |            |            |             |            |             |
+| **workspace**      |            |            | √       | √        |            |            |             |            |             |
+
+
+
+ +
+ + +## Cluster Profile +----------------------------- + +The user with these permissions can manage the Cluster Profiles within a project. + +
+ +|Role Names| Description | +|---|---| +|Cluster Profile Admin |Cluster Profile Admin role has admin privileges to all the cluster profile operations| +|Cluster Profile Editor|Cluster Profile Editor role has privileges to edit and list operations on the cluster profile| +|Cluster Profile Viewer|Cluster Profile Viewer role has read-only privileges to cluster profiles| + +
+ + + +
+ +## Cluster Profile Admin + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **clusterProfile** | √ | √ | √ | √ | √ | | √ | | | +| **macro** | √ | √ | √ | √ | √ | | | | | +| **packRegistry** | √ | √ | | | | | | | | +| **tag** | | | | | √ | | | | | + +
+ +
+ + +
+ +## Cluster Profile Editor + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **clusterProfile** | | | √ | √ | √ | | √ | | | +| **macro** | | | √ | √ | √ | | | | | +| **packRegistry** | | | √ | √ | | | | | | +| **tag** | | | | | √ | | | | | + +
+ +
+ + +
+ +## Cluster Profile Viewer + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **clusterProfile** | | | √ | √ | | | | | | +| **macro** | | | √ | √ | | | | | | +| **packRegistry** | | | √ | √ | | | | | | + +
+ +
+
+ +
+ +## Cluster +-------------------------------------- +
+ +
+
+|Role Names| Description |
+|---|---|
+|Cluster Admin | A cluster admin in Project scope has all the privileges related to cluster operations|
+|Cluster Editor | A cluster editor in Project scope has the privileges to update, delete, get, and list cluster resources. This role is not privileged for cluster creation |
+|Cluster Viewer | A cluster viewer in Project scope has read-only privileges for cluster operations |
+
+ + + + + +
+ +## Cluster Admin + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **cloudaccount** | | | √ | √ | | | | | | +| **cloudconfig** | √ | √ | √ | √ | √ | | | | | +| **cluster** | √ | √ | √ | √ | √ | √ | | | | +| **clusterProfile** | √ | √ | | | | | | | | +| **clusterRbac** | √ | √ | √ | √ | √ | | | | | +| **dnsMapping** | √ | √ | √ | √ | √ | | | | | +| **edgehost** | √ | √ | √ | √ | √ | | | | | +| **location** | √ | √ | √ | √ | √ | | | | | +| **machine** | √ | √ | √ | √ | √ | | | | | +| **macro** | √ | √ | √ | √ | √ | | | | | +| **packRegistry** | √ | √ | | | | | | | | +| **privateGateway** | √ | √ | | | | | | | | +| **tag** | | | | | √ | | | | | +| **sshKey** | √ | √ | √ | √ | √ | | | | | + +
+ +
+ + +
+ +## Cluster Editor +
+ + + + + +
resourceKeysOperations
+
+ + +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **cloudaccount** | | | √ | √ | | | | | | +| **cloudconfig** | | | √ | √ | √ | | | | | +| **cluster** | | | √ | √ | √ | | | | | +| **clusterProfile** | | | √ | √ | | | | | | +| **clusterRbac** | | | √ | √ | √ | | | | | +| **dnsMapping** | | | √ | √ | √ | | | | | +| **edgehost** | | | √ | √ | √ | | | | | +| **location** | | | √ | √ | √ | | | | | +| **machine** | | √ | √ | √ | √ | | | | | +| **macro** | | | √ | √ | √ | | | | | +| **packRegistry** | | | √ | √ | | | | | | +| **privateGateway** | | | √ | √ | | | | | | +| **tag** | | | | | √ | | | | | +| **sshKey** | | | √ | √ | √ | | | | | + +
+ +
+ + +
+ +## Cluster Viewer + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **cloudaccount** | | | √ | √ | | | | | | +| **cloudconfig** | | | √ | √ | | | | | | +| **cluster** | | | √ | √ | | | | | | +| **clusterProfile** | | | √ | √ | | | | | | +| **clusterRbac** | | | √ | √ | | | | | | +| **dnsMapping** | | | √ | √ | | | | | | +| **edgehost** | | | √ | √ | | | | | | +| **location** | | | √ | √ | | | | | | +| **machine** | | | √ | √ | | | | | | +| **macro** | | | √ | √ | | | | | | +| **packRegistry** | | | √ | √ | | | | | | +| **privateGateway** | | | √ | √ | | | | | | +| **sshKey** | | | √ | √ | | | | | | + +
+ +
+
+ +
+ +## Cloud Account +----------------------------- + +
+
+|Role Names| Description |
+|---|---|
+|Cluster Account Admin | Administrative access to all cloud account operations|
+|Cluster Account Editor | Editor access to cloud account operations |
+|Cluster Account Viewer | Read-only access to cloud account operations |
+
+ + + + + +
+ +## Cluster Account Admin + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ---------------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **cloudaccount** | √ | √ | √ | √ | √ | | | | | + +
+ +
+ + +
+ +## Cluster Account Editor +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ---------------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **cloudaccount** | | | √ | √ | √ | | | | | + +
+ +
+ + +
+ +## Cluster Account Viewer + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ---------------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **cloudaccount** | | | √ | √ | | | | | | + +
+ +
+
+ +## Workspace +------------------------------------ + +
+
+|Role Names| Description |
+|---|---|
+|Workspace Admin | Administrator role for all workspace operations|
+|Workspace Operator | Operator role for workspace operations, limited to viewing, backing up, and restoring workspaces |
+
+ + + + +
+ +## Workspace Admin +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **workspace** | √ | √ | √ | √ | √ | | | √ | √ | + + +
+ +
+ + +
+ +## Workspace Operator + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **workspace** | | | √ | √ | | | | √ | √ | + +
+
+
+ +
+
+ + +## Virtual Cluster +-------------------------------- + +|Role Names | Description | +|---|---| +|Virtual Cluster Admin |Provides administrative privilege to perform all virtual cluster operations on App resources.| +|Virtual Cluster Editor|Allows the user to perform edit operations on a virtual cluster but not to create or delete a virtual cluster.| +|Virtual Cluster Viewer|Allows the user to view all the virtual cluster resources but not to modify them.| + +
+
+ + + +
+ +## Virtual Cluster Admin + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **clusterGroup** | | | √ | √ | | | | | | +| **location** | | | √ | √ | | | | | | +| **macro** | √ | √ | √ | √ | √ | | | | | +| **project** | | | √ | √ | | | | | | +| **tag** | | | | | √ | | | | | +| **virtualCloudconfig**| √ | √ | √ | √ | √ | | | | | +| **virtualCluster** | √ | √ | √ | √ | √ | | | | | + + + +
+ + +
+ +## Virtual Cluster Editor + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **clusterGroup** | | | √ | √ | | | | | | +| **location** | | | √ | √ | | | | | | +| **macro** | | | √ | √ | √ | | | | | +| **project** | | | √ | √ | | | | | | +| **tag** | | | | | √ | | | | | +| **virtualCloudconfig**| | | √ | √ | √ | | | | | +| **virtualCluster** | | | √ | √ | √ | | | | | + +
+ + +
+ + +
+ +## Virtual Cluster Viewer + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **clusterGroup** | | | √ | √ | | | | | | +| **location** | | | √ | √ | | | | | | +| **macro** | | | √ | √ | | | | | | +| **project** | | | √ | √ | | | | | | +| **virtualCloudconfig**| | | √ | √ | | | | | | +| **virtualCluster** | | | √ | √ | | | | | | + +
+ + +
+
+ +
+ +
+
+
diff --git a/docs/docs-content/user-management/palette-rbac/resource-scope-roles-permissions.md b/docs/docs-content/user-management/palette-rbac/resource-scope-roles-permissions.md
new file mode 100644
index 0000000000..a54cd151f1
--- /dev/null
+++ b/docs/docs-content/user-management/palette-rbac/resource-scope-roles-permissions.md
@@ -0,0 +1,298 @@
+---
+sidebar_label: "Palette Resource Roles"
+title: "Palette Global and Custom Resource Roles"
+description: "Palette contains global resource roles and supports the ability to create custom resource roles."
+hide_table_of_contents: false
+sidebar_position: 20
+tags: ["user-management", "rbac"]
+---
+
+
+
+Palette supports two types of resource roles: global resource roles and custom resource roles.
+
+
+* Global Resource Roles are a set of built-in roles available to you.
+
+* Custom Resource Roles are roles you create in Palette from a set of permissions and operations.
+
+To learn how to create a custom role, review the [Create Custom Role](#palette-custom-resource-roles) guide.
+
+
+## Palette Global Resource Roles
+
+Palette provides the following built-in global resource roles:
+
+ +* [Cluster](#cluster) + * Resource Cluster Admin + + * Resource Cluster Editor + + * Resource Cluster Viewer + +* [Cluster Profile](#cluster-profile) + + * Resource Cluster Profile Admin + + * Resource Cluster Profile Editor + + * Resource Cluster Profile Viewer + + +
+ +## Cluster + +
+
+|Role Names| Description |
+|---|---|
+|Resource Cluster Admin | A cluster admin in the Project scope has all privileges related to cluster operations|
+|Resource Cluster Editor | A cluster editor in the Project scope can update, delete, get, and list cluster resources, but cannot create clusters |
+|Resource Cluster Viewer | A cluster viewer in the Project scope has read-only access to cluster operations |
+
+ + + + + +
+ +### Resource Cluster Admin + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **cloudaccount** | | | √ | √ | | | | | | +| **cloudconfig** | √ | √ | √ | √ | √ | | | | | +| **cluster** | √ | √ | √ | √ | √ | √ | | | | +| **clusterProfile** | √ | √ | | | | | | | | +| **clusterRbac** | √ | √ | √ | √ | √ | | | | | +| **dnsMapping** | √ | √ | √ | √ | √ | | | | | +| **edgehost** | √ | √ | √ | √ | √ | | | | | +| **location** | √ | √ | √ | √ | √ | | | | | +| **machine** | √ | √ | √ | √ | √ | | | | | +| **macro** | √ | √ | √ | √ | √ | | | | | +| **packRegistry** | √ | √ | | | | | | | | +| **privateGateway** | √ | √ | | | | | | | | +| **sshKey** | √ | √ | √ | √ | √ | | | | | + +
+ +
+ + +
+ +### Resource Cluster Editor +
+ + + + + +
resourceKeysOperations
+
+ + +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **cloudaccount** | | | √ | √ | | | | | | +| **cloudconfig** | | | √ | √ | √ | | | | | +| **cluster** | | | √ | √ | √ | | | | | +| **clusterProfile** | | | √ | √ | | | | | | +| **clusterRbac** | | | √ | √ | √ | | | | | +| **dnsMapping** | | | √ | √ | √ | | | | | +| **edgehost** | | | √ | √ | √ | | | | | +| **location** | | | √ | √ | √ | | | | | +| **machine** | | √ | √ | √ | √ | | | | | +| **macro** | | | √ | √ | √ | | | | | +| **packRegistry** | | | √ | √ | | | | | | +| **privateGateway** | | | √ | √ | | | | | | +| **sshKey** | | | √ | √ | √ | | | | | + +
+ +
+ + +
+ +### Resource Cluster Viewer + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **cloudaccount** | | | √ | √ | | | | | | +| **cloudconfig** | | | √ | √ | | | | | | +| **cluster** | | | √ | √ | | | | | | +| **clusterProfile** | | | √ | √ | | | | | | +| **clusterRbac** | | | √ | √ | | | | | | +| **dnsMapping** | | | √ | √ | | | | | | +| **edgehost** | | | √ | √ | | | | | | +| **location** | | | √ | √ | | | | | | +| **machine** | | | √ | √ | | | | | | +| **macro** | | | √ | √ | | | | | | +| **packRegistry** | | | √ | √ | | | | | | +| **privateGateway** | | | √ | √ | | | | | | +| **sshKey** | | | √ | √ | | | | | | + +
+ +
+
+ +
+ + +## Cluster Profile + + +The user with these permissions can manage the Cluster Profiles within a project. + +
+
+|Role Names| Description |
+|---|---|
+|Cluster Profile Admin |The Cluster Profile Admin role has admin privileges for all cluster profile operations|
+|Cluster Profile Editor|The Cluster Profile Editor role can edit and list cluster profiles|
+|Cluster Profile Viewer|The Cluster Profile Viewer role has read-only access to cluster profiles|
+
+ + + +
+ +### Resource Cluster Profile Admin + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **clusterProfile** | √ | √ | √ | √ | √ | | √ | | | +| **macro** | √ | √ | √ | √ | √ | | | | | +| **packRegistry** | √ | √ | | | | | | | | + +
+ +
+ + +
+ +### Resource Cluster Profile Editor + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **clusterProfile** | | | √ | √ | √ | | √ | | | +| **macro** | | | √ | √ | √ | | | | | +| **packRegistry** | | | √ | √ | | | | | | + +
+ +
+ + +
+ +### Resource Cluster Profile Viewer + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **clusterProfile** | | | √ | √ | | | | | | +| **macro** | | | √ | √ | | | | | | +| **packRegistry** | | | √ | √ | | | | | | + +
+ +
+
+ +
+ + + +## Palette Custom Resource Roles + +
+
+The following is a list of platform permissions and operations supported by Palette. Use these permissions to [create a custom role](../new-user.md#create-custom-role) that controls cluster access. For each **resource key**, add only the **operations** your use case requires, as illustrated in the sketch below.
+
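+As a purely illustrative sketch, a custom role can be thought of as a named set of `resourceKey.operation` pairs taken from the matrix below. The role name and YAML layout here are hypothetical and not a file that Palette consumes; custom roles are created through the UI as described in the guide linked above.
+
+```yml
+# Hypothetical custom role expressed as resourceKey.operation permissions
+# drawn from the matrix below. Palette itself collects these through the UI.
+name: cluster-operator
+scope: project
+permissions:
+  - cluster.get
+  - cluster.list
+  - cluster.update
+  - clusterProfile.get
+  - clusterProfile.list
+  - macro.get
+  - macro.list
+```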
+ +## List of Custom Permissions + +
+ + + + + +
resourceKeysOperations
+
+
+| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** |
+| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- |
+| **cloudaccount** | | | √ | √ | | | | | |
+| **cloudconfig** | | √ | √ | √ | √ | | | | |
+| **cluster** | | √ | √ | √ | √ | | | | |
+| **clusterProfile** | | √ | √ | √ | √ | | √ | | |
+| **dnsMapping** | | | √ | √ | | | | | |
+| **location** | | | √ | √ | | | | | |
+| **machine** | | | √ | √ | | | | | |
+| **macro** | | | √ | √ | | | | | |
+| **packRegistry** | | | √ | √ | | | | | |
+
+
+## Resources
+
+[Resource Scope Matrix](palette-rbac.md#resource-scope-matrix)
+
+
diff --git a/docs/docs-content/user-management/palette-rbac/tenant-scope-roles-permissions.md b/docs/docs-content/user-management/palette-rbac/tenant-scope-roles-permissions.md
new file mode 100644
index 0000000000..1018d6904e
--- /dev/null
+++ b/docs/docs-content/user-management/palette-rbac/tenant-scope-roles-permissions.md
@@ -0,0 +1,343 @@
+---
+sidebar_label: "Tenant Scope Roles and Permissions"
+title: "Tenant Roles"
+description: "The list of Global Tenant Roles under Tenant Scope"
+icon: ""
+hide_table_of_contents: false
+sidebar_position: 0
+tags: ["user-management", "rbac"]
+---
+
+
+
+
+## Global Tenant Scope
+
+A tenant is an isolated workspace within the Palette console. Users and teams with specific roles can be associated with the [tenants](../../glossary-all.md#organization) and [projects](../../glossary-all.md#project) you create.
+
+Each user is assigned a role and a set of permissions, which apply to scopes, resources, and resource keys. Permissions use the format `resourceKey.operation`, where the resource key refers to a resource or API functionality and the operation refers to the permitted action or activity. For example, `cluster.list` allows the user to list clusters.
+
+To view the list of predefined roles and permissions, ensure you are in the **Tenant** scope. Next, navigate to the left **Main Menu** and click on **Tenant Settings** > **Roles** to find the list of **Global Roles**. If you need to extend permissions, create a custom role by using the [Create Role](palette-rbac.md#create-custom-role-in-palette) option.
+
+Below is the list of roles and permissions that are predefined for the Global Tenant Scope.
+
+ +:::info + +All users can view tags assigned to a resource. In technical terms, all users inherit the permission `tag.get` by default. + +::: + +
+
+## Tenants
+----------------------------
+
+|Role Names | Description |
+|---|---|
+|Tenant Admin |Allows the user to create and manage projects within the tenant, covering all project-related operations|
+|Tenant Viewer| Provides read-only access to all project resources|
+|Tenant Project Admin|A role with complete access to an existing project|
+
+The following table lists, per role, the resourceKeys and operations that are predefined under the Global Tenant Scope:
+
+
+ + + + + +
+ +## Tenant Admin + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **apiKey** | √ | √ | √ | √ | √ | | | | | +| **audit** | | | √ | √ | | | | | | +| **cloudaccount** | √ | √ | √ | √ | √ | | | | | +| **cloudconfig** | √ | √ | √ | √ | √ | | | | | +| **cluster** | √ | √ | √ | √ | √ | √ | | | | +| **clusterProfile** | √ | √ | √ | √ | √ | | √ | | | +| **clusterRbac** | √ | √ | √ | √ | √ | | | | | +| **dnsMapping** | √ | √ | √ | √ | √ | | | | | +| **edgehost** | √ | √ | √ | √ | √ | | | | | +| **location** | √ | √ | √ | √ | √ | | | | | +| **machine** | √ | √ | √ | √ | √ | | | | | +| **macro** | √ | √ | √ | √ | √ | | | | | +| **packRegistry** | √ | √ | √ | √ | √ | | | | | +| **privateGateway** | √ | √ | √ | √ | √ | | | | | +| **project** | √ | √ | √ | √ | √ | | | | | +| **role** | √ | √ | √ | √ | √ | | | | | +| **sshKey** | √ | √ | √ | √ | √ | | | | | +| **team** | √ | √ | √ | √ | √ | | | | | +| **tag** | | | | | √ | | | | | +| **user** | √ | √ | √ | √ | √ | | | | | +| **workspace** | √ | √ | √ | √ | √ | | | √ | √ | + +
+
+
+ +
+ + + +## Tenant Viewer + +
+ + + + + +
resourceKeysOperations
+
+ + | | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | + | ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | + | **apiKey** | | | √ | √ | | | | | | + | **audit** | | | √ | √ | | | | | | + | **cloudaccount** | | | √ | √ | | | | | | + | **cloudconfig** | | | √ | √ | | | | | | + | **cluster** | | | √ | √ | | | | | | + | **clusterProfile** | | | √ | √ | | | | | | + | **clusterRbac** | | | √ | √ | | | | | | + | **dnsMapping** | | | √ | √ | | | | | | + | **edgehost** | | | √ | √ | | | | | | + | **location** | | | √ | √ | | | | | | + | **machine** | | | √ | √ | | | | | | + | **macro** | | | √ | √ | | | | | | + | **packRegistry** | | | √ | √ | | | | | | + | **privateGateway** | | | √ | √ | | | | | | + | **project** | | | √ | √ | | | | | | + | **role** | | | √ | √ | | | | | | + | **sshKey** | | | √ | √ | | | | | | + | **team** | | | √ | √ | | | | | | + | **user** | | | √ | √ | | | | | | + | **workspace** | | | √ | √ | | | | | | + + +
+ + +
+ +## Tenant Project Admin + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Get** | **Delete** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ------- | ---------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **apiKey** | | √ | | √ | | | | | | +| **audit** | | √ | | √ | | | | | | +| **cloudaccount** | √ | √ | √ | √ | √ | | | | | +| **cloudconfig** | √ | √ | √ | √ | √ | | | | | +| **cluster** | √ | √ | √ | √ | √ | √ | | | | +| **clusterProfile** | √ | √ | √ | √ | √ | | √ | | | +| **clusterRbac** | √ | √ | √ | √ | √ | | | | | +| **dnsMapping** | √ | √ | √ | √ | √ | | | | | +| **edgehost** | √ | √ | √ | √ | √ | | | | | +| **location** | √ | √ | √ | √ | √ | | | | | +| **machine** | √ | √ | √ | √ | √ | | | | | +| **macro** | √ | √ | √ | √ | √ | | | | | +| **packRegistry** | √ | √ | √ | √ | √ | | | | | +| **privateGateway** | √ | √ | √ | √ | √ | | | | | +| **project** | √ | √ | √ | √ | √ | | | | | +| **sshKey** | √ | √ | √ | √ | √ | | | | | +| **tag** | | | | | √ | | | | | +| **workspace** | √ | √ | √ | √ | √ | | | √ | √ | + + +
+
+ +
+
+ +## Cluster Profile + +---------------------------- + +|Role Names | Description | +|---|---| +|Tenant Cluster Profile Admin | A role which has complete access to all the `Cluster Profile` related operations| + +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **clusterProfile** | √ | √ | √ | √ | √ | | √ | | | +| **macro** | √ | √ | √ | √ | √ | | | | | +| **packRegistry** | | | √ | √ | | | | | | +| **tag** | | | | | √ | | | | | +
+
+
+
+## Tenant Role
+
+----------------------------
+
+|Role Names | Description |
+|---|---|
+|Tenant Role Admin | A role which has complete access to all the `Role` related operations |
+
+| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** |
+| -------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- |
+| **role** | √ | √ | √ | √ | √ | | | | |
+
+
+
+
+ +## Tenant Team + +---------------------------- + +|Role Names | Description | +|---|---| +|Tenant Team Admin | A role which has complete access to all the `Team` related operations | + +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ---------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **apiKey** | | | √ | √ | | | | | | +| **audit** | | | √ | √ | | | | | | +| **team** | √ | √ | √ | √ | √ | | | | | +| **user** | | | √ | √ | | | | | | + + +
+
+
+
+## Tenant User
+
+----------------------------
+
+|Role Names | Description |
+|---|---|
+|Tenant User Admin Role|A role which has complete access to all the `User` related operations|
+
+
+
+| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** |
+| ---------- | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- |
+| **apiKey** | √ | √ | √ | √ | √ | | | | |
+| **audit** | | | √ | √ | | | | | |
+| **user** | √ | √ | √ | √ | √ | | | | |
+
+
+
+
+## Tenants Cluster Group
+----------------------------
+
+|Role Names | Description |
+|---|---|
+|Tenants Cluster Group Admin |Allows the user to create and manage cluster groups within the tenant, covering all operations related to cluster groups|
+|Tenants Cluster Group Editor|Can perform edit operations on a cluster group, but cannot create or delete a cluster group|
+|Tenants Cluster Group Viewer|Provides read-only access to all cluster group resources|
+
+The following table lists, per role, the resourceKeys and operations that are predefined under the Global Tenant Scope:
+
+
+ + + + + +
+ +## Tenant Cluster Group Admin + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **cluster** | | | √ | √ | | | | | | +| **clusterGroup** | √ | √ | √ | √ | √ | | | | | +| **tag** | | | | | √ | | | | | + + +
+
+
+ +
+ + + +## Tenant Cluster Group Editor + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **cluster** | | | √ | √ | | | | | | +| **clusterGroup** | | | √ | √ | √ | | | | | +| **tag** | | | | | √ | | | | | + + + +
+ + +
+ +## Tenant Cluster Group Viewer + +
+ + + + + +
resourceKeysOperations
+
+ +| | **Create** | **Delete** | **Get** | **List** | **Update** | **Import** | **Publish** | **Backup** | **Restore** | +| ------------------ | ---------- | ---------- | ------- | -------- | ---------- | ---------- | ----------- | ---------- | ----------- | +| **cluster** | | | √ | √ | | | | | | +| **clusterGroup** | | | √ | √ | | | | | | + + +
+
+ +
+ + diff --git a/docs/docs-content/user-management/palette-resource-limits.md b/docs/docs-content/user-management/palette-resource-limits.md new file mode 100644 index 0000000000..fd834134ef --- /dev/null +++ b/docs/docs-content/user-management/palette-resource-limits.md @@ -0,0 +1,68 @@ +--- +sidebar_label: "Palette Resource Limits" +title: "Default Palette Resource Limits" +description: "Understand the default resource limits for Palette and learn how to set resource limits for your Palette tenant." +icon: "" +hide_table_of_contents: false +sidebar_position: 40 +tags: ["user-management"] +--- + + + +## Default Palette Resource Limits + + +Tenant admins can set and update resource limits for Palette. The resource limits determine the maximum number of resources that can be created in Palette. The resource limits are set at the tenant level and apply to all projects in the tenant. + +The following table lists the default resource limits for Palette: + +|**Resources** | **Max Limit** | **Scope** | +|--------------------|----------------------| ---- | +|Users | 300 | Tenant| +|Teams | 100 | Tenant| +|Projects | 50 | Tenant | +|Workspaces | 50 | Tenant | +|Roles | 100 | Tenant | +|Cloud Accounts | 200 | Tenant | +|Cluster Profiles | 200 | Tenant | +|Registries | 50 | Tenant | +|Private Gateway | 50 | Tenant | +|API Keys | 20 | User | +|Backup Locations | 100 | Tenant | +|Certificates | 20 | Tenant | +|Macros | 200 | Project| +|SSH Keys | 300 | Tenant | +|Alerts or Webhook | 100 | Project| +|Clusters | 10,000 | Tenant | +|Edge Hosts | 200 | Tenant | + +## Set Resource Limit + +Use the following steps to set or update resource limits for your Palette tenant. + +## Prerequisites + +* You must have access to the *tenant admin* role. + + +## Update Limits + +1. Log in to [Palette](https://console.spectrocloud.com) as a tenant admin. + + +2. Navigate to the left **Main Menu** and select **Tenant Settings**. + + +3. Select **Resource Limits** from the **Tenant Settings Menu**. + + +4. Set the values for the different Palette resources. + + +5. Save your changes. + + +## Validate + +You can validate the updated resource limits by creating a resource of the same type you updated. For example, you can create five API keys if you updated the **API Key** to five. If you attempt to create a sixth API key, you will receive an error message. diff --git a/docs/docs-content/user-management/project-association.md b/docs/docs-content/user-management/project-association.md new file mode 100644 index 0000000000..6a610077ef --- /dev/null +++ b/docs/docs-content/user-management/project-association.md @@ -0,0 +1,65 @@ +--- +sidebar_label: "Project Association" +title: "Associate a User or Team with a Project" +description: "Associate a User or Team with a Project" +hide_table_of_contents: false +sidebar_position: 20 +tags: ["user-management"] +--- + + +Associating a user or team with a specific project creates a clear distinction between who is allowed in the project and their access control permissions. By grouping resources together, you ensure that only designated members have access to and control over the project resources, preventing others from accidentally or intentionally modifying them. This improves resource accountability, reduces confusion and conflicts, and helps maintain the integrity and security of the project. + +User permissions are determined by the combination of their tenant and project roles, as well as any roles inherited from their team association. 
If a user is a *Tenant Admin*, they have admin permissions in all projects. A user with the *Project Viewer* role at the tenant level has *View* permissions for all projects. However, if a user or team has the *Project Viewer* role assigned to a specific project, they only have view access to that project. The extent of user permissions, either at the tenant or project level, determines the number of projects they can access.
+
+## Associate a User or Team
+
+To associate a user or team with a project, use the following steps.
+
+## Prerequisites
+
+* Tenant Admin access.
+
+* An available project. Check out the [Create a Project](../projects.md) guide to learn how to create a project.
+
+* A user or a team.
+
+## Enablement
+
+1. Log in to [Palette](https://console.spectrocloud.com).
+
+
+2. Select the **Tenant Admin** scope.
+
+
+3. Navigate to the left **Main Menu** and select **Users & Teams**.
+
+
+4. Select the tab of the resource you want to associate with a project, either a user or a team.
+
+
+5. Click on the row of the user or team to display its overview page.
+
+
+6. Select **Project Roles**.
+
+
+7. Open the **drop-down Menu**, which lists all the available projects in the tenant, and select the project you want to associate with the project role.
+
+
+8. Next, assign permissions to the project role. To learn more about each permission and available roles, check out the [Palette RBAC](palette-rbac/palette-rbac.md) documentation.
+
+
+9. Click **Confirm** to create the project role and complete the project association process.
+
+
+## Validate
+
+1. Have a user, or a user assigned to a team, log in to [Palette](https://console.spectrocloud.com).
+
+2. Ask the user to switch the scope to the project you associated their role with.
+
+The user will now be able to select the associated project and complete actions within the scope of their project role permissions.
+
diff --git a/docs/docs-content/user-management/saml-sso/_category_.json b/docs/docs-content/user-management/saml-sso/_category_.json new file mode 100644 index 0000000000..ae9ddb024d --- /dev/null +++ b/docs/docs-content/user-management/saml-sso/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 50 +} diff --git a/docs/docs-content/user-management/saml-sso/enable-saml.md b/docs/docs-content/user-management/saml-sso/enable-saml.md new file mode 100644 index 0000000000..b25514fc57 --- /dev/null +++ b/docs/docs-content/user-management/saml-sso/enable-saml.md @@ -0,0 +1,87 @@ +--- +sidebar_label: 'Enable SSO in Palette' +title: 'Enable SSO in Palette' +description: 'Learn how to enable SSO in Palette' +icon: "" +hide_table_of_contents: false +hiddenFromNav: false +sidebar_position: 0 +tags: ["user-management", "saml-sso"] +--- + + +With Spectro Cloud Palette, you can use SAML 2.0 protocols for single sign-on (SSO) authentication using your IdP. + +
+
+## Set Up SAML-based SSO
+
+To set up SAML-based SSO between Spectro Cloud Palette and your identity provider (IdP):


+1. Log in to the Palette console as a Tenant Admin.


+2. Select **Tenant Settings** > **SSO Auth Type** > **SAML** to view the SAML panel.


+3. Complete the assertion form with the requested parameters. See below for more details specific to the supported IdPs.


+ The following options will be available for configuring SAML SSO within Palette:


+ + - **Service** - Choose your IdP (Azure Active Directory, Okta, Keycloak, OneLogin, ADFS, Other).


+ - **Identity Provider Metadata** - Enter the Identity Provider Metadata.


+ - **Default Teams** - Add the authenticated user's Default Team(s) Palette membership.


+ - **NameID Format** - Choose the appropriate version of the format in use (SAML 1.1, SAML 2.0, email address, other).


+ + The following parameters will enable Spectro Cloud Palette as a **Service Provider** (SP) in your IdP. Your IdP will require some or all the information listed below to enable SSO with Palette.


+ + - **Single Logout URL** - The IdP will use the logout URL for the SAML SSO configuration.


+ - **EntityId** - https://www.spectrocloud.com


+ - **FirstName** - Attribute in First Name format.


+ - **LastName** - Attribute in Last Name format.


+ - **Email** - Attribute in Email format.


+ - **SpectroTeam** - Attribute in SpectroTeam format.


+ - **Service Provider Metadata** - Provide the EntityDescriptor.


+ +4. Edit each parameter as necessary and click **Enable** to complete the setup wizard.


+ +
+ + +## Set Up OIDC-based SSO + +Spectro Cloud Palette supports OpenID Connect (OIDC), a de facto standard of contemporary authentication that provides secured identity management in a highly interoperable format.


+ +## Procedure + +To set up an OIDC-based SSO in Spectro Cloud Palette perform the following steps:


+ +1. Log in to the Palette console as the Tenant Admin.


+2. Select the **Tenant Settings** > **SSO** > **OIDC** to view the panel.


+3. Enable Spectro Cloud as the **Service Provider** by completing the form with the following parameters. Select the tabs below for more details specific to IdPs supported with Palette.


+ + * **Issuer URL** - The URL of the OpenID identity provider.
**Note**: For AWS users, Issuer URL needs to be generated in the format as described below:


+ `https://cognito-idp.[REGION].amazonaws.com/[USER-POOL-ID]`


+ + - **Client ID** - The ID for the client application that makes authentication requests.


+ - **Client Secret** - Enter the secret created by the IdP.


+ - **Default Teams** - The Default Palette Team(s) to which authenticated members are assigned automatically.


+ - **Scopes** - The user's details will be used as part of SSO, like *email*, *firstname*, *lastname* or *groups*. Each scope returns a set of user attributes, called claims.


Microsoft Azure AD Example: "openid, profile, email, allatclaims"


+ - **REQUIRED CLAIMS** - These are the parameter values, claimed by the user, to be mapped with the Identity Provider Platform. Complete the Required Claims:


+ - **Email** - Azure AD Example: "email"


+ - **First Name** - Azure AD Example: "given_name"


+ - **Last Name** - Azure AD Example: "family_name"


+ - **Spectro Team Name** - Azure AD Example: "groups".


Any non-admin user that is added to a Tenant must be added to at least one Team. This Team can be changed later if needed. See the [Teams](../../glossary-all.md#team) section for more details on Teams.


+ + - If a user is not added to a Team, the user can still log in successfully but will not be able to see the console until proper Project or Tenant permissions are applied (Tenant Admin, Project Admin, Project Viewer, and so on). The **SpectroTeam** attribute carries forward the available team(s) for the user being authorized. This gives the administrator the flexibility to grant access to Spectro Cloud Palette using either Users or Groups in their IdP or by adding users directly to a Palette Team(s).


+
+ - The values of the **SpectroTeam** parameter are case-sensitive, so the Tenant Admin should ensure that the team names are identical on both consoles. To sync an IdP group with a Palette Team, ensure the IdP group Name (or, if it's Azure Active Directory, the Object Id corresponding to the IdP group Name) matches the Palette Team name.


+
+ - An example use case is when the Tenant Admin adds a new member to the Palette Tenant. The administrator can configure a default Palette Team, or a synced IdP group, that is common to all authenticated users. This default Palette Team/IdP group can be applied to the Palette SAML panel as a one-time setting.
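+
+ As a quick recap of the fields above, the sketch below shows one hypothetical set of values for an Azure AD-backed OIDC configuration. The field names and layout are illustrative only; Palette collects these values through the **Tenant Settings** > **SSO** > **OIDC** form rather than from a file, and the placeholder URL, client ID, and secret must come from your own IdP.
+
+```yml
+# Hypothetical example values for the OIDC form fields described above,
+# shown for an Azure AD tenant. Replace every placeholder with your IdP's values.
+issuerUrl: "https://login.microsoftonline.com/<TENANT-ID>/v2.0"
+clientId: "<APPLICATION-CLIENT-ID>"
+clientSecret: "<CLIENT-SECRET>"
+defaultTeams: []
+scopes: ["openid", "profile", "email", "allatclaims"]
+requiredClaims:
+  email: "email"
+  firstName: "given_name"
+  lastName: "family_name"
+  spectroTeamName: "groups"
+```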


+ +:::info +Your IdP may require the following settings to configure OIDC SSO with Palette: + + - **Callback URL** - The URL to which Auth0 redirects users after they authenticate. Ensure that this value is configured for the application you registered with the OIDC Identity Provider. + + - **Logout URL** - The IdP will use the logout URL for the OIDC SSO configuration. + +::: + +## Results +You have now established the minimum configuration that is required to configure Palette OIDC, capable of communicating with other IdPs configured as OpenID Connect Providers. + diff --git a/docs/docs-content/user-management/saml-sso/palette-sso-azure-ad.md b/docs/docs-content/user-management/saml-sso/palette-sso-azure-ad.md new file mode 100644 index 0000000000..7497375ba4 --- /dev/null +++ b/docs/docs-content/user-management/saml-sso/palette-sso-azure-ad.md @@ -0,0 +1,208 @@ +--- +sidebar_label: 'Palette SSO with Azure Active Directory' +title: 'Palette SSO with Azure Active Directory' +description: 'Learn how to enable SSO in Palette with Azure Active Directory' +icon: "" +hide_table_of_contents: false +sidebar_position: 20 +hiddenFromNav: false +tags: ["user-management", "saml-sso"] +--- + +## Azure Active Directory and OIDC-Based Setup + +After configuration, your organization can integrate Microsoft Azure Active Directory to authenticate access to Spectro Cloud Palette. + +## Prerequisites + +- Microsoft Azure Active Directory with appropriate permissions to create and modify users, groups, Enterprise Applications (SAML) or App Registrations (OIDC).


+- Access to Palette - Request access for a [Free Trial](../../getting-started/palette-freemium.md).


+- Appropriate rights and [enabled token IDs](https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-protocols-oidc#enable-id-tokens) in Azure.


+- [kubelogin](https://github.com/int128/kubelogin) - This is a `kubectl` plugin for Kubernetes OpenID Connect (OIDC) authentication, also known as `kubectl` oidc-login. + +
+ +
Kubelogin Architecture
+ +
+ + ![kubelogin](https://github.com/int128/kubelogin/raw/master/docs/credential-plugin-diagram.svg "Credential Plugin Diagram from kubelogin") + +
+ +## Steps for OIDC Integration in Microsoft Azure Active Directory + +From within Microsoft Azure AD, log in and find the Azure Active Directory service page. The following two libraries contain the necessary parameters to configure Palette. + +
+ +1. **App registrations** - You will use Azure AD App registrations to configure OIDC SSO with Spectro Cloud Palette.


+ +2. **Enterprise applications** - You will use Azure AD Enterprise registrations to configure SAML SSO with Spectro Cloud Palette.


+ +![enterprise-app-registration](/oidc-azure-images/enterprise-app-registration.png) +

+ +## Integrating OIDC SSO for authenticating access to Kubernetes clusters using Microsoft Azure Active Directory + +This section describes how to enable Azure AD SSO authentication to access a Kubernetes cluster. + +1. From the sidebar menu, select **Tenant Settings** and ensure the **Tenant Admin** from dropdown is selected.


+2. Go to **Profiles** from within Tenant Admin or a Project and select an existing Cluster Profile. Alternatively, if a Cluster Profile does not exist, create a new Cluster Profile with a CNCF Kubernetes distribution. Once you select a profile, you will see the Infrastructure layers in the picture.


+3. Choose the **Kubernetes** layer and select the **Pack Values** to modify.


+4. The Pack Version Settings are exposed with the appropriate privileges (Tenant Admin). Notate the following **Variable** within the pack settings.


+ +## Configuring the Application OpenID Configuration in the Cluster + +1. Go to the **Kubeadminconfig**:**apiServer**:**extraArgs** section of the pack layer.


+ + - **oidc-groups-claim** - "Groups"


+ - **oidc-username-claim** - "Email"


+ - **oidc-issuer-url** - "Issuer's URL"


+ - **oidc-client-id** - "Client ID"


+ + ![kubeadminconfig](/oidc-azure-images/kubeadmconfig.png) + +


+ +2. Next, find the **clientConfig** section and modify the following parameters:


+ + - **oidc-issuer-url** - This is the provider URL which allows the Palette to discover public signing keys.


+ - **oidc-client-id** - The client ID is found under the Application Registration/Enterprise Application.


+ - **oidc-client-secret** - The secret provided by Azure AD.


+ - **oidc-extra-scope** - The scope tags.
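+
+Taken together, the two steps above correspond roughly to the following sketch of the Kubernetes pack values. This is an illustrative sketch only: the exact nesting and key names depend on the Kubernetes pack version in use, and the issuer URL, client ID, client secret, and claim names are placeholders that must be replaced with the values from your Azure AD app registration.
+
+```yml
+# Illustrative sketch only. Key placement may vary by pack version, and all
+# placeholder values must come from your Azure AD app registration.
+kubeadmconfig:
+  apiServer:
+    extraArgs:
+      oidc-issuer-url: "https://login.microsoftonline.com/<TENANT-ID>/v2.0"
+      oidc-client-id: "<CLIENT-ID>"
+      # Claim names must match the claims your IdP issues (commonly email and groups).
+      oidc-username-claim: "email"
+      oidc-groups-claim: "groups"
+
+clientConfig:
+  oidc-issuer-url: "https://login.microsoftonline.com/<TENANT-ID>/v2.0"
+  oidc-client-id: "<CLIENT-ID>"
+  oidc-client-secret: "<CLIENT-SECRET>"
+  oidc-extra-scope: profile,email
+```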


+ +![oidc](/oidc-azure-images/client-config.png) +


+ +## Binding the Cluster Admin Role AD to Cluster Admin via RBAC + +Configure the Role Based Access Control Pack (RBAC).


+ +### Adding an RBAC Pack + +1. Under **Tenant Admin**, create an **RBAC Cluster** profile.


+2. Go to **Cluster Profile** > +**Add Cluster Profile** and complete the Basic Information.


+3. Enter the **Name**, **Version**, and **Description** (Optional) and click **Next**.


+4. Under **Type**, select **+Add-on New Pack**.


+5. Select **Authentication** as the Pack Type.


+6. From the **Registry** dropdown, click **Public Repo**.


+7. Choose **Spectro RBAC** as the Pack Name.


+8. Select the Pack Version.


+9. Click the **spectro-rbac 1.0.0** Pack Values to edit the pack layer settings.

+ **Note**: This is where you will edit the role settings.


+10. Click the **Confirm & Create** button.


+ +### Editing the RBAC Cluster Profile + +1. From Palette, go to **Profiles** and choose the **RBAC** cluster profile.


+2. Click the layer image and specify the ClusterRoleBindings.


+3. Go to the **clusterRoleBindings**:**role** section and type **cluster-admin**.


+4. Change the settings to your requirements and specific groups.


+ +For Azure AD integration with RBAC, edit your RBAC pack value to below. Or, copy and paste the entire block to your RBAC pack and modify you inputs where appropriate: + +```yml +pack: + spectrocloud.com/install-priority: "0" +charts: + spectro-rbac: + # Specify one or more ClusterRoleBinding + # Note that the _name_ attribute is optional + clusterRoleBindings: + - role: cluster-admin + name: bind-cluster-admin-role-to-cluster-admin + subjects: + #- type: User + #name: user5 + - type: Group + # For "name", input the Azure AD Group ID name and add a comment on what the Azure AD displayname is that corresponds to the Azure AD Group Name + # Example: Azure AD Group Object Id "70d19fd6-####-####-####-##c6c915e301" is tied to the Azure AD Security Group with the display name of "cluster-admin-role". + # name: "AZURE AD GROUP ID NAME" + name: "INSERT AZURE AD GROUP ID For Cluster Admins" + - role: admin + name: bind-admin-role-to-admin + subjects: + #- type: User + #name: user5 + - type: Group + # For "name", input the Azure AD Group ID name and add a comment on what the Azure AD displayname is that corresponds to the Azure AD Group Name + # Example: Azure AD Group Object Id "064f2e40-####-####-####-##b9f7927976" is tied to the Azure AD Security Group with the display name of "admin-role". + # name: "AZURE AD GROUP ID NAME" + name: "INSERT AZURE AD GROUP ID For Admins" + - role: view + name: bind-view-role-to-view + subjects: + #- type: User + #name: user6 + - type: Group + # For "name", input the Azure AD Group ID name and add a comment on what the Azure AD displayname is that corresponds to the Azure AD Group Name + # Example: Azure AD Group Object Id "732edc96--####-####-####-##851dee3380" is tied to the Azure AD Security Group with the display name of "view-role". + # name: "AZURE AD GROUP ID NAME" + name: "INSERT AZURE AD GROUP ID For Viewers" + #- type: ServiceAccount + #name: group6 + #namespace: foo + - role: edit + name: bind-edit-role-to-edit + subjects: + #- type: User + #name: user6 + - type: Group + # For "name", input the Azure AD Group ID name and add a comment on what the Azure AD displayname is that corresponds to the Azure AD Group Name + # Example: Azure AD Group Object Id "21b55c08-6-####-####-####-##a3e2245ad7" is tied to the Azure AD Security Group with the display name of "edit-role". + # name: "AZURE AD GROUP ID NAME" + name: "INSERT AZURE AD GROUP ID For Edit" + #- type: ServiceAccount + #name: group6 + #namespace: foo + #namespaces: + # Specify one or more RoleBindings + #- namespace: team1 + #createNamespace: true + #roleBindings: + #- role: admin + #name: special-override-name-admin-role + #kind: ClusterRole + #subjects: + #- type: User + #name: user3 + #- type: Group + #name: team1namespaceadmin + #- role: view + #kind: ClusterRole + #subjects: + #- type: User + #name: user4 + #- type: Group + #name: team1namespaceview + #- namespace: team2 + #createNamespace: true + #roleBindings: + #- role: admin + #name: special + #kind: ClusterRole + #subjects: + #- type: User + #name: user1 + #- type: Group + #name: group1 +``` + +**Example**: + +**Azure AD Group Object ID** "70\*\*\*\*\*\*\-355a-453b-aadf-\*\*\*\*\*\*\*\*\*301" is linked to the **Azure AD Security Group** with the display name of **cluster-admin-role**. + +**name**: "AZURE AD GROUP ID NAME" + +![oidc](/oidc-azure-images/client-config.png) + +## Results + +You have now established SSO authentication integrating Microsoft Azure AD and Spectro Cloud Palette using OIDC. 
+ +## References + +[Microsoft Active Directory](https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-protocols-oidc)
+[Credential Plugin Diagram](https://github.com/int128/kubelogin/raw/master/docs/credential-plugin-diagram.svg)
+[kubelogin](https://github.com/int128/kubelogin)
\ No newline at end of file diff --git a/docs/docs-content/user-management/saml-sso/palette-sso-with-adfs.md b/docs/docs-content/user-management/saml-sso/palette-sso-with-adfs.md new file mode 100644 index 0000000000..06430f9187 --- /dev/null +++ b/docs/docs-content/user-management/saml-sso/palette-sso-with-adfs.md @@ -0,0 +1,298 @@ +--- +sidebar_label: 'Palette SSO with Microsoft AD FS' +title: 'Set up Palette SSO with Microsoft Active Directory Federation Service (AD FS)' +description: 'Set up Palette SSO with Microsoft Active Directory Federation Service (AD FS)' +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +hiddenFromNav: false +tags: ["user-management", "saml-sso"] +--- + + + + + +## Enable SSO with Microsoft Active Directory Federation Service (AD FS) + +Single sign-on (SSO) is an authentication method that enables users to log in to multiple applications and websites with one set of credentials. SSO works upon a trust relationship established and maintained between the service provider (SP) and an identity provider (IdP) using certificates. Palette supports SSO based on either SAML or OIDC. + +The following steps will guide you to enable Palette SSO with [Microsoft AD FS](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/development/ad-fs-openid-connect-oauth-concepts) based on OIDC. + +
+
+:::caution
+
+ You cannot use Microsoft AD FS for SAML-based SSO with Palette. Microsoft AD FS does not support the Canonical XML 1.1 standard that Palette employs. You can only use the OIDC-based approach for Microsoft AD FS.
+
+:::
+
+
+## Prerequisites
+In order to set up OIDC-based SSO with Microsoft AD FS, you need to use one of the following versions:
+* Microsoft AD FS 2022 (comes with Windows Server 2022)
+* Microsoft AD FS 2019 (comes with Windows Server 2019)
+* Microsoft AD FS 2016 (comes with Windows Server 2016)
+
+If you need to access your AD FS service from outside your corporate network, you will also need an AD FS Reverse Proxy. An official Microsoft tutorial for setting up an AD FS Reverse Proxy is not available, but you can use this blog post from [Matrixpost](https://blog.matrixpost.net/set-up-active-directory-federation-services-ad-fs-5-0-adfs-reverse-proxy-part-2/) for additional guidance.
+
+
+## Enablement
+## Create the AD FS Application Group for Palette
+
+1. Open the AD FS Management console on your Windows Server and add a new Application Group for Palette:
+
+ +![Add AD FS Application Group](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-application-group.png) + +
+ +2. Provide a suitable name for the application group and select **Server Application** from the list of templates. Then click **Next**: + +
+ +![Name Application Group](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_name-application-group.png) + +
+ +3. The next screen displays the **Client Identifier** for this Application Group: + +![Get Client Identifier](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_get-client-identifier.png) + +
+ +4. Copy the client identifier value and save it somewhere. You will need to enter this value into the Palette SSO configuration later. + + +5. Open a web browser and navigate to your Palette subscription. Navigate to **Tenant Settings** --> **SSO** and click **OIDC**. Click the button next to **Callback URL** to copy this value to the clipboard: + +![Copy Callback URL](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-callback-url.png) + +
+ +6. Switch back to your AD FS Management console and paste the copied value into the **Redirect URI** field, then click **Add** to add it to the list: + +![Paste Redirect URI](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_paste-redirect-uri.png) + +
+ +7. Switch back to Palette in the web browser and click the button next to **Logout URL** to copy this value to the clipboard: + +![Copy Logout URL](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-logout-url.png) + +
+ +8. Switch back to your AD FS Management console and paste the copied value into the **Redirect URI** field, then click **Add** to add it to the list: + +![Paste Logout URI](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_paste-logout-uri.png) + +
+ +9. These two redirect URIs are required for SSO to work with Palette. You can also add additional redirect URIs. The URIs in the table below are useful when you want to use AD FS for OIDC authentication into your Kubernetes clusters. + +| URL | Type of Access | +| --- | --- | +| `http://localhost:8000` | Using kubectl with the kube-login plugin from a workstation | +| `https://console.spectrocloud.com/v1/shelly/oidc/callback` | Using the web-based kubectl console | +| `https:///oauth/callback` | Using OIDC authentication into Kubernetes Dashboard | + +10. When you have completed entering redirect URIs, click **Next**. On the next page of the wizard, select **Generate a shared secret** and click **Copy to clipboard** to copy the secret value and save it somewhere. You will need to enter this value into the Palette SSO configuration later: + +![Copy Shared Secret](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_copy-shared-secret.png) + +
+ +11. Click **Next** and on the Summary screen, click **Next** again to complete the wizard. You need to add another application to the application group. Select the newly created application group and click **Properties**: + +![Open Application Group](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_open-oidc-app.png) + +
+ +12. In the Properties screen, click **Add application...**. In the wizard that opens, select **Web API** and click **Next**: + +![Add Web API application](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-web-api.png) + +
+ +13. In the **Identifier** field, add the following entries: +* The **Client Identifier** value you saved when creating the application group. +* The base URL of your Palette tenant. This is equal to the URL shown by your browser when logged into Palette minus the path. Example `https://johndoe-spectrocloud.console.spectrocloud.com`. + +
+ +![Find Base URL](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_base-url.png) + +
+ +![Add Web API Identifiers](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-identifiers.png) + +
+ +14. Click **Next** when done. On the next screen, select a suitable policy for who can use this SSO and under what circumstances. If you're not sure which policy to choose, select **Permit everyone**, then click **Next**: + +
+ +![Select Access Control Policy](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_select-policy.png) + +
+ +15. On the next screen, by default only the **openid** scope is ticked. However, to include the user's groups in the OIDC claim, you need to also enable the **allatclaims** scope. If your AD FS server does not yet have an **allatclaims** scope in the list, click **New scope...** and type `allatclaims` in the Name field, then click **OK** to add it. Ensure both scopes are enabled and then click **Next**: + +![Enable Permitted Scopes](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_enable-scopes.png) + + +16. On the Summary screen, click **Next** to finish the wizard. You need to set the **Issuance Transform Rules** for the Web API application. Open the application again by double-clicking on the Web API entry or clicking **Edit**. + +![Re-open Web API Application](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_reopen-webapi-app.png) + +
+ +17. Navigate to the **Issuance Transform Rules** tab and click **Add Rule**. + +![Add Issuance Transform Rule 1](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-transform-rule-1.png) + +
+ +18. Select the **Send LDAP Attributes as Claims** template and click **Next**: + +![Send LDAP As Claims Rule](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_ldap-as-claims.png) + +
+ +19. Name the rule `OpenID - LDAP Attribute Claims`. Select **Active Directory** as the Attribute store and add the following LDAP mappings: +* **E-Mail-Addresses** --> `email` +* **Given Name** --> `given_name` +* **Surname** --> `family_name` + +You can select the items on the left from the list. You will need to type the items on the right manually. Ensure you use all lowercase characters for the values on the right: + +![Set LDAP Claims](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_set-ldap-claims.png) + +
+ +20. Click **Finish** to add the rule. Now click on **Add Rule...** again to add the second rule: + +![Add Issuance Transform Rule 2](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-transform-rule-2.png) + +
+ +21. Select the **Send Group Membership as Claims** template and click **Next**: + +![Send Groups As Claims Rule](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_groups-as-claims.png) + +
+ +22. In the next screen, define the group claim as desired. In the following example, a group in Active Directory is called `SpectroTeam - Admins`. The desired behavior is for anyone that is a member of that group, to be issued a `groups` claim with the value `Admins`. In Palette this user will automatically be mapped to a group with the same name, `Admins`. You can assign RBAC permissions to that group in Palette to give it the desired access. + +![Set Group Claim](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_set-group-claim.png) + +
+ +23. Click **Finish** to add the rule. Click **OK** to save the changes to the Web API rule and click **OK** again to save the changes to the application group. + +24. Take note of your AD FS identifier, you will need this for Palette in the next step. Typically this is your AD FS name plus `/adfs`. You can also take the Federation Service identifier and remove `/services/trust` from that URL: + +![Note AD FS Name](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_note-adfs-name.png) + +
+ +## Enable OIDC SSO in Palette + +25. Open a web browser and navigate to your [Palette](https://console.spectrocloud.com) subscription. + +Navigate to **Tenant Settings** --> **SSO** and click on **OIDC**. Enter the following information. + +| Parameter | Value | +|-------------------|--------------------------------------------------------------------| +| Issuer URL | Your AD FS issuer URL. Typically this is your AD FS name plus /adfs.| +| Client ID | The client identifier that you saved in step **4**. | +| Client Secret | The shared secret that you generated in step **8**. | +| Default Teams | Leave blank if you don't want users without group claims to be assigned to a default group. If you do, enter the desired default group name. If you use this option, be careful with how much access you give to the group. | +| Scopes | Set this to `openid` and `allatclaims`. | +| Email | Keep `email` as the default. | +| First Name | Keep `given_name` as the default. | +| Last Name | Keep `family_name` as the default. | +| Spectro Team | Keep `groups` as the default. | + +![Enable Palette OIDC SSO](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_configure-palette-oidc.png) + + +26. When all the information has been entered, click **Enable** to enable SSO. You will receive a message stating **OIDC configured successfully**. + + +## Create Teams in Palette + +The remaining step is to create teams in Palette for the group claims that you configured in AD FS, and give them the appropriate permissions. For this example, you will create the `Admins` team and give it **Tenant Admin** permissions. You can repeat this for any other team that you configured with group claims. + +27. Open a web browser and navigate to your Palette subscription. Navigate to **Tenant Settings** --> **Users & Teams** --> **Teams** tab, and click **+ Create Team**. + +![Create Palette Team](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_create-team.png) + +
+ +28. Specify `Admins` in the **Team name** field. You don't need to set any members now, as this will happen automatically from the SSO. Click **Confirm** to create the team. + +![Name Palette Team](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_name-team.png) + +
+ +29. The list of teams displays again. Select the newly created **Admins** team to review its details. To give this team administrative access to the entire tenant and all the projects in it, assign the **Tenant Admin** role. Select **Tenant Roles** and click **+ Add Tenant Role**: + +![Palette Tenant Roles](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_tenant-roles.png) + +
+ +30. Click on **Tenant Admin** to enable the role. Click **Confirm** to add the role. + +![Add Tenant Role](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_add-tenant-role.png) + +
+ +You will receive a message stating **Roles have been updated**. Repeat this procedure for any other teams, taking care to ensure they are given the appropriate permissions. + +31. Click the **X** next to **Team Details** in the top left corner to exit this screen. + +You have now successfully configured Palette SSO based on OIDC with Microsoft AD FS. + + +## Validate + +1. Log in to Palette through SSO as a user that is a member of the `SpectroTeam - Admins` group in Active Directory to verify that users are automatically added to the `Admins` group in Palette. + +If you're still logged into Palette with a non-SSO user, log out by selecting **Logout** in the **User Menu** at top right. + +![User Logout](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_user-logout.png) + +
+
+
+2. The Palette login screen now displays a **Sign in** button and no longer presents a username and password field. Below the **Sign In** button, there is an **SSO issues? --> Use your password** link. Use this link to bypass SSO and log in with a local Palette account if you ever have an issue with SSO and need to access Palette without it.
+
+Click on the **Sign in** button to log in via SSO.
+
+![User SSO Login](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_palette-login.png)
+
+
+ +3. If this is the first time you are logging in with SSO, you will be redirected to the Microsoft AD FS login page. Depending on your organization's SSO settings, this could be a simple login form or require MFA (Multi-Factor Authentication). + +Make sure you log in as a user that is a member of the `SpectroTeam - Admins` group in Active Directory. Once authenticated, you will automatically be redirected back to Palette and logged into Palette as that user. + +4. You are now automatically added to the `Admins` team in Palette. To verify, navigate to the left **Main Menu**, select **Tenant Settings** --> **Users & Teams** --> **Teams** tab. Click the **Admins** team and view the team members section. + +![Palette Team Members](/palette-sso-with-adfs-images/how-to_palette-sso-with-adfs_team-members.png) + + +The user you logged in as has automatically been added to this team. + + +## Resources + +- [Microsoft AD FS](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/development/ad-fs-openid-connect-oauth-concepts) + +- [Microsoft AD FS Reverse Proxy](https://blog.matrixpost.net/set-up-active-directory-federation-services-ad-fs-5-0-adfs-reverse-proxy-part-2/) + +- [Palette User Management](../user-management.md) + +- [Palette SSO](./saml-sso.md) diff --git a/docs/docs-content/user-management/saml-sso/palette-sso-with-okta.md b/docs/docs-content/user-management/saml-sso/palette-sso-with-okta.md new file mode 100644 index 0000000000..9d98c69cd9 --- /dev/null +++ b/docs/docs-content/user-management/saml-sso/palette-sso-with-okta.md @@ -0,0 +1,336 @@ +--- +sidebar_label: 'Palette SSO with Okta' +title: 'Set up Palette SSO with Okta' +description: 'Set up Palette SSO with Okta' +icon: "" +hide_table_of_contents: false +sidebar_position: 30 +hiddenFromNav: false +tags: ["user-management", "saml-sso"] +--- + + + + + +# Enable SSO with Okta + +Single Sign-On (SSO) is an authentication method that enables users to log in to multiple applications and websites with one set of credentials. SSO uses certificates to establish and maintain a trust relationship between the Service Provider (SP) and an Identity Provider (IdP). Palette supports SSO based on either the Security Assertion Markup Language (SAML) or OpenID Connect (OIDC). + +The following steps will guide you on how to enable Palette SSO with [Okta Workforce Identity Cloud](https://www.okta.com/products/single-sign-on/) based on OIDC. + + +## Prerequisites + +- You need to have either a free or paid subscription with Okta. Okta provides free [developer subscriptions](https://developer.okta.com/signup/) for testing purposes. + + +- If you want to use the same Okta application for OIDC-based SSO into your Kubernetes cluster itself, you need to install [kubelogin](https://github.com/int128/kubelogin) on your local workstation to handle retrieval of access tokens for your cluster. + + +## Enablement +## Create the Okta Application + +1. Log in to your Okta Admin console and navigate to **Applications** --> **Applications**. Click the **Create App Integration** button. + +
+
+   :::info
+
+   Your Okta login URL has the following format:
+   `https://{your-okta-account-id}-admin.okta.com/admin/getting-started`.
+   Replace `{your-okta-account-id}` with your Okta account ID.
+
+   :::
+
+
+2. In the screen that opens, select **OIDC - OpenID Connect** for the sign-in method, then select **Web Application** for the application type. Then click **Next**.
+
+
+3. The following screen allows you to configure the new Web App Integration. In the **App integration name** field, change the name from `My Web App` to `Spectro Cloud Palette OIDC`. If desired, you can also upload a logo for the application. Leave the **Grant type** at its default value - **Authorization Code**.
+
+ + ![Configure General Settings](/oidc-okta-images/oidc-okta_okta-general-settings.png) + +
+ + +4. Open a web browser and navigate to your Palette subscription. Navigate to **Tenant Settings** --> **SSO** and click **OIDC**. Click the button next to **Callback URL** to copy the value to the clipboard. + +
+ + ![Copy Callback URL](/oidc-okta-images/oidc-okta_copy-callback-url.png) + +
+ +5. Switch back to your Okta Admin console and paste the copied value into the **Sign-in redirect URIs** field, replacing the existing value: + +
+ + ![Paste Redirect URI](/oidc-okta-images/oidc-okta_paste-redirect-uri.png) + +
+ +6. Switch back to Palette in the web browser and click the button next to **Logout URL** to copy the value to the clipboard. + +
+ + ![Copy Logout URL](/oidc-okta-images/oidc-okta_copy-logout-url.png) + +
+ +7. Switch back to your Okta Admin console and paste the copied value into the **Redirect URI** field, then click **Add** to add it to the list: + +
+ + ![Paste Logout URI](/oidc-okta-images/oidc-okta_paste-logout-uri.png) + +
+ +8. These two redirect URIs are required for SSO to work with Palette. You can also add additional redirect URIs. The URIs in the table below are useful when you want to use Okta for OIDC authentication into your Kubernetes clusters. + +
+
+    | URL | Type of Access |
+    | --- | --- |
+    | `http://localhost:8000` | Using kubectl with the kubelogin plugin from a workstation. |
+    | `https://console.spectrocloud.com/v1/shelly/oidc/callback` | Using the web-based kubectl console. |
+    | `https://<kubernetes-dashboard-url>/oauth/callback` | Using OIDC authentication into Kubernetes Dashboard. |
+
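+
+    If you plan to use the `http://localhost:8000` redirect URI for workstation access, the kubelogin plugin handles the browser-based login and token retrieval. The kubeconfig user entry below is a minimal sketch only; the issuer URL and client ID are placeholders for the custom authorization server you create later in this guide and the client ID you copy in step **10**, and depending on your application settings you may also need to pass `--oidc-client-secret`.
+
+    ```yaml
+    users:
+      - name: okta-oidc
+        user:
+          exec:
+            apiVersion: client.authentication.k8s.io/v1beta1
+            command: kubectl
+            args:
+              # kubelogin runs as a kubectl plugin and listens on localhost:8000 by default.
+              - oidc-login
+              - get-token
+              - --oidc-issuer-url=https://dev-12345678.okta.com/oauth2/<authorization-server-id>
+              - --oidc-client-id=<okta-client-id>
+              - --oidc-extra-scope=email
+              - --oidc-extra-scope=groups
+    ```
+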
+
+9. When you have completed entering redirect URIs, scroll down to the **Assignments** section and select **Allow everyone in your organization to access**. Leave the **Enable immediate access with Federation Broker Mode** option enabled and click **Save**.
+
+ + ![Configure Assignments](/oidc-okta-images/oidc-okta_assignments.png) + +
+
+10. You have now created the Okta Application! Next, you need to retrieve the Client ID and Client Secret information, which you will use in the following steps. You should have landed on the **General** tab of your Okta Application. Click the **Copy to clipboard** button next to the **Client ID** to copy the value and save it. You will need this value in a later step.
+
+ + ![Copy Client ID](/oidc-okta-images/oidc-okta_copy-client-id.png) + +
+ +11. Click the **Copy to clipboard** button next to the **Client Secret** to copy the secret value and save it. You will need this value for a later step. + +
+ + ![Copy Shared Secret](/oidc-okta-images/oidc-okta_copy-shared-secret.png) + +
+
+## Create an Okta Authorization Server
+
+To ensure Okta issues OIDC tokens with the correct claims, you must create a custom Authorization Server. The custom Authorization Server lets you customize the tokens issued by Okta so that they contain the OIDC claims that Palette and Kubernetes require.
+
+
+12. Navigate to **Security** --> **API**, select the **Authorization Servers** tab, and click **Add Authorization Server**.
+
+ + ![Add Authorization Server](/oidc-okta-images/oidc-okta_add-authz-server.png) + +
+ +13. Enter a name for the server, for example `Palette OIDC`. For the **Audience** field, enter the client identifier that you saved in step **10**. Optionally provide a description. Then click **Save**. + +
+ + ![Name Authorization Server](/oidc-okta-images/oidc-okta_name-authz-server.png) + +
+ +14. Navigate to the **Claims** tab and click **Add Claim**. + +
+ + ![Add Claims](/oidc-okta-images/oidc-okta_add-claims.png) + + +15. Enter the required information from the following tables below and click **Create**. Use this flow to create three claims in total. First, create two claims for the user information. + +
+
+    | Claim Name | Include in token type | Value Type | Value | Disable claim | Include In |
+    |------------|-----------------------|------------|-------|---------------|------------|
+    | u_first_name | ID Token (Always) | Expression | `user.firstName` | Unchecked | Any scope |
+    | u_last_name | ID Token (Always) | Expression | `user.lastName` | Unchecked | Any scope |
+
+
+16. Next, create a claim for group membership. The example below includes the names of any groups that the Okta user is a member of, and that start with `palette-`, in the `groups` claim of the token. During SSO, Palette adds the user to the Palette teams that have identical names.
+
+ + | Claim Name | Include in token type | Value Type | Filter | Disable claim | Include In | + |------------|-----------------------|------------|-------|---------------|------------| + | groups | ID Token (Always) | Groups | Starts with: `palette-` | Unchecked | Any scope | + +
+ + ![Claims Result](/oidc-okta-images/oidc-okta_claims-result.png) + +
+ +17. Click **<-- Back to Authorization Servers** at the top of the page to navigate back to the list of all servers. The authorization server you created is displayed in the list. Select the **Issuer URI** shown and copy it to the clipboard. Save this value as you will use it in a later step. + +
+ + ![Get Issuer URI](/oidc-okta-images/oidc-okta_get-issuer-uri.png) + +
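+
+    Optionally, you can confirm that the authorization server is reachable and serving OIDC metadata by requesting its discovery document. The issuer value below is a placeholder; substitute the Issuer URI you just copied.
+
+    ```shell
+    curl https://dev-12345678.okta.com/oauth2/<authorization-server-id>/.well-known/openid-configuration
+    ```
+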
+ +18. Navigate to the **Access Policies** tab and click **Add Policy**. + +
+ + ![Add Access Policy](/oidc-okta-images/oidc-okta_add-access-policy.png) + +
+
+19. Set the **Name** and **Description** fields to `Palette`, then change the **Assign to** option to the Okta Application you created in step **3** - `Spectro Cloud Palette OIDC`. Type in the first few characters of the application name and wait for a search result to come up that you can click on.
+
+ + ![Name Access Policy](/oidc-okta-images/oidc-okta_name-access-policy.png) + +
+ +20. Click the **Add rule** button to add a rule to this Access Policy: + +
+ + ![Add Policy Rule](/oidc-okta-images/oidc-okta_add-policy-rule.png) + +
+ +21. Set the **Rule Name** to `AuthCode`. Then deselect all Grant types but one, only leaving **Authorization Code** selected. Then click **Create Rule**. + +
+ + ![Configure Policy Rule](/oidc-okta-images/oidc-okta_configure-policy-rule.png) + +
+ +You have now completed all configuration steps in Okta. +
+
+## Enable OIDC SSO in Palette
+
+22. Open a web browser and navigate to your [Palette](https://console.spectrocloud.com) subscription.
+
+Navigate to **Tenant Settings** --> **SSO** and click on **OIDC**. Enter the following information.
+
+| Parameter | Value |
+|-------------------|--------------------------------------------------------------------|
+| Issuer URL | The Issuer URI that you saved in step **17**.|
+| Client ID | The client identifier that you saved in step **10**. |
+| Client Secret | The shared secret that you generated in step **11**. |
+| Default Teams | Leave blank if you don't want users without group claims to be assigned to a default group. If you do, enter the desired default group name. If you use this option, be careful with how much access you assign to the group. |
+| Scopes | Keep `openid`, `profile`, and `email` as the default. |
+| Email | Keep `email` as the default. |
+| First Name | Set this to `u_first_name`. |
+| Last Name | Set this to `u_last_name`. |
+| Spectro Team | Keep `groups` as the default. |
+
+ + ![Enable Palette OIDC SSO](/oidc-okta-images/oidc-okta_configure-palette-oidc.png) + +
+
+23. When all the information has been entered, click **Enable** to activate SSO. You will receive a message stating **OIDC configured successfully**.
+
+
+## Create Teams in Palette
+
+The remaining step is to create teams in Palette for the groups that you configured Okta to pass in the OIDC token, and give them the appropriate permissions. For this example, you will create the `palette-tenant-admins` team and give it **Tenant Admin** permissions. You can repeat this for any other team that you have a matching Okta group for.
+
+24. Open a web browser and navigate to your Palette subscription. Navigate to **Tenant Settings** --> **Users & Teams** --> **Teams** tab, and click **+ Create Team**.
+
+ + ![Create Palette Team](/oidc-okta-images/oidc-okta_create-team.png) + +
+ +25. Specify `palette-tenant-admins` in the **Team name** field. You don't need to set any members now, as this will happen automatically from the SSO. Click **Confirm** to create the team. + +
+ + ![Name Palette Team](/oidc-okta-images/oidc-okta_name-team.png) + +
+ +26. The list of teams displays again. Select the newly created **palette-tenant-admins** team to review its details. To give this team administrative access to the entire tenant and all the projects in it, assign the **Tenant Admin** role. Select **Tenant Roles** and click **+ Add Tenant Role**: + +
+ + ![Palette Tenant Roles](/oidc-okta-images/oidc-okta_tenant-roles.png) + +
+ +27. Click on **Tenant Admin** to enable the role. Click **Confirm** to add the role. + +
+ + ![Add Tenant Role](/oidc-okta-images/oidc-okta_add-tenant-role.png) + +
+ +You will receive a message stating **Roles have been updated**. Repeat this procedure for any other teams while ensuring they are given the appropriate access permissions. + +28. Click the **X** next to **Team Details** in the top left corner to exit this screen. + +You have now successfully configured Palette SSO based on OIDC with Okta. + + +## Validate + +1. Log in to Palette through SSO as a user that is a member of the `palette-tenant-admins` group in Okta to verify that users are automatically added to the `palette-tenant-admins` group in Palette. If you're still logged into Palette with a non-SSO user, log out by selecting **Logout** in the **User Drop-down Menu** at the top right. + +
+ + ![User Logout](/oidc-okta-images/oidc-okta_user-logout.png) + +
+
+
+2. The Palette login screen now displays a **Sign in** button and no longer presents a username and password field. Below the **Sign In** button, there is an **SSO issues? --> Use your password** link. Use this link to bypass SSO and log in with a local Palette account if you ever have an issue with SSO and need to access Palette without it. Click on the **Sign in** button to log in via SSO.
+
+ + ![User SSO Login](/oidc-okta-images/oidc-okta_palette-login.png) + +
+ +3. If this is the first time you are logging in with SSO, you will be redirected to the Okta login page. Depending on your organization's SSO settings, this could be a simple login form or require MFA (Multi-Factor Authentication). + +
+ + :::info + + Make sure you log in as a user that is a member of the `palette-tenant-admins` group in Okta. Once authenticated, you will automatically be redirected back to Palette and logged into Palette as that user. + + ::: + +
+ +4. You are now automatically added to the `palette-tenant-admins` team in Palette. To verify, navigate to the left **Main Menu**, select **Tenant Settings** --> **Users & Teams** --> **Teams** tab. Click the **palette-tenant-admins** team and view the team members section. + + + +## Resources + +- [Okta Workforce Identity Cloud](https://www.okta.com/products/single-sign-on/) + + +- [Palette User Management](../user-management.md) + + +- [Palette SSO](./saml-sso.md) diff --git a/docs/docs-content/user-management/saml-sso/saml-sso.md b/docs/docs-content/user-management/saml-sso/saml-sso.md new file mode 100644 index 0000000000..2400d096f8 --- /dev/null +++ b/docs/docs-content/user-management/saml-sso/saml-sso.md @@ -0,0 +1,35 @@ +--- +sidebar_label: "SAML and SSO Setup" +title: "SAML and SSO Setup" +description: "Detailed instructions on creating Single Sign-on to log in to Palette using SAML 2.0" +icon: "" +hide_table_of_contents: false +tags: ["user-management", "saml-sso"] +--- + + + +Palette supports Single Sign-On (SSO) with a variety of Identity Providers (IDP). You can enable SSO in Palette by using the following protocols for authentication and authorization. + +
+
+
+- Security Assertion Markup Language (SAML) - SAML is a standalone protocol that requires a centralized identity provider (IDP) to manage user identities and credentials. SAML supports SSO and is commonly used for enterprise applications.
+
+
+- OpenID Connect (OIDC) - OIDC is a more modern protocol designed for web and mobile applications. OIDC is built on top of [OAuth 2.0](https://www.rfc-editor.org/rfc/rfc6749), a widely used authorization framework. OIDC supports distributed identity providers and supports social login providers such as Google or GitHub.
+
+Enable SSO by following our [Enable SSO in Palette](enable-saml.md) guide.
+
+## Resources
+
+- [Enable SSO in Palette](enable-saml.md)
+
+
+- [Palette SSO with Azure Active Directory](palette-sso-azure-ad.md)
+
+
+- [Enable SSO with Microsoft Active Directory Federation Service (AD FS)](palette-sso-with-adfs.md)
+
+
+- [Palette SSO with Okta](palette-sso-with-okta.md)
diff --git a/docs/docs-content/user-management/user-authentication.md b/docs/docs-content/user-management/user-authentication.md
new file mode 100644
index 0000000000..c83a6c1449
--- /dev/null
+++ b/docs/docs-content/user-management/user-authentication.md
@@ -0,0 +1,148 @@
+---
+sidebar_label: "User Authentication"
+title: "API Key for API Authentication"
+description: "Palette's API key for user authentication for API access"
+icon: ""
+hide_table_of_contents: false
+sidebar_position: 0
+tags: ["user-management"]
+---
+
+
+
+Palette supports three types of user authentication methods.
+
+* [User Interface (UI)](#ui-authentication) authentication
+
+* [API Key](#api-key)
+
+* [Authorization Token](#authorization-token)
+
+The API key and the authorization token methods can be used when interacting with Palette REST APIs for automation and programmatic purposes.
+
+
+## UI Authentication
+
+You can log in to Palette by visiting the Palette console at [https://console.spectrocloud.com](https://console.spectrocloud.com). If you are a user of a Palette Enterprise instance, then you should use the URL provided by your Palette system
+administrator, such as `example-company.console.spectrocloud.com`.
+
+## Account Sign Up
+
+You can sign up for a Palette SaaS account by visiting [Palette](https://console.spectrocloud.com) or an Enterprise Palette account under your organization by using your organization's custom Palette URL.
+
+When you create an account, you can create a username and password or create the account through a third-party identity provider, such as GitHub, Google, or other OIDC providers that are enabled for your organization. For Palette SaaS, GitHub and Google are automatically enabled for SSO integration.
+
+## Sign In Flow
+
+Starting with Palette 3.2, the user sign-in flow can be different depending on how you created your Palette account. If you created your user with a username and password, then you may be prompted to select the organization you wish to log in to. If you are a member of a single organization, then you will not be prompted for an organization selection.
+
+If you created an account through SSO and are a member of different organizations, then you must first select the organization name you wish to log in to. Click on the **Sign in to your organization** button for the option to specify the organization name. If you need help remembering the organization name, click on the **Forgot your organization name?** button and provide your email address to receive an email containing your organization name and its login URL.
+
+ +:::info + +If you are a Palette Enterprise user, use the custom Palette URL for an optimized login experience and avoid specifying the organization name. +Ask your Palette system administrator for the custom Palette URL. + +::: + +## API Key + +Palette API can also use API Keys to authenticate requests. This is the method of accessing the API without referring to the actual user. + +## Scope of Palette API Keys + +* Tenant admin can create an API Key for any user within the tenant. +* Users can create API Keys for themselves. + +## Creating an API key as a tenant admin + +* Login to Palette using credential with admin role. +* Go to Tenant Settings and select **API Keys**. +* Click on “Add New API Key” to create a new API key. The following information is required for creating a new API Key: + * API Key Name: The tenant/user-specified custom name for the key. + * Description: An optional description about the key. + * Username: Select the user for whom the key is created from the drop-down. + * Expiration Date: Set an expiry date for the key from the options available. The expiration date can be further customized after the key creation. The various options available for the expiration dates are: + * 7 days + * 30 days + * 60 days + * 90 days + * Custom: Select a custom expiry date from the calendar. +* Confirm the information to complete the wizard. + +### Manage API Keys as a tenant admin + +* Log in to Palette using credential with admin role. +* Go to Tenant Settings and select **API Keys**. +* Detailed status of the keys can be observed from the API overview page. In addition to the key's name, description, and expiration date, the overview page displays the API keys, the user to which each key is assigned, and the status of the key. +* To view all the keys assigned to a particular user, select the user's name at **User Name** on top of the page, below the **Manage API Keys**. +* Each API has a settings menu, click on the **three-dot Menu**, to view the following options: + * Edit: The following information can be edited from the menu, + * API Key name + * Description(optional) + * Expiration Date + * Revoke: Change the status of the key from **active** to **inactive**. + * Re-activate: Change the status of the key from ‘inactive’ to ‘active’ as long as expiration date has not passed. + * Delete: Delete the key. + +## Creating an API key for the logged-in user + +* Log in to Palette +* Select **API Keys** from **User Menu**. +* Click on **Add New API Key** to create a new API key. The following information is required for creating a new API Key: + * API Key Name: The tenant/user-specified custom name for the key. + * Description: An optional description about the key. + * Expiration Date: Set an expiry date for the key from the options available. The expiration date can be further customized after the key creation. The various options available for the expiration dates are: + * 7 days + * 30 days + * 60 days + * 90 days + * Custom: Select a custom expiry date from the calendar. +* Confirm the information to complete the wizard. + +### Manage API Keys for the logged-in user + +* Log in to Palette +* Select **API Keys** from the **User Menu**. +* Detailed status of the key can be observed from the API overview page. In addition to the key's name, description, and expiration date, the overview page displays the API keys belonging to the user, and the status of the keys. 
* To view all the keys assigned to a particular user, select the user's name at **User Name** at the top of the page, below **Manage API Keys**.
+* Each API key has a settings menu. Click on the **three-dot Menu** to view the following options:
+  * Edit: The following information can be edited from the menu:
+    * API Key name
+    * Description (optional)
+    * Expiration Date
+  * Revoke: Change the status of the key from **active** to **inactive**.
+  * Re-activate: Change the status of the key from **inactive** to **active** as long as the expiration date has not passed.
+  * Delete: Delete the key.
+
+## API Key Usage
+
+You can copy your API key from the Palette dashboard and use it to make REST API calls in one of the following ways:
+
+* Query Parameter - Pass the key as a query parameter `ApiKey=`. Example:
+  `v1/spectroclusters?ApiKey=QMOI1ZVKVIoW6LM6uXqSWFPsjmt0juvl`
+* Request Header - Pass the API key as an HTTP request header in the following format:
+  * Key: ApiKey
+  * Value: API key copied from the Palette Console. Example: `QMOI1ZVKVIoW6LM6uXqSWFPsjmt0juvl`
+
+## Authorization Token
+
+You can use authorization tokens to authenticate requests.
+
+To obtain an authorization token, use the `v1/auth/authenticate` endpoint with a POST request to authenticate and obtain the authorization token. Provide your API key as a header value or query parameter. The authorization token is valid for 15 minutes. You can refresh the token using the refresh token API.
+
+API requests using the authorization token must use the HTTP header `Authorization` with the token as the value. For example:
+
+```bash
+TOKEN=abcd1234
+```
+
+```bash
+curl --location --request GET 'https://api.spectrocloud.com/v1/projects/alert' \
+--header "Authorization: $TOKEN" \
+--header 'Content-Type: application/json'
+```
+
+To refresh the authorization token, use the `v1/auth/refresh` endpoint with a GET request.
diff --git a/docs/docs-content/user-management/user-management.md b/docs/docs-content/user-management/user-management.md
new file mode 100644
index 0000000000..61070ee354
--- /dev/null
+++ b/docs/docs-content/user-management/user-management.md
@@ -0,0 +1,61 @@
+---
+sidebar_label: "User Management"
+title: "User Management"
+description: "Dive into Palette's user management capabilities and how to manage users' access and set up controls, integrations, and more."
+hide_table_of_contents: false
+sidebar_custom_props:
+  icon: "roles"
+tags: ["user-management"]
+---
+
+
+
+# User Management
+
+This section touches upon the initial login aspects for Tenant Admins and non-admin users and the RBAC setup within Palette.
+
+## User Login
+
+For a Tenant admin, the password must be set upon the initial login. The Tenant admin can add non-admin users. For all users, login can be made available using the following options:
+
+* Using Palette credentials on the login page.
+
+* SSO using Identity Providers that use SAML 2.0:
+  * Azure Active Directory
+  * Okta
+  * Keycloak
+  * OneLogin
+  * Microsoft ADFS
+  * Others
+
+## RBAC
+
+Palette allows or restricts users' access to resources based on the roles set by the tenant admin. Palette's RBAC design allows granting granular access to resources and their operations. This Role-Based Access Control is explained in detail in the [RBAC](palette-rbac/palette-rbac.md#permissions) page.
+
+## Roles and Permissions
+
+The Tenant admin can allow or restrict users' access to resources, and the level of access can differ depending on the scenario.
A user can have complete access to a specific project but be restricted from other projects in which they have no involvement. An intermediate stage is also possible, where read-only access is provided for some projects. A role is a collection of permissions, and permissions are associated with specific actions within the platform. The Roles and Permissions sections in the [RBAC](./palette-rbac/palette-rbac.md) page provide more details on this.
+
+To add a user to a project:
+ 1. Sign in as a Tenant admin and navigate to the **Users and Teams** section of the **Tenant Settings** menu.
+
+ 1. Click on the user that you want to enable access to.
+
+ 1. In the **Role** editor that opens to the side, find the **Project Roles** section and click **Add Role**.
+
+ 1. Select the required **Project** from the drop-down menu and enable the **Roles** as needed.
+
+## Multi-Organization Support for Users
+
+Palette is incorporating multi-organization support for its users. With this feature, users can keep a single email address ID across multiple organizations and maintain SSO credentials across multiple organizations/tenants.
+
+The key benefits of this feature are:
+
+* The use of a single email address ID across multiple organizations.
+* Within an organization, maintain a unique email ID.
+* In the case of password-based authentication, the same password is applicable across multiple organizations. A password change made under a particular organization is applied across the other organizations to maintain a single password across all organizations.
+* The password policy stays independent of organizations/tenants. Each tenant retains its individual password policy.
+* For SSO-based authentication, an individual identity provider client application can be configured for each organization/tenant. This allows a single SSO setup to use multiple identity providers across multiple tenants/organizations, mapping each client app to a tenant.
+* However, for self-sign-up, the unique email address ID is enforced across tenants to avoid conflicts.
+* In the Palette console, users can switch between organizations/tenants using the **Organization** drop-down menu on the login page.
diff --git a/docs/docs-content/vertex/_category_.json b/docs/docs-content/vertex/_category_.json
new file mode 100644
index 0000000000..aee7c4d2b8
--- /dev/null
+++ b/docs/docs-content/vertex/_category_.json
@@ -0,0 +1,3 @@
+{
+  "position": 190
+}
diff --git a/docs/docs-content/vertex/fips/_category_.json b/docs/docs-content/vertex/fips/_category_.json
new file mode 100644
index 0000000000..3fca6fb9f9
--- /dev/null
+++ b/docs/docs-content/vertex/fips/_category_.json
@@ -0,0 +1,3 @@
+{
+  "position": 0
+}
diff --git a/docs/docs-content/vertex/fips/fips-compliant-components.md b/docs/docs-content/vertex/fips/fips-compliant-components.md
new file mode 100644
index 0000000000..89f7fda249
--- /dev/null
+++ b/docs/docs-content/vertex/fips/fips-compliant-components.md
@@ -0,0 +1,164 @@
+---
+sidebar_label: "FIPS-Compliant Components"
+title: "FIPS-Compliant Components"
+description: "Learn about FIPS-Compliant Components supported by Palette VerteX."
+icon: "" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["vertex", "fips", "compliance", "pxk", "pxke"] +--- + +Federal Information Processing Standards (FIPS) is a series of standards developed by the National Institute of Standards and Technology (NIST) in the United States for computer security and encryption algorithms. + +FIPS 140-2 is a specific standard for security requirements for cryptographic modules. It outlines the criteria these modules must meet to ensure their security and integrity. + + +## FIPS Support in Clusters + +Palette VerteX provides FIPS-compliant infrastructure components in Kubernetes clusters it deploys. These components are: + +
+ +- Operating System (OS) + - Ubuntu Pro + + +- Kubernetes + - Palette eXtended Kubernetes (PXK) + - Palette eXtended Kubernetes - Edge (PXK-E) + + +- Container Network Interface (CNI) + - Calico + + +- Container Storage Interface (CSI) + - vSphere CSI + + +## Management Plane + +All services in the management plane are FIPS compiled with Go using [BoringCrypto libraries](https://pkg.go.dev/crypto/internal/boring) and static linking. Refer to the [Spectro Cloud Cryptographic Module](https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/4349) resource to learn about our NIST certificate. + +
+ +## FIPS-Compliant Kubernetes + +Our customized version of Kubernetes is FIPS-compliant. Both [Palette eXtended Kubernetes (PXK)](../../integrations/kubernetes.md) and [Palette eXtended Kubernetes-Edge (PXK-E)](../../integrations/kubernetes-edge.md) are compiled with FIPS-compliant compiler and libraries. + +
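+
+One informal way to spot-check this on a deployed PXK node is to look for BoringCrypto symbols in a Kubernetes binary. The command below is only a sketch; it assumes you have shell access to a cluster node, that the `strings` utility is installed, and that the kubelet binary is located at the path shown.
+
+```shell
+# Print a few of the BoringCrypto symbol names embedded in the kubelet binary.
+strings /usr/bin/kubelet | grep -i boringcrypto | head -n 5
+```
+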
+ + +:::info + +Refer to the [Palette eXtended Kubernetes (PXK)](../../integrations/kubernetes.md) and [Palette eXtended Kubernetes-Edge (PXK-E)](../../integrations/kubernetes-edge.md) documentation to learn more about the each Kubernetes distribution. + + +::: + +All PXK and PXKE components and supporting open-source components are compiled in their native programming language using language specific FIPS-compliant libraries and static linking. If the component is not available in the form of a FIPS-compliant binary, we compile it with FIPS-compliant compiler and libraries. The following tables list the FIPS-compliant components in PXK and PXK-E: + +
+ + +### Core Kubernetes Components + +| **Component** | **Description** | +| --- | --- | +| API Server | The API server is the central management entity that receives all REST requests for the cluster. | +| Controller Manager | The controller manager is a daemon that embeds the core control loops shipped with Kubernetes. | +| Scheduler | The scheduler is a daemon that finds the best node for a pod, based on the scheduling requirements you specify. | +| Kubelet | The kubelet is the primary *node agent* that is deployed on each node. | +| Kube-proxy | The kube-proxy is a network proxy that runs on each node in your cluster, implementing part of the Kubernetes Service concept. | +| Kubeadm | Kubeadm is a tool built to provide best-practice “fast paths” for creating Kubernetes clusters. | +| Kubectl | Kubectl is a command line interface for issuing commands against Kubernetes clusters. | + + +### Auxiliary Kubernetes Components + +| **Component** | **Description** | +| --- | --- | +| CoreDNS | CoreDNS is a Domain Name System (DNS) server deployed as a cluster DNS service. | +| Etcd | Etcd is a distributed key-value store used as Kubernetes’ backing store for all cluster data. | +| Metrics Server | Metrics Server is a scalable, efficient source of container resource metrics for Kubernetes built-in autoscaling pipelines. | +| Ingress Controller| Nginx is used as the ingress controller. An ingress controller is a piece of software that provides reverse proxy, configurable traffic routing, and Transport Layer Security (TLS) termination for Kubernetes services. | +| Nginx Server| The Nginx server is a web server that can also be used as a reverse proxy, load balancer, mail proxy, and HTTP cache. | +| Nginx Ingress Controller| The Nginx ingress controller uses ConfigMap to store the Nginx configuration. | + + +### Runtime Components + +| **Component** | **Description** | +| --- | --- | +| containerd | Containerd is an industry-standard container runtime with an emphasis on simplicity, robustness, and portability. | +| containerd-shim | Containerd-shim is a shim used by containerd to launch containers. | +| containerd-shim-runc-v1 | Containerd-shim-runc-v1 is a shim used by containerd to launch containers. | +| containerd-shim-runc-v2 | Containerd-shim-runc-v2 is a shim used by containerd to launch containers. | +| ctr| Ctr is a command line interface for containerd. | +| crictl | Crictl is a command line interface for CRI-compatible container runtimes. | +| runc | Runc is a CLI tool for spawning and running containers according to the OCI specification. | + + +### Container Network Interface Components + +| **Component** | **Description** | +| --- | --- | +| Calico | Calico is a Container Network Interface plugin that provides networking and network policy for Kubernetes clusters. | + +### Container Storage Interface Components + +| **Component** | **Description** | +| --- | --- | +| AWS EBS CSI | AWS EBS CSI is a CSI plugin that provides storage for Kubernetes clusters. | +| vSphere CSI | vSphere CSI is a CSI plugin that provides storage for Kubernetes clusters. | +| Longhorn CSI | Longhorn CSI is a CSI plugin that provides storage for Kubernetes clusters. Longhorn is the only supported CSI for PXKE. | + +
+ + +##### AWS EBS CSI Components + +| **Component** | **Description** | +| --- | --- | +| Driver| The driver is a CSI plugin that provides storage for Kubernetes clusters. | +| External Attacher | The external attacher is a CSI plugin that attaches volumes to nodes. | +| External Provisioner | The external provisioner is a CSI plugin that provisions volumes. | +| External Resizer | The external resizer is a CSI plugin that resizes volumes. | +| External Snapshotter | The external snapshotter is a CSI plugin that takes snapshots of volumes. | +| Liveness Probe | The liveness probe is a CSI plugin that checks the health of the driver. | +| Node Driver Registrar | The node driver registrar is a CSI plugin that registers the driver with the kubelet. | + +
+ +##### Longhorn CSI Components + +| **Component** | **Description** | +|---------------------------|--------------| +| Backing image manager | Manages backing images for Longhorn volumes. | +| Attacher | Handles attaching and detaching of volumes to nodes. | +| Provisioner | Manages provisioning and de-provisioning of storage resources. | +| Resizer | Enables resizing of storage volumes. | +| Snapshotter | Manages snapshots of Longhorn volumes. | +| Node driver registrar | Registers the CSI driver with the Kubernetes node. | +| Liveness probe | Monitors health of CSI components. | +| Longhorn engine | Core component that handles read and write operations to the storage backend. | +| Longhorn instance manager | Manages Longhorn engine and replica instances. | +| Longhorn share manager | Manages shared volumes and exposes them via protocols like Network File System (NFS). | +| Longhorn UI | User interface for managing Longhorn components and resources. | +| Longhorn support bundle kit| Collects logs and system information for debugging. | + + + +:::info + +The Longhorn Manager component is partially FIPS-compliant. This component uses utiltities that are not using a FIPS-compliant version of OpenSSL. The following utilities are not FIPS-compliant: + +- openssl +- curl +- nfs-utils +- bind-tools + + +::: + + diff --git a/docs/docs-content/vertex/fips/fips-status-icons.md b/docs/docs-content/vertex/fips/fips-status-icons.md new file mode 100644 index 0000000000..a3f55f01e7 --- /dev/null +++ b/docs/docs-content/vertex/fips/fips-status-icons.md @@ -0,0 +1,47 @@ +--- +sidebar_label: "FIPS Status Icons" +title: "FIPS Status Icons" +description: "Learn how icons can help you identify FIPS compliance when you consume features that are not FIPS compliant." +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["vertex", "fips"] +--- + + + +While Palette VerteX brings FIPS 140-2 cryptographic modules to the Palette management platform and deployed clusters, it also provides the capability to consume features that are not FIPS compliant. For example, when the cluster import option is enabled, it allows users to import any type of Kubernetes cluster, including some that are not fully FIPS compliant. +Similarly, when the option to add non-FIPS add-on packs is enabled, users can add packs in cluster profiles that are not FIPS compliant. For more information about these tenant-level settings, refer to [Enable non-FIPS Settings](../system-management/enable-non-fips-settings/enable-non-fips-settings.md). + +To avoid confusion and compliance issues, Palette VerteX displays icons to indicate the FIPS compliance status of clusters, profiles, and packs. + +The table lists icons used to indicate FIPS compliance status. The partial FIPS compliance icon applies only to clusters and profiles because these may contain packs with an *Unknown* or *Not FIPS-compliant* status. + +| **Icon** | **Description** | **Applies to Clusters** | **Applies to Profiles** | **Applies to Packs** | +|---------------|------------|----------------|----------------|----------------| +| ![Full FIPS compliance](/vertex_fips-status-icons_compliant.png) | Full FIPS compliance. All packs in the cluster are FIPS-compliant.| ✅ | ✅ | ✅ | +| ![Partial FIPS compliance](/vertex_fips-status-icons_partial.png) | Partial FIPS compliance. Some packs are FIPS compliant, but there is at least one that is not.| ✅ | ✅ | ❌ | +| ![Not FIPS-compliant](/vertex_fips-status-icons_not-compliant.png) | Not FIPS-compliant. 
None of the packs in the cluster are FIPS-compliant.| ✅ | ✅ | ✅ | +|![Unknown FIPS state](/vertex_fips-status-icons_unknown.png) | Unknown state of FIPS compliance. This applies to imported clusters that were not deployed by Palette. | ✅ | ✅ | ✅ | + + + +The screenshots below show how Palette VerteX applies FIPS status icons. + +When you create a profile, icons display next to packs. + +![Diagram showing FIPS status icons on profile page.](/vertex_fips-status-icons_icons-on-profile-page.png) + + + +
+ +Icons appear next to each profile layer to indicate FIPS compliance. + +![Diagram showing FIPS-compliant icons in profile stack.](/vertex_fips-status-icons_icons-in-profile-stack.png) + +
+ +In this screenshot, Palette VerteX shows FIPS status for the cluster is partially compliant because one pack in the profile is not FIPS-compliant. + +![Diagram showing FIPS status icons on Cluster Overview page.](/vertex_fips-status-icons_icons-in-cluster-overview.png) \ No newline at end of file diff --git a/docs/docs-content/vertex/fips/fips.md b/docs/docs-content/vertex/fips/fips.md new file mode 100644 index 0000000000..5d6c6e22e1 --- /dev/null +++ b/docs/docs-content/vertex/fips/fips.md @@ -0,0 +1,55 @@ +--- +sidebar_label: "FIPS" +title: "FIPS" +description: "Learn about FIPS compliance in Palette VerteX." +icon: "" +hide_table_of_contents: false +tags: ["vertex", "fips"] +--- + +Palette VerteX is FIPS 140-2 compliant. This means that Palette VerteX uses FIPS 140-2 compliant algorithms and encryption methods. With its additional security scanning capabilities, Palette VerteX is designed to meet the stringent requirements of regulated industries. Palette VerteX operates on FIPS-compliant Ubuntu Pro versions. + + +## Non-FIPS Enablement + +You can deploy non-FIPS-compliant components in your Palette VerteX environment by enabling non-FIPS settings. Refer to the [Enable non-FIPS Settings](../system-management/enable-non-fips-settings/enable-non-fips-settings.md) guide for more information. + + +Something to note when using RKE2 and K3s: + +
+ + +- When we scan the binaries, which we consume directly from Rancher's RKE2 repository, issues are reported for the following components. These components were compiled with a Go compiler that is not FIPS-compliant. + + - container-suseconnect + - container-suseconnect-zypp + - susecloud + +
+ + Since these components are unrelated to Kubernetes and are instead used to access SUSE’s repositories during the Docker build process, RKE2 itself remains fully compliant. + + RKE2 is designated as FIPS-compliant per official Rancher [FIPS 140-2 Enablement](https://docs.rke2.io/security/fips_support) security documentation. Therefore, Palette VerteX designates RKE2 as FIPS-compliant. + + + +- Although K3s is not available as a FIPS-certified distribution, Palette VerteX supports K3s as a Kubernetes distribution for Edge clusters. + +Palette VerteX uses icons to show FIPS compliance status. For information about Palette VerteX status icons, review [FIPS Status Icons](fips-status-icons.md). + + +## Legal Notice + +Spectro Cloud has performed a categorization under FIPS 199 with (client/tenant) for the data types (in accordance with NIST 800-60 Vol. 2 Revision 1) to be stored, processed, and/or transmitted by the Palette Vertex environment. (client/tenant) maintains ownership and responsibility for the data and data types to be ingested by the Palette Vertex SaaS in accordance with the agreed upon Palette Vertex FIPS 199 categorization. + + +## Resources + +- [FIPS Status Icons](fips-status-icons.md) + + +- [FIPS-Compliant Components](fips-compliant-components.md) + + +- [RKE2 FIPS 140-2 Enablement](https://docs.rke2.io/security/fips_support) \ No newline at end of file diff --git a/docs/docs-content/vertex/install-palette-vertex/_category_.json b/docs/docs-content/vertex/install-palette-vertex/_category_.json new file mode 100644 index 0000000000..094470741d --- /dev/null +++ b/docs/docs-content/vertex/install-palette-vertex/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 10 +} diff --git a/docs/docs-content/vertex/install-palette-vertex/install-on-kubernetes/_category_.json b/docs/docs-content/vertex/install-palette-vertex/install-on-kubernetes/_category_.json new file mode 100644 index 0000000000..094470741d --- /dev/null +++ b/docs/docs-content/vertex/install-palette-vertex/install-on-kubernetes/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 10 +} diff --git a/docs/docs-content/vertex/install-palette-vertex/install-on-kubernetes/install-on-kubernetes.md b/docs/docs-content/vertex/install-palette-vertex/install-on-kubernetes/install-on-kubernetes.md new file mode 100644 index 0000000000..fedbb6de89 --- /dev/null +++ b/docs/docs-content/vertex/install-palette-vertex/install-on-kubernetes/install-on-kubernetes.md @@ -0,0 +1,24 @@ +--- +sidebar_label: "Kubernetes" +title: "Kubernetes" +description: "Learn how to install Palette VerteX on Kubernetes." +icon: "" +hide_table_of_contents: false +tags: ["vertex", "kubernetes"] +--- + + +Palette VerteX can be installed on Kubernetes with internet connectivity or an airgap environment. When you install Palette VerteX, a three-node cluster is created. You use a Helm chart our support team provides to install Palette VerteX on Kubernetes. Refer to [Access Palette VerteX](../../vertex.md#access-palette-vertex) for instructions on requesting access to the Helm Chart. + + +To get started with Palette VerteX on Kubernetes, refer to the [Install Instructions](install.md) guide. 
+ +## Resources + +- [Install Instructions](install.md) + + + + + +- [Helm Configuration Reference](vertex-helm-ref.md) diff --git a/docs/docs-content/vertex/install-palette-vertex/install-on-kubernetes/install.md b/docs/docs-content/vertex/install-palette-vertex/install-on-kubernetes/install.md new file mode 100644 index 0000000000..16a20bca20 --- /dev/null +++ b/docs/docs-content/vertex/install-palette-vertex/install-on-kubernetes/install.md @@ -0,0 +1,320 @@ +--- +sidebar_label: "Instructions" +title: "Install VerteX" +description: "Learn how to install Palette VerteX on VMware vSphere." +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["vertex", "kubernetes"] +--- + + + +Use the Palette VerteX Helm Chart to install Palette VerteX in a multi-node Kubernetes cluster in your production environment. Palette VerteX is a FIPS-compliant product that must be installed in a FIPS-compliant environment. This means that Operating System (OS) the Kubernetes cluster you are installing Palette VerteX into must be FIPS-compliant. + +Review our [architecture diagrams](../../../architecture/networking-ports.md) to ensure your Kubernetes cluster has the necessary network connectivity for Palette to operate successfully. + +## Prerequisites + +- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) is installed and available. + + +- [Helm](https://helm.sh/docs/intro/install/) is installed and available. + + +- Access to the target Kubernetes cluster's kubeconfig file. You must be able to interact with the cluster using `kubectl` commands and have sufficient permissions to install Palette VerteX. We recommend using a role with cluster-admin permissions to install Palette VerteX. + + +- The Kubernetes cluster must be set up on a supported version of Kubernetes, which includes versions v1.25 to v1.27. + + + +- Ensure the Kubernetes cluster does not have Cert Manager installed. Palette VerteX requires a unique Cert Manager configuration to be installed as part of the installation process. If Cert Manager is already installed, you must uninstall it before installing Palette VerteX. + + +- The Kubernetes cluster must have a Container Storage Interface (CSI) installed and configured. Palette VerteX requires a CSI to store persistent data. You may install any CSI that is compatible with your Kubernetes cluster. + + + +- We recommend the following resources for Palette VerteX. Refer to the [Palette VerteX size guidelines](../install-palette-vertex.md#size-guidelines) for additional sizing information. + + - 8 CPUs per node. + + - 16 GB Memory per node. + + - 100 GB Disk Space per node. + + - A Container Storage Interface (CSI) for persistent data. + + - A minimum of three worker nodes or three untainted control plane nodes. + +
+ + :::info + + Refer to the Palette VerteX [size guidelines](../install-palette-vertex.md#size-guidelines) resource for additional sizing information. + + ::: + + +- The following network ports must be accessible for Palette VerteX to operate successfully. + + - TCP/443: Inbound and outbound to and from the Palette VerteX management cluster. + + - TCP/6443: Outbound traffic from the Palette VerteX management cluster to the deployed clusters' Kubernetes API server. + + +- Ensure you have an SSL certificate that matches the domain name you will assign to Palette VerteX. You will need this to enable HTTPS encryption for Palette VerteX. Reach out to your network administrator or security team to obtain the SSL certificate. You need the following files: + + - x509 SSL certificate file in base64 format. + + - x509 SSL certificate key file in base64 format. + + - x509 SSL certificate authority file in base64 format. + + +- Ensure the OS and Kubernetes cluster you are installing Palette VerteX onto is FIPS-compliant. Otherwise, Palette VerteX and its operations will not be FIPS-compliant. + + +- A custom domain and the ability to update Domain Name System (DNS) records. You will need this to enable HTTPS encryption for Palette VerteX. + + +- Access to the Palette Helm Charts. Refer to the [Access Palette VerteX](../../vertex.md#access-palette-vertex) for instructions on how to request access to the Helm Chart. + + + +
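+
+Before you begin the installation, you can quickly confirm that the target cluster meets the version and node requirements listed above. These are generic `kubectl` checks and assume your kubeconfig already points at the target cluster.
+
+```shell
+# Confirm the cluster is on a supported Kubernetes version (v1.25 to v1.27).
+kubectl version
+
+# Confirm at least three schedulable nodes are available.
+kubectl get nodes --output wide
+```
+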
+ +:::caution + +Do not use a Palette-managed Kubernetes cluster when installing Palette VerteX. Palette-managed clusters contain the Palette agent and Palette-created Kubernetes resources that will interfere with the installation of Palette VerteX. + +::: + + +## Install Palette VerteX + +Use the following steps to install Palette VerteX on Kubernetes. + +
+ +:::info + +The following instructions are written agnostic to the Kubernetes distribution you are using. Depending on the underlying infrastructure provider and your Kubernetes distribution, you may need to modify the instructions to match your environment. Reach out to our support team if you need assistance. + +::: + + +1. Open a terminal session and navigate to the directory where you downloaded the Palette VerteX Helm Charts provided by our support team. We recommend you place all the downloaded files within the same directory. You should have the following Helm Charts: + +
+ + - Spectro Management Plane Helm Chart. + +
+ + - Cert Manager Helm Chart. + + +2. Extract each Helm Chart into its directory. Use the commands below as a reference. Do this for all the provided Helm Charts. + +
+ + ```shell + tar xzvf spectro-mgmt-plane-*.tgz + ``` + +
+
+   ```shell
+   tar xzvf cert-manager-*.tgz
+   ```
+
+
+3. Install Cert Manager using the following command. Replace the actual file name of the Cert Manager Helm Chart with the one you downloaded, as the version number may be different.
+
+ + ```shell + helm upgrade --values cert-manager/values.yaml cert-manager cert-manager-1.11.0.tgz --install + ``` + +
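+
+   Before proceeding, you can optionally confirm that the Cert Manager pods reach the **Running** state. The namespace can vary depending on the chart values, so the check below simply filters across all namespaces.
+
+   ```shell
+   kubectl get pods --all-namespaces | grep cert-manager
+   ```
+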
+ + :::info + + The Cert Manager Helm Chart provided by our support team is configured for Palette VerteX. Do not modify the **values.yaml** file unless instructed to do so by our support team. + + ::: + + +4. Open the **values.yaml** in the **spectro-mgmt-plane** folder with a text editor of your choice. The **values.yaml** contains the default values for the Palette VerteX installation parameters. You must populate the following parameters in the YAML file before installing Palette VerteX. + +
+ + | **Parameter** | **Description** | **Type** | + | --- | --- | --- | + | `env.rootDomain` | The URL name or IP address you will use for the Palette VerteX installation. | string | + | `ociPackRegistry` or `ociPackEcrRegistry` | The OCI registry credentials for Palette VerteX FIPS packs.| object | + | `scar` | The Spectro Cloud Artifact Repository (SCAR) credentials for Palette VerteX FIPS images. These credentials are provided by our support team. | object | + +
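+
+   For orientation, the [Helm Configuration Reference](vertex-helm-ref.md) lists the root domain parameter as `config.env.rootDomain`, so the relevant section of the **values.yaml** file looks similar to the following minimal sketch. The domain is an example value, and the registry and SCAR sections are omitted here because their exact fields come from the reference page and the credentials provided by our support team.
+
+   ```yaml
+   config:
+     env:
+       # Domain or IP address that Palette VerteX will be reachable at (example value).
+       rootDomain: "vertex.example.com"
+   ```
+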
+ + Save the **values.yaml** file after you have populated the required parameters listed in the table. + +
+ + :::info + + You can learn more about the parameters in the **values.yaml** file in the [Helm Configuration Reference](vertex-helm-ref.md) page. + + ::: + + + +5. Install the Palette VerteX Helm Chart using the following command. + +
+
+   ```shell
+   helm upgrade --values spectro-mgmt-plane/values.yaml hubble spectro-mgmt-plane-0.0.0.tgz --install
+   ```
+
+
+6. Track the installation process using the command below. Palette VerteX is ready when the deployments in the namespaces `cp-system`, `hubble-system`, `ingress-nginx`, `jet-system`, and `ui-system` reach the *Ready* state. The installation takes two to three minutes to complete.
+
+ + ```shell + kubectl get pods --all-namespaces --watch + ``` + + +7. Create a DNS CNAME record that is mapped to the Palette VerteX `ingress-nginx-controller` load balancer. You can use the following command to retrieve the load balancer IP address. You may require the assistance of your network administrator to create the DNS record. + +
+ + ```shell + kubectl get service ingress-nginx-controller --namespace ingress-nginx --output jsonpath='{.status.loadBalancer.ingress[0].hostname}' + ``` + +
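+
+   The command above prints the load balancer hostname. Some infrastructure providers expose an IP address instead of a hostname; in that case, the `hostname` field may be empty and you can query the `ip` field instead.
+
+   ```shell
+   kubectl get service ingress-nginx-controller --namespace ingress-nginx --output jsonpath='{.status.loadBalancer.ingress[0].ip}'
+   ```
+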
+
+   :::info
+
+   As you create tenants in Palette VerteX, the tenant name is prefixed to the domain name you assigned to Palette VerteX. For example, if you create a tenant named `tenant1` and the domain name you assigned to Palette VerteX is `vertex.example.com`, the tenant URL will be `tenant1.vertex.example.com`. You can create an additional wildcard DNS record to map all tenant URLs to the Palette VerteX load balancer.
+
+   :::
+
+
+8. Use the custom domain name or the IP address of the load balancer to visit the Palette VerteX system console. To access the system console, open a web browser and paste the custom domain URL in the address bar and append the value `/system`. Replace the domain name in the URL with your custom domain name or the IP address of the load balancer. Alternatively, you can use the load balancer IP address with the appended value `/system` to access the system console.
+
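+
+   If the console does not load, the CNAME record you created in step **7** may not have propagated yet. You can check name resolution from your workstation; `vertex.example.com` below is an assumed example domain.
+
+   ```shell
+   nslookup vertex.example.com
+   ```
+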
+ + :::info + + The first time you visit the Palette VerteX system console, a warning message about an untrusted SSL certificate may appear. This is expected, as you have not yet uploaded your SSL certificate to Palette VerteX. You can ignore this warning message and proceed. + + ::: + +
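+
+   If you want to inspect the certificate that is currently being served, you can do so with `openssl`; again, `vertex.example.com` is an assumed example domain.
+
+   ```shell
+   openssl s_client -connect vertex.example.com:443 -showcerts </dev/null 2>/dev/null | openssl x509 -noout -subject -issuer -dates
+   ```
+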
+ + ![A view of the Palette system console login screen.](/vertex_install-on-kubernetes_install_system-console.png) + + +9. Log in to the system console using the following default credentials. + +
+ + | **Parameter** | **Value** | + | --- | --- | + | Username | `admin` | + | Password | `admin` | + +
+ + After login, you will be prompted to create a new password. Enter a new password and save your changes. You will be redirected to the Palette VerteX system console. + +
+ +10. After login, a summary page is displayed. Palette VerteX is installed with a self-signed SSL certificate. To assign a different SSL certificate you must upload the SSL certificate, SSL certificate key, and SSL certificate authority files to Palette VerteX. You can upload the files using the Palette VerteX system console. Refer to the [Configure HTTPS Encryption](../../system-management/ssl-certificate-management.md) page for instructions on how to upload the SSL certificate files to Palette VerteX. + +
+ +:::caution + +If you are planning to deploy host clusters into different networks, you may require a reverse proxy. Check out the [Configure Reverse Proxy](../../system-management/reverse-proxy.md) guide for instructions on configuring a reverse proxy for Palette VerteX. + +::: + + +You now have a self-hosted instance of Palette VerteX installed in a Kubernetes cluster. Make sure you retain the **values.yaml** file as you may need it for future upgrades. + + +## Validate + +Use the following steps to validate the Palette VerteX installation. + +
+
+
+1. Open a web browser and navigate to the Palette VerteX system console. Paste the custom domain URL in the address bar and append the value `/system`. Replace the domain name in the URL with your custom domain name or the IP address of the load balancer.
+
+
+
+2. Log in using the credentials you received from our support team. After login, you will be prompted to create a new password. Enter a new password and save your changes. You will be redirected to the Palette VerteX system console.
+
+
+3. Open a terminal session and issue the following command to verify the Palette VerteX installation. The command should return a list of deployments in the `cp-system`, `hubble-system`, `ingress-nginx`, `jet-system`, and `ui-system` namespaces.
+
+ + ```shell + kubectl get pods --all-namespaces --output custom-columns="NAMESPACE:metadata.namespace,NAME:metadata.name,STATUS:status.phase" \ + | grep -E '^(cp-system|hubble-system|ingress-nginx|jet-system|ui-system)\s' + ``` + + Your output should look similar to the following. + + ```shell hideClipboard + cp-system spectro-cp-ui-689984f88d-54wsw Running + hubble-system auth-85b748cbf4-6drkn Running + hubble-system auth-85b748cbf4-dwhw2 Running + hubble-system cloud-fb74b8558-lqjq5 Running + hubble-system cloud-fb74b8558-zkfp5 Running + hubble-system configserver-685fcc5b6d-t8f8h Running + hubble-system event-68568f54c7-jzx5t Running + hubble-system event-68568f54c7-w9rnh Running + hubble-system foreq-6b689f54fb-vxjts Running + hubble-system hashboard-897bc9884-pxpvn Running + hubble-system hashboard-897bc9884-rmn69 Running + hubble-system hutil-6d7c478c96-td8q4 Running + hubble-system hutil-6d7c478c96-zjhk4 Running + hubble-system mgmt-85dbf6bf9c-jbggc Running + hubble-system mongo-0 Running + hubble-system mongo-1 Running + hubble-system mongo-2 Running + hubble-system msgbroker-6c9b9fbf8b-mcsn5 Running + hubble-system oci-proxy-7789cf9bd8-qcjkl Running + hubble-system packsync-28205220-bmzcg Succeeded + hubble-system spectrocluster-6c57f5775d-dcm2q Running + hubble-system spectrocluster-6c57f5775d-gmdt2 Running + hubble-system spectrocluster-6c57f5775d-sxks5 Running + hubble-system system-686d77b947-8949z Running + hubble-system system-686d77b947-cgzx6 Running + hubble-system timeseries-7865bc9c56-5q87l Running + hubble-system timeseries-7865bc9c56-scncb Running + hubble-system timeseries-7865bc9c56-sxmgb Running + hubble-system user-5c9f6c6f4b-9dgqz Running + hubble-system user-5c9f6c6f4b-hxkj6 Running + ingress-nginx ingress-nginx-controller-2txsv Running + ingress-nginx ingress-nginx-controller-55pk2 Running + ingress-nginx ingress-nginx-controller-gmps9 Running + jet-system jet-6599b9856d-t9mr4 Running + ui-system spectro-ui-76ffdf67fb-rkgx8 Running + ``` + + +## Next Steps + +You have successfully installed Palette VerteX in a Kubernetes cluster. Your next steps are to configure Palette VerteX for your organization. Start by creating the first tenant to host your users. Use the [Create a Tenant](../../system-management/tenant-management.md) page for instructions on how to create a tenant. diff --git a/docs/docs-content/vertex/install-palette-vertex/install-on-kubernetes/vertex-helm-ref.md b/docs/docs-content/vertex/install-palette-vertex/install-on-kubernetes/vertex-helm-ref.md new file mode 100644 index 0000000000..b2480549a0 --- /dev/null +++ b/docs/docs-content/vertex/install-palette-vertex/install-on-kubernetes/vertex-helm-ref.md @@ -0,0 +1,429 @@ +--- +sidebar_label: "Helm Configuration Reference" +title: "Helm Configuration Reference" +description: "Reference resource for the Palette VerteX Helm Chart installation parameters." +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["vertex", "helm"] +--- + + +You can use the Palette VerteX Helm Chart to install Palette VerteX in a multi-node Kubernetes cluster in your production environment. The Helm chart allows you to customize values in the **values.yaml** file. This reference page lists and describes parameters available in the **values.yaml** file from the Helm Chart for your installation. + +To learn how to install Palette VerteX using the Helm Chart, refer to the Kubernetes [Instructions](install.md). 
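+
+If you do not already have a copy of the **values.yaml** file, you can print the chart's defaults with `helm show values`. This is a sketch only; the chart archive path is a placeholder for the Palette VerteX chart you downloaded by following the install instructions.
+
+```shell
+helm show values palette-vertex-chart.tgz > values.yaml
+```
+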
+ +## Required Parameters + +The following parameters are required for a successful installation of Palette VerteX. + + +| **Parameters** | **Description** | **Type** | +| --- | --- | --- | +| `config.env.rootDomain` | Used to configure the domain for the Palette VerteX installation. We recommend you create a CNAME DNS record that supports multiple subdomains. You can achieve this using a wildcard prefix, `*.vertex.abc.com`. Review the [Environment parameters](#environment) to learn more. | String | +| `config.env.ociPackRegistry` or `config.env.ociPackEcrRegistry`| Specifies the FIPS image registry for Palette VerteX. You can use a self-hosted OCI registry or a public OCI registry we maintain and support. For more information, refer to the [Registry](#registries) section. | Object | +| `scar`| The Spectro Cloud Artifact Repository (SCAR) credentials for Palette VerteX FIPS images. Our support team provides these credentials. For more information, refer to the [Registry](#registries) section. | Object | + + +:::caution + +If you are installing an air-gapped version of Palette VerteX, you must provide the image swap configuration. For more information, refer to the [Image Swap Configuration](#image-swap-configuration) section. + + +::: + + +## MongoDB + +Palette VerteX uses MongoDB Enterprise as its internal database and supports two modes of deployment:

+ +- MongoDB Enterprise deployed and active inside the cluster. + + +- MongoDB Enterprise is hosted on a software-as-a-service (SaaS) platform, such as MongoDB Atlas. + +The table below lists the parameters used to configure a MongoDB deployment. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `internal` | Specifies the MongoDB deployment either in-cluster or using Mongo Atlas. | Boolean | `true` | +| `databaseUrl`| The URL for MongoDB Enterprise. If using a remote MongoDB Enterprise instance, provide the remote URL. This parameter must be updated if `mongo.internal` is set to `false`. | String | `mongo-0.mongo,mongo-1.mongo,mongo-2.mongo` | +| `databasePassword`| The base64-encoded MongoDB Enterprise password. If you don't provide a value, a random password will be auto-generated. | String | `""` | +| `replicas`| The number of MongoDB replicas to start. | Integer | `3` | +| `memoryLimit`| Specifies the memory limit for each MongoDB Enterprise replica.| String | `4Gi` | +| `cpuLimit` | Specifies the CPU limit for each MongoDB Enterprise member.| String | `2000m` | +| `pvcSize`| The storage settings for the MongoDB Enterprise database. Use increments of `5Gi` when specifying the storage size. The storage size applies to each replica instance. The total storage size for the cluster is `replicas` * `pvcSize`. | string | `20Gi`| +| `storageClass`| The storage class for the MongoDB Enterprise database. | String | `""` | + + +```yaml +mongo: + internal: true + databaseUrl: "mongo-0.mongo,mongo-1.mongo,mongo-2.mongo" + databasePassword: "" + replicas: 3 + cpuLimit: "2000m" + memoryLimit: "4Gi" + pvcSize: "20Gi" + storageClass: "" +``` + +## Config + +Review the following parameters to configure Palette VerteX for your environment. The `config` section contains the following subsections: + + +#### Install Mode + +You can install Palette in connected or air-gapped mode. The table lists the parameters to configure the installation mode. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `installMode` | Specifies the installation mode. Allowed values are `connected` or `airgap`. Set the value to `airgap` when installing in an air-gapped environment. | String | `connected` | + +```yaml +config: + installationMode: "connected" +``` + +### SSO + +You can configure Palette VerteX to use Single Sign-On (SSO) for user authentication. Configure the SSO parameters to enable SSO for Palette VerteX. You can also configure different SSO providers for each tenant post-install, check out the [SAML & SSO Setup](../../../user-management/saml-sso/saml-sso.md) documentation for additional guidance. + +To configure SSO, you must provide the following parameters. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | --- | +| `saml.enabled` | Specifies whether to enable SSO SAML configuration by setting it to true. | Boolean | `false` | +| `saml.acsUrlRoot` | The root URL of the Assertion Consumer Service (ACS).| String | `myfirstpalette.spectrocloud.com`| +| `saml.acsUrlScheme` | The URL scheme of the ACS: `http` or `https`. 
| String | `https` | +| `saml.audienceUrl` | The URL of the intended audience for the SAML response.| String| `https://www.spectrocloud.com` | +| `saml.entityID` | The Entity ID of the Service Provider.| String | `https://www.spectrocloud.com`| +| `saml.apiVersion` | Specify the SSO SAML API version to use.| String | `v1` | + +```yaml +config: + sso: + saml: + enabled: false + acsUrlRoot: "myfirstpalette.spectrocloud.com" + acsUrlScheme: "https" + audienceUrl: "https://www.spectrocloud.com" + entityId: "https://www.spectrocloud.com" + apiVersion: "v1" +``` + +### Email + +Palette VerteX uses email to send notifications to users. The email notification is used when inviting new users to the platform, password resets, and when [webhook alerts](../../../clusters/cluster-management/health-alerts.md) are triggered. Use the following parameters to configure email settings for Palette VerteX. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `enabled` | Specifies whether to enable email configuration. | Boolean| `false`| +| `emailID ` | The email address for sending mail.| String| `noreply@spectrocloud.com` | +| `smtpServer` | Simple Mail Transfer Protocol (SMTP) server used for sending mail. | String | `smtp.gmail.com` | +| `smtpPort` | SMTP port used for sending mail.| Integer | `587` | +| `insecureSkipVerifyTLS` | Specifies whether to skip Transport Layer Security (TLS) verification for the SMTP connection.| Boolean | `true` | +| `fromEmailID` | Email address of the ***From*** address.| String | `noreply@spectrocloud.com` | +| `password` | The base64-encoded SMTP password when sending emails.| String | `""` | + +```yaml +config: + email: + enabled: false + emailId: "noreply@spectrocloud.com" + smtpServer: "smtp.gmail.com" + smtpPort: 587 + insecureSkipVerifyTls: true + fromEmailId: "noreply@spectrocloud.com" + password: "" +``` + +### Environment + +The following parameters are used to configure the environment. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `env.rootDomain` | Specifies the URL name assigned to Palette Vertex. The value assigned should have a Domain Name System (DNS) CNAME record mapped to exposed IP address or the load balancer URL of the service *ingress-nginx-controller*. Optionally, if `ingress.ingressStaticIP` is provided with a value you can use same assigned static IP address as the value to this parameter.| String| `""` | +| `env.installerMode` | Specifies the installer mode. Do not modify the value.| String| `self-hosted` | +| `env.installerCloud` | Specifies the cloud provider. Leave this parameter empty if you are installing a self-hosted Palette VerteX. | String | `""` | + +```yaml +config: + env: + rootDomain: "" +``` +
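+
+A populated example might look like the following. The domain name is illustrative; use the DNS record you created for your environment.
+
+```yaml
+config:
+  env:
+    rootDomain: "vertex.example.com"
+```
+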
+ +:::caution + +As you create tenants in Palette VerteX, the tenant name is prefixed to the domain name you assigned to Palette VerteX. For example, if you create a tenant named tenant1 and the domain name you assigned to Palette VerteX is `vertex.example.com`, the tenant URL will be `tenant1.vertex.example.com`. We recommend you create an additional wildcard DNS record to map all tenant URLs to the Palette VerteX load balancer. For example, `*.vertex.example.com`. + +::: + +### Cluster + +Use the following parameters to configure the Kubernetes cluster. + + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `stableEndpointAccess` | Set to `true` if the Kubernetes cluster is deployed in a public endpoint. If the cluster is deployed in a private network through a stable private endpoint, set to `false`. | Boolean | `false` | + +```yaml +config: + cluster: + stableEndpointAccess: false +``` + +## Registries + +Palette VerteX requires credentials to access the required Palette VerteX images. You can configure different types of registries for Palette VerteX to download the required images. You must configure at least one Open Container Initiative (OCI) registry for Palette VerteX. You must also provide the credentials for the Spectro Cloud Artifact Repository (SCAR) to download the required FIPS images. + +
+ + ### OCI Registry + + +Palette VerteX requires access to an OCI registry that contains all the required FIPS packs. You can host your own OCI registry and configure Palette VerteX to reference the registry. Alternatively, you can use the public OCI registry that we provide. Refer to the [`ociPackEcrRegistry`](#oci-ecr-registry) section to learn more about the publicly available OCI registry. + + 
+ +:::caution + +If you are using a self-hosted OCI registry, you must provide the required FIPS packs to the registry. Contact support for additional guidance on how to add the required FIPS packs to your OCI registry. + +::: + + + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `ociPackRegistry.endpoint` | The endpoint URL for the registry. | String| `""` | +| `ociPackRegistry.name` | The name of the registry. | String| `""` | +| `ociPackRegistry.password` | The base64-encoded password for the registry. | String| `""` | +| `ociPackRegistry.username` | The username for the registry. | String| `""` | +| `ociPackRegistry.baseContentPath`| The base path for the registry. | String | `""` | +| `ociPackRegistry.insecureSkipVerify` | Specifies whether to skip Transport Layer Security (TLS) verification for the registry connection. | Boolean | `false` | +| `ociPackRegistry.caCert` | The registry's base64-encoded certificate authority (CA) certificate. | String | `""` | + + +```yaml +config: + ociPackRegistry: + endpoint: "" + name: "" + password: "" + username: "" + baseContentPath: "" + insecureSkipVerify: false + caCert: "" +``` + +### OCI ECR Registry + +We expose a public OCI ECR registry that you can configure Palette VerteX to reference. If you want to host your own OCI registry, refer to the [OCI Registry](#oci-registry) section. +The OCI Elastic Container Registry (ECR) is hosted in an AWS ECR registry. Our support team provides the credentials for the OCI ECR registry. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `ociPackEcrRegistry.endpoint` | The endpoint URL for the registry. | String| `""` | +| `ociPackEcrRegistry.name` | The name of the registry. | String| `""` | +| `ociPackEcrRegistry.accessKey` | The base64-encoded access key for the registry. | String| `""` | +| `ociPackEcrRegistry.secretKey` | The base64-encoded secret key for the registry. | String| `""` | +| `ociPackEcrRegistry.baseContentPath`| The base path for the registry. | String | `""` | +| `ociPackEcrRegistry.isPrivate` | Specifies whether the registry is private. | Boolean | `true` | +| `ociPackEcrRegistry.insecureSkipVerify` | Specifies whether to skip Transport Layer Security (TLS) verification for the registry connection. | Boolean | `false` | +| `ociPackEcrRegistry.caCert` | The registry's base64-encoded certificate authority (CA) certificate. | String | `""` | + +```yaml +config: + ociPackEcrRegistry: + endpoint: "" + name: "" + accessKey: "" + secretKey: "" + baseContentPath: "" + isPrivate: true + insecureSkipVerify: false + caCert: "" +``` + +### Spectro Cloud Artifact Repository (SCAR) + +SCAR credentials are required to download the necessary FIPS manifests. Our support team provides the SCAR credentials. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `scar.endpoint` | The endpoint URL of SCAR. | String| `""` | +| `scar.username` |The username for SCAR. | String| `""` | +| `scar.password` | The base64-encoded password for the SCAR. | String| `""` | +| `scar.insecureSkipVerify` | Specifies whether to skip Transport Layer Security (TLS) verification for the SCAR connection. | Boolean | `false` | +| `scar.caCert` | The base64-encoded certificate authority (CA) certificate for SCAR. | String | `""` | + +
+ + ```yaml + config: + scar: + endpoint: "" + username: "" + password: "" + insecureSkipVerify: false + caCert: "" + ``` + +### Image Swap Configuration + +You can configure Palette VerteX to use image swap to download the required images. This is an advanced configuration option, and it is only required for air-gapped deployments. You must also install the Palette VerteX Image Swap Helm chart to use this option, otherwise, Palette VerteX will ignore the configuration. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `imageSwapInitImage` | The image swap init image. | String | `gcr.io/spectro-images-public/thewebroot/imageswap-init:v1.5.2` | +| `imageSwapImage` | The image swap image. | String | `gcr.io/spectro-images-public/thewebroot/imageswap:v1.5.2` | +| `imageSwapConfig`| The image swap configuration for specific environments. | String | `""` | +| `imageSwapConfig.isEKSCluster` | Specifies whether the cluster is an Amazon EKS cluster. Set to `false` if the Kubernetes cluster is not an EKS cluster. | Boolean | `true` | + +
+ + ```yaml + config: + imageSwapImages: + imageSwapInitImage: "gcr.io/spectro-images-public/thewebroot/imageswap-init:v1.5.2" + imageSwapImage: "gcr.io/spectro-images-public/thewebroot/imageswap:v1.5.2" + + imageSwapConfig: + isEKSCluster: true + ``` + +## gRPC + +gRPC is used for communication between Palette VerteX components. You can enable the deployment of an additional load balancer for gRPC. Host clusters deployed by Palette VerteX use the load balancer to communicate with the Palette VerteX control plane. This is an advanced configuration option, and it is not required for most deployments. Speak with your support representative before enabling this option. + +If you want to use an external gRPC endpoint, you must provide a domain name for the gRPC endpoint and a valid x509 certificate. Additionally, you must provide a custom domain name for the endpoint. A CNAME DNS record must point to the IP address of the gRPC load balancer. For example, if your Palette VerteX domain name is `vertex.example.com`, you could create a CNAME DNS record for `grpc.vertex.example.com` that points to the IP address of the load balancer dedicated to gRPC. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `external`| Specifies whether to use an external gRPC endpoint. | Boolean | `false` | +| `endpoint`| The gRPC endpoint. | String | `""` | +| `caCertificateBase64`| The base64-encoded certificate authority (CA) certificate for the gRPC endpoint. | String | `""` | +| `serverCrtBase64`| The base64-encoded server certificate for the gRPC endpoint. | String | `""` | +| `serverKeyBase64`| The base64-encoded server key for the gRPC endpoint. | String | `""` | +| `insecureSkipVerify`| Specifies whether to skip Transport Layer Security (TLS) verification for the gRPC endpoint. | Boolean | `false` | + + + + +```yaml +grpc: + external: false + endpoint: "" + caCertificateBase64: "" + serverCrtBase64: "" + serverKeyBase64: "" + insecureSkipVerify: false +``` + +## Ingress + +Palette VerteX deploys an Nginx Ingress Controller. This controller is used to route traffic to the Palette VerteX control plane. You can change the default behavior and omit the deployment of an Nginx Ingress Controller. + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `enabled`| Specifies whether to deploy an Nginx controller. Set to `false` if you do not want an Nginx controller deployed. | Boolean | `true` | +| `ingress.internal`| Specifies whether to deploy a load balancer or use the host network. | Boolean | `false` | +| `ingress.certificate`| Specify the base64-encoded x509 SSL certificate for the Nginx Ingress Controller. If left blank, the Nginx Ingress Controller will generate a self-signed certificate. | String | `""` | +| `ingress.key`| Specify the base64-encoded x509 SSL certificate key for the Nginx Ingress Controller. | String | `""` | +| `ingress.annotations`| A map of key-value pairs that specifies load balancer annotations for ingress. You can use annotations to change the behavior of the load balancer and the Nginx configuration. This is an advanced setting. We recommend you consult with your assigned support team representative prior to modification. | Object | `{}` | +| `ingress.ingressStaticIP`| Specify a static IP address for the ingress load balancer service. If empty, a dynamic IP address will be assigned to the load balancer. 
| String | `""` | +| `ingress.terminateHTTPSAtLoadBalancer`| Specifies whether to terminate HTTPS at the load balancer. | Boolean | `false` | + + +```yaml +ingress: + enabled: true + ingress: + internal: false + certificate: "" + key: "" + annotations: {} + ingressStaticIP: "" + terminateHTTPSAtLoadBalancer: false +``` + +## Spectro Proxy + +You can specify a reverse proxy server that clusters deployed through Palette VerteX can use to facilitate network connectivity to the cluster's Kubernetes API server. Host clusters deployed in private networks can use the [Spectro Proxy pack](../../../integrations/frp.md) to expose the cluster's Kubernetes API to downstream clients that are not in the same network. Check out the [Reverse Proxy](../../system-management/reverse-proxy.md) documentation to learn more about setting up a reverse proxy server for Palette VerteX. + + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `frps.enabled`| Specifies whether to enable the Spectro server-side proxy. | Boolean | `false` | +| `frps.frpHostURL`| The Spectro server-side proxy URL. | String | `""` | +| `frps.server.crt`| The base64-encoded server certificate for the Spectro server-side proxy. | String | `""` | +| `frps.server.key`| The base64-encoded server key for the Spectro server-side proxy. | String | `""` | +| `frps.ca.crt`| The base64-encoded certificate authority (CA) certificate for the Spectro server-side proxy. | String | `""` | + +```yaml +frps: + frps: + enabled: false + frpHostURL: "" + server: + crt: "" + key: "" + ca: + crt : "" +``` + +## UI System + +The table lists parameters to configure the Palette VerteX User Interface (UI) behavior. You can disable the UI or the Network Operations Center (NOC) UI. You can also specify the MapBox access token and style layer ID for the NOC UI. MapBox is a third-party service that provides mapping and location services. To learn more about MapBox and how to obtain an access token, refer to the [MapBox Access tokens](https://docs.mapbox.com/help/getting-started/access-tokens) guide. + + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `enabled`| Specifies whether to enable the Palette VerteX UI. | Boolean | `true` | +| `ui.nocUI.enable`| Specifies whether to enable the Palette VerteX Network Operations Center (NOC) UI. Enabling this parameter requires the `ui.nocUI.mapBoxAccessToken`. Once enabled, all cluster locations will be reported to MapBox. This feature is not FIPS compliant. | Boolean | `false` | +| `ui.nocUI.mapBoxAccessToken`| The MapBox access token for the Palette VerteX NOC UI. | String | `""` | +| `ui.nocUI.mapBoxStyledLayerID`| The MapBox style layer ID for the Palette VerteX NOC UI. | String | `""` | + + + +```yaml +ui-system: + enabled: true + ui: + nocUI: + enable: false + mapBoxAccessToken: "" + mapBoxStyledLayerID: "" +``` + + + + +## Reach System + +You can configure Palette VerteX to use a proxy server to access the internet. Set the parameter `reach-system.reachSystem.enabled` to `true` to enable the proxy server. Proxy settings are configured in the `reach-system.reachSystem.proxySettings` section. + + +| **Parameters** | **Description** | **Type** | **Default value** | +| --- | --- | --- | --- | +| `reachSystem.enabled`| Specifies whether to enable the usage of a proxy server for Palette VerteX. | Boolean | `false` | +| `reachSystem.proxySettings.http_proxy`| The HTTP proxy server URL. 
| String | `""` | +| `reachSystem.proxySettings.https_proxy`| The HTTPS proxy server URL. | String | `""` | +| `reachSystem.proxySettings.no_proxy`| A list of hostnames or IP addresses that should not be proxied. | String | `""` | + + + ```yaml + reach-system: + reachSystem: + enabled: false + proxySettings: + http_proxy: "" + https_proxy: "" + no_proxy: + ``` diff --git a/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/_category_.json b/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/_category_.json new file mode 100644 index 0000000000..3fca6fb9f9 --- /dev/null +++ b/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 0 +} diff --git a/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/install-on-vmware.md b/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/install-on-vmware.md new file mode 100644 index 0000000000..29f56e1b42 --- /dev/null +++ b/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/install-on-vmware.md @@ -0,0 +1,24 @@ +--- +sidebar_label: "VMware" +title: "Install Palette VerteX on VMware" +description: "Learn how to install Palette VerteX on VMware." +icon: "" +hide_table_of_contents: false +tags: ["vertex", "vmware"] +--- + + + + +Palette VerteX can be installed on VMware vSphere with internet connectivity or an airgap environment. When you install Palette VerteX, a three-node cluster is created. You use the interactive Palette CLI to install Palette VerteX on VMware vSphere. Refer to [Access Palette VerteX](../../vertex.md#access-palette-vertex) for instructions on requesting repository access. + +## Resources + +- [Install on VMware](install.md) + + + + + +- [VMware System Requirements](vmware-system-requirements.md) + \ No newline at end of file diff --git a/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/install.md b/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/install.md new file mode 100644 index 0000000000..4460a78a0f --- /dev/null +++ b/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/install.md @@ -0,0 +1,341 @@ +--- +sidebar_label: "Instructions" +title: "Install Palette VerteX on VMware" +description: "Learn how to deploy Palette VerteX on VMware." +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["vertex", "vmware"] +--- + + + + + + +# Install Palette VerteX on VMware vSphere + +You install Palette VerteX using the Palette Command Line Interface (CLI) that guides you for details to create a configuration file and a three-node enterprise cluster for high availability (HA). You can invoke the Palette CLI on any Linux x86-64 system with the Docker daemon installed and connectivity to the VMware vSphere environment where Palette VerteX will be deployed. + + +## Prerequisites + +- An AMD64 Linux environment with connectivity to the VMware vSphere environment. + + + +- [Docker](https://docs.docker.com/engine/install/) or equivalent container runtime installed and available on the Linux host. + + + +- Palette CLI installed and available. Refer to the Palette CLI [Install](../../../palette-cli/install-palette-cli.md#download-and-setup) page for guidance. + + +- An Ubuntu Pro Subscription and token. Ubuntu Pro provides access to FIPS 140-2 certified cryptographic packages. + + +- Review required VMware vSphere environment [permissions](vmware-system-requirements.md). + + + +- We recommended the following resources for Palette VerteX. 
Refer to the [Palette VerteX size guidelines](../install-palette-vertex.md#instance-sizing) for additional sizing information. + + - 8 CPUs per VM. + + - 16 GB Memory per VM. + + - 100 GB Disk Space per VM. + + +- The following network ports must be accessible for Palette VerteX to operate successfully. + + - TCP/443: Inbound to and outbound from the Palette VerteX management cluster. + + - TCP/6443: Outbound traffic from the Palette VerteX management cluster to the deployed cluster's Kubernetes API server. + + +- Ensure you have an SSL certificate that matches the domain name you will assign to Palette VerteX. You will need this to enable HTTPS encryption for Palette VerteX. Reach out to your network administrator or security team to obtain the SSL certificate. You need the following files: + + - x509 SSL certificate file in base64 format. + + - x509 SSL certificate key file in base64 format. + + - x509 SSL certificate authority file in base64 format. This file is optional. + + +- Zone tagging is required for dynamic storage allocation across fault domains when provisioning workloads that require persistent storage. Refer to [Zone Tagging](vmware-system-requirements.md#zone-tagging) for information. + + +- Assigned IP addresses for application workload services, such as Load Balancer services. + + +- Shared Storage between vSphere hosts. + + + +
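+
+The SSL certificate, key, and certificate authority files listed in the prerequisites must be provided in base64 format. A minimal sketch of encoding them on the Linux install host follows; the file names are illustrative.
+
+```shell
+base64 -w0 vertex.crt > vertex.crt.b64
+base64 -w0 vertex.key > vertex.key.b64
+base64 -w0 ca.crt > ca.crt.b64
+```
+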
+ +:::info + +Self-hosted Palette VerteX installations provide a system Private Cloud Gateway (PCG) out-of-the-box and typically do not require a separate, user-installed PCG. However, you can create additional PCGs as needed to support provisioning into remote data centers that do not have a direct incoming connection from the Palette console. To learn how to install a PCG on VMware, check out the [VMware](../../../clusters/data-center/vmware.md) guide. + +::: + + +## Install the Enterprise Cluster + +The video below provides a demonstration of the installation wizard and the prompts you will encounter. Take a moment to watch the video before you begin the installation process. Make sure to use values that are appropriate for your environment. Use the **three-dot Menu** in the lower right corner of the video to expand the video to full screen and to change the playback speed. + +
+ + + + +Use the following steps to install Palette VerteX. + + +
+ +1. Open a terminal window and invoke the Palette CLI by using the `ec` command to install the enterprise cluster. The interactive CLI prompts you for configuration details and then initiates the installation. For more information about the `ec` subcommand, refer to [Palette Commands](../../../palette-cli/commands.md#ec). + +
+ + ```bash + palette ec install + ``` + +2. At the **Enterprise Cluster Type** prompt, choose **Palette VerteX**. + + +3. Type `y` to enable Ubuntu Pro, and provide your Ubuntu Pro token when prompted. + +
+ + :::caution + + To ensure FIPS compliance, be sure to enter your Ubuntu Pro token. + + ::: + +
+ + +4. Provide the FIPS repository URL you received from our support team. + + +5. Enter the FIPS repository credentials. + + +6. Choose `VMware vSphere` as the cloud type. This is the default. + + +7. Type an enterprise cluster name. + + +8. When prompted, enter the information listed in each of the following tables. + +
+ + #### Environment Configuration + + |**Parameter**| **Description**| + |:-------------|----------------| + |**HTTPS Proxy**|Leave this blank unless you are using an HTTPS Proxy. This setting will be propagated to all EC nodes and all of its target cluster nodes. Example: `https://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| + |**HTTP Proxy**|Leave this blank unless you are using an HTTP Proxy. This setting will be propagated to all EC nodes and all of its target cluster nodes. Example: `http://USERNAME:PASSWORD@PROXYIP:PROXYPORT`.| + |**No Proxy**|The default is blank. You can add a comma-separated list of local network CIDR addresses, hostnames, and domain names that should be excluded from being a proxy. This setting will be propagated to all the nodes to bypass the proxy server. Example if you have a self-hosted environment: `maas.company.com,10.10.0.0/16`.| + |**Proxy CA Certificate Filepath**|The default is blank. You can provide the filepath of a CA certificate on the installer host. If provided, this CA certificate will be copied to each host in the PCG cluster during deployment. The provided path will be used on the PCG cluster hosts. Example: `/usr/local/share/ca-certificates/ca.crt`.| + |**Pod CIDR**|Enter the CIDR pool IP that will be used to assign IP addresses to pods in the EC cluster. The pod IP addresses should be unique and not overlap with any machine IPs in the environment.| + |**Service IP Range**|Enter the IP address range that will be used to assign IP addresses to services in the EC cluster. The service IP addresses should be unique and not overlap with any machine IPs in the environment.| + +
+ + + +9. Select the OCI registry type and provide the configuration values. Review the following table for more information. + +
+ + #### Pack & Image Registry Configuration + + | **Parameter** | **Description** | + |---------------------------|-----------------------------------------| + | **Registry Type** | Specify the type of registry. Allowed values are `OCI` or `OCI ECR`. | + | **Registry Name** | Enter the name of the registry. | + | **Registry Endpoint** | Enter the registry endpoint. | + | **Registry Base Path** | Enter the registry base path. | + | **Allow Insecure Connection** | Bypasses x509 verification. Type `Y` if the registry uses self-signed Transport Layer Security (TLS) certificates. Otherwise, type `n`.| + | **Registry Username** or **Registry Access Key** | Enter the registry username or the access key if using `OCI ECR`. | + | **Registry Password** or **Registry Secret Key** | Enter the registry password or the secret key if using `OCI ECR`. | + | **Registry Region** | Enter the registry region. This option is only available if you are using `OCI ECR`. | + | **ECR Registry Private** | Type `y` if the registry is private. Otherwise, type `n`. | + | **Use Public Registry for Images** | Type `y` to use a public registry for images. Type `n` to use a different registry for images. If you are using another registry for images, you will be prompted to enter the registry URL, base path, username, and password. | + 
+ +10. Next, specify the database storage size to allocate for Palette VerteX. The default is 20 GB. Refer to the [size guidelines](../install-palette-vertex.md#instance-sizing) for additional information. + + + +11. The next set of prompts is for the VMware vSphere account information. Enter the information listed in the following table. + +
+ + #### VMware vSphere Account Information + + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + |**vSphere Endpoint** | VMware vSphere endpoint. Must be a fully qualified domain name (FQDN) or IP address without a scheme - that is, without an IP protocol, such as `https://`. Example: `vcenter.mycompany.com`.| + |**vSphere Username** | VMware vSphere account username.| + |**vSphere Password**| VMware vSphere account password.| + |**Allow Insecure Connection** | Bypasses x509 verification. Type `Y` if using a VMware vSphere instance with self-signed Transport Layer Security (TLS) certificates. Otherwise, type `n`.| + +
+ + #### VMware vSphere Cluster Configuration + + This information determines where Palette VerteX will be deployed in your VMware vSphere environment. The Palette CLI will use the provided VMware credentials to retrieve information from your VMware vSphere environment and present options for you to select from. + +
+ + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + |**Datacenter**| The installer retrieves the Datacenter automatically. | + |**Folder** | Select the folder that contains the VM instance. | + | **Cluster** | Select the cluster where you want to deploy Palette VerteX. | + | **Network** | Select the network where you want to deploy Palette VerteX. | + | **Resource Pool** | Select the resource pool where you want to deploy Palette VerteX. | + | **Datastore** | Select the datastore where you want to deploy Palette VerteX. | + |**Fault Domains** | Configure one or more fault domains by selecting values for these properties: Cluster, Network (with network connectivity), Resource Pool, and Storage Type (Datastore or VM Storage Policy). Note that when configuring the Network, if you are using a distributed switch, choose the network that contains the switch. | + |**NTP Servers** | You can provide a list of Network Time Protocol (NTP) servers. | + |**SSH Public Keys** | Provide any public SSH keys to access your Palette VerteX VMs. This option opens up your system's default text editor. Vi is the default text editor for most Linux distributions. To review basic vi commands, check out the [vi Commands](https://www.cs.colostate.edu/helpdocs/vi.html) reference. | + + +12. Specify the IP pool configuration. The placement type can be Static or Dynamic Domain Name Server (DDNS). Choosing static placement creates an IP pool from which VMs are assigned IP addresses. Choosing DDNS assigns IP addresses using DNS. + +
+ + #### Static Placement Configuration + | **Parameter** | **Description** | + |---------------------------|-----------------------------------------| + | **IP Start range** | Enter the first address in the EC IP pool range. | + | **IP End range** | Enter the last address in the EC IP pool range. | + | **Network Prefix** | Enter the network prefix for the IP pool range. Valid values are in [0, 32]. Example: `18`. | + | **Gateway IP Address** | Enter the IP address of the static IP gateway. | + | **Name servers** | Comma-separated list of DNS name server IP addresses. | + | **Name server search suffixes** | An optional comma-separated list of DNS search domains. | + + +
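+
+    For example, a purely illustrative static pool that stays within a single `/24` network could be entered as follows. Replace every value with addresses from your own environment.
+
+    ```shell hideClipboard
+    IP Start range:      10.10.100.10
+    IP End range:        10.10.100.60
+    Network Prefix:      24
+    Gateway IP Address:  10.10.100.1
+    Name servers:        10.10.128.8
+    ```
+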
+ + +13. The last set of prompts is for the VMware vSphere machine configuration. Enter the information listed in the following table. + +
+ + #### vSphere Machine Configuration + + |**Parameter** | **Description**| + |-----------------------------------------|----------------| + | **Number of CPUs** | The number of CPUs allocated to each VM node instance.| + | **Memory** | The amount of memory allocated to each VM node instance.| + | **Disk Size** | The size of the disk allocated to each VM node instance.| + + +
+ + + The installation process stands up a [kind](https://kind.sigs.k8s.io/) cluster locally that will orchestrate the remainder of the installation. The installation takes some time. + +
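+
+    If you want to confirm that the local bootstrap cluster is up while you wait, you can list the containers on the install host. This assumes only that Docker is the container runtime, as noted in the prerequisites; kind node containers typically include `control-plane` in their names.
+
+    ```shell
+    docker ps --format "{{.Names}}\t{{.Status}}"
+    ```
+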
+ + Upon completion, the enterprise cluster configuration file named `ec.yaml` contains the information you provided, and its location is displayed in the terminal. Credentials and tokens are encrypted in the YAML file. + +
+ + ```bash hideClipboard + ==== Enterprise Cluster config saved ==== + Location: :/home/spectro/.palette/ec/ec-20230706150945/ec.yaml + ``` + +
+ + When the installation is complete, Enterprise Cluster Details that include a URL and default credentials are displayed in the terminal. You will use these to access the Palette VerteX System Console. + +
+ + ```bash hideClipboard + ==================================== + ==== Enterprise Cluster Details ==== + ==================================== + Console URL: https://10.10.189.100/system + Username: ********** + Password: ********** + ``` + + +14. Copy the URL to the browser to access the System Console. You will be prompted to reset the password. + +
+ + :::info + + The first time you visit the Palette VerteX system console, a warning message about an untrusted SSL certificate may appear. This is expected, as you have not yet uploaded your SSL certificate to Palette VerteX. You can ignore this warning message and proceed. + + ::: + +
+ + ![Screenshot of the Palette VerteX system console showing Username and Password fields.](/vertex_installation_install-on-vmware_vertex-system-console.png) + +
+ + +15. Log in to the System Console using the credentials provided in the Enterprise Cluster Details output. After login, you will be prompted to create a new password. Enter a new password and save your changes. You will be redirected to the Palette VerteX system console. + + +16. After login, a Summary page is displayed. Palette VerteX is installed with a self-signed SSL certificate. To assign a different SSL certificate, you must upload the SSL certificate, SSL certificate key, and SSL certificate authority files to Palette VerteX. You can upload the files using the Palette VerteX system console. Refer to the [Configure HTTPS Encryption](../../system-management/ssl-certificate-management.md) page for instructions on how to upload the SSL certificate files to Palette VerteX. + + +17. The last step is to start setting up a tenant. To learn how to create a tenant, check out the [Tenant Management](../../system-management/tenant-management.md) guide. + 
+ + ![Screenshot of the Summary page showing where to click Go to Tenant Management button.](/vertex_installation_install-on-vmware_goto-tenant-management.png) + + +## Validate + +You can verify the installation is successful if you can access the system console using the IP address provided in Enterprise Cluster Details and if the Summary page displays the **Go to Tenant Management** button. + +You can also validate that a three-node Kubernetes cluster is launched and Palette VerteX is deployed on it. + +
+ +1. Log in to the vCenter Server by using vSphere Client. + + +2. Navigate to the Datacenter and locate your VM instance. + + +3. Select the VM to access its details page, and verify three nodes are listed. + + +4. Open a web browser session, and use the IP address provided in Enterprise Cluster Details at the completion of the installation to connect to the Palette VerteX System Console. Copy the IP address to the address bar and append `/system`. + + +5. Log in using your credentials. + + +6. A **Summary** page will be displayed that contains a tile with a **Go to Tenant Management** button. After initial installation, the **Summary** page shows there are zero tenants. + + +## Next Steps + +You have successfully installed Palette VerteX in vSphere. Your next steps are to configure Palette VerteX for your organization. Start by creating the first tenant to host your users. Refer to [Create a Tenant](../../system-management/tenant-management.md) for instructions. + +After you create the tenant, you are ready to configure authentication types in tenant settings and create users and teams. + +## Resources + +- [Create a Tenant](../../system-management/tenant-management.md) + \ No newline at end of file diff --git a/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/vmware-system-requirements.md b/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/vmware-system-requirements.md new file mode 100644 index 0000000000..df57133386 --- /dev/null +++ b/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/vmware-system-requirements.md @@ -0,0 +1,278 @@ +--- +sidebar_label: "VMware System and Permission Requirements" +title: "VMware System and Permission Requirements" +description: "Review VMware system requirements and cloud account permissions." +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["vertex", "vmware"] +--- + + + + +The sections below describe system requirements and cloud account permissions for VMware vSphere environments hosting Palette VerteX. + + + +## VMware Cloud Account Permissions + +The vSphere user account that deploys Palette VerteX must have the minimum root-level VMware vSphere privileges listed in the table below. The **Administrator** role provides superuser access to all vSphere objects. For users without the **Administrator** role, one or more custom roles can be created based on tasks the user will perform. Permissions and privileges vary depending on the vSphere version you are using. + +Select the tab for your vSphere version. + +
+ +:::caution + +If the network is a Distributed Port Group under a vSphere Distributed Switch (VDS), ReadOnly access to the VDS without “Propagate to children” is required. + +::: + +
+ + + + + +## Root-Level Role Privileges + +Root-level role privileges are only applied to root objects and data center objects. + +| **vSphere Object** | **Privilege** | +|--------------------|-----------------------------------------| +| CNS | Searchable | +| Datastore | Browse datastore | +| Host | Configuration
Storage partition configuration | +| vSphere Tagging | Create vSphere Tag
Edit vSphere Tag | +| Network | Assign network | +| Sessions | Validate session | +| VM Storage Policies| View VM storage policies | +| Storage views | View | + + +## Spectro Role Privileges + +Spectro role privileges listed in the table must be applied to the spectro-template folder, hosts, clusters, templates, datastore, network objects, and Virtual Machines (VMs). A separate table lists Spectro role privileges for VMs by category. + +
+ +:::info + +Palette downloads images and Open Virtual Appliance (OVA) files to the spectro-templates folder and clones images from it to create nodes. + +::: + + +| **vSphere Object**| **Privileges** | +|-------------------|---------------------------------------------| +| CNS | Searchable | +| Datastore | Allocate space
Browse datastore
Low-level file operations
Remove file
Update VM files
Update VM metadata | +| Folder | Create Folder
Delete folder
Move folder
Rename folder| +| Host | Local operations: Reconfigure VM | +| Network | Assign network | +| Resource | Apply recommendation
Assign VM to resource pool
Migrate powered off VM
Migrate powered on VM
Query vMotion | +| Sessions | Validate sessions | +| Storage policies | View access for VM storage policies is required.
Ensure ``StorageProfile.View`` is available. | +| spectro-templates | Read only | +| Storage views | View | +| Tasks | Create task
Update task | +| vApp | Import
View OVF environment
Configure vAPP application
Configure vApp instance | +| vSphere tagging | Assign or Unassign vSphere Tag
Create vSphere Tag
Delete vSphere Tag
Edit vSphere Tag | + + +The following table lists Spectro Cloud role privileges for VMs by category. + +| **vSphere Object**| **Category** | **Privileges** | +|-------------------|----------------------|--------------------| +| Virtual Machines | Change Configuration | Acquire disk lease
Add existing disk
Add new disk
Add or remove device
Advanced configuration
Change CPU count
Change memory
Change settings
Change swapfile placement
Change resource
Change host USB device
Configure raw device
Configure managedBy
Display connection settings
Extend virtual disk
Modify device settings
Query fault tolerance compatibity
Query unowned files
Reload from path
Remove disk
Rename
Reset guest information
Set annotation
Toggle disk change tracking
Toggle fork parent
Upgrade VM compatibility| +| | Edit Inventory | Create from existing
Create new
Move
Register
Remove
Unregister | +| | Guest Operations | Alias modification
Alias query
Modify guest operations
Invoke programs
Queries | +| | Interaction | Console Interaction
Power on/off | +| | Provisioning | Allow disk access
Allow file access
Allow read-only disk access
Allow VM download
Allow VM files upload
Clone template
Clone VM
Create template from VM
Customize guest
Deploy template
Mark as template
Mark as VM
Modify customization specification
Promote disks
Read customization specifications | +| | Service Configuration| Allow notifications
Allow polling of global event notifications
Manage service configurations
Modify service configurations
Query service configurations
Read service configurations | +| | Snapshot Management | Create snapshot
Remove snapshot
Rename snapshot
Revert to snapshot | +| | vSphere Replication | Configure replication
Manage replication
Monitor replication | +| | vSAN | Cluster: ShallowRekey | + +
+ + + + + + +## Root-Level Role Privileges + +Root-level role privileges are only applied to root objects and Data center objects. + +| **vSphere Object**| **Privileges** | +|-------------------|---------------------------------------------| +| CNS | Searchable | +| Datastore | Browse datastore | +| Host | Configuration
Storage partition configuration| +| vSphere tagging | Create vSphere Tag
Edit vSphere Tag | +| Network | Assign network | +| Profile-driven storage | View | +| Sessions | Validate session | +| Storage views | View | + + +## Spectro Role Privileges + +Spectro role privileges listed in the table must be applied to the spectro-template folder, hosts, clusters, templates, datastore, network objects, and Virtual Machines (VMs). A separate table lists Spectro role privileges for VMs by category. + +
+ +:::info + +Palette downloads images and Open Virtual Appliance (OVA) files to the spectro-templates folder and clones images from it to create nodes. + +::: + +
+ +| **vSphere Object**| **Privileges** | +|-------------------|---------------------------------------------| +| CNS | Searchable | +| Datastore | Allocate space
Browse datastore
Low-level file operations
Remove file
Update VM files
Update VM metadata | +| Folder | Create Folder
Delete folder
Move folder
Rename folder| +| Host | Local operations: Reconfigure VM | +| Network | Assign network | +| Resource | Apply recommendation
Assign VM to resource pool
Migrate powered off VM
Migrate powered on VM
Query vMotion | +| Profile-driven storage | Profile-driven storage view | +| Sessions | Validate session | +| spectro-templates | Read only | +| Storage views | Configure service
View | +| Tasks | Create task
Update task | +| vApp | Import
View OVF environment
Configure vAPP applications
Configure vApp instances | +| vSphere tagging | Assign or Unassign vSphere Tag
Create vSphere Tag
Delete vSphere Tag
Edit vSphere Tag | + +
+ +The following table lists Spectro role privileges for VMs by category. + +| **vSphere Object**| **Category** | **Privileges** | +|-------------------|----------------------|--------------------| +| Virtual Machines | Change Configuration | Acquire disk lease
Add existing disk
Add new disk
Add or remove device
Advanced configuration
Change CPU count
Change memory
Change Settings
Change Swapfile placement
Change resource
Change host USB device
Configure Raw device
Configure managedBy
Display connection settings
Extend virtual disk
Modify device settings
Query fault tolerance compatibity
Query unowned files
Reload from path
Remove disk
Rename
Reset guest information
Set annotation
Toggle disk change tracking
Toggle fork parent
Upgrade VM compatibility| +| | Edit Inventory | Create from existing
Create new
Move
Register
Remove
Unregister | +| | Guest Operations | Alias modification
Alias query
Modify guest operations
Invoke programs
Query guest operations | +| | Interaction | Console Interaction
Power on/off | +| | Provisioning | Allow disk access
Allow file access
Allow read-only disk access
Allow VM download
Allow VM upload
Clone template
Clone VM
Create template from VM
Customize guest
Deploy template
Mark as template
Modify customization specifications
Promote disks
Read customization specifications | +| | Service Configuration| Allow notifications
Allow polling of global event notifications
Manage service configurations
Modify service configurations
Query service configurations
Read service configurations | +| | Snapshot Management | Create snapshot
Remove snapshot
Rename snapshot
Revert to snapshot | +| | vSphere Replication | Configure replication
Manage replication
Monitor replication | +| | vSAN | Cluster
ShallowRekey | + + +
+ + + + + +## Root-Level Role Privileges + +Root-level role privileges are only applied to root objects and Data center objects. + +| **vSphere Object**| **Privileges** | +|-------------------|---------------------------------------------| +| CNS | Searchable | +| Datastore | Browse datastore | +| Host | Configuration
Storage partition configuration| +| vSphere tagging | Create vSphere Tag
Edit vSphere Tag | +| Network | Assign network | +| Profile-driven storage | Profile-driven storage view | +| Sessions | Validate session | +| Storage views | View | + + +## Spectro Role Privileges + +Spectro role privileges listed in the table must be applied to the spectro-template folder, hosts, clusters, templates, datastore, network objects, and Virtual Machines (VMs). A separate table lists Spectro role privileges for VMs by category. + +
+ +:::info + +Palette downloads images and Open Virtual Appliance (OVA) files to the spectro-templates folder and clones images from it to create nodes. + +::: + + +| **vSphere Object**| **Privileges** | +|-------------------|---------------------------------------------| +| CNS | Searchable | +| Datastore | Allocate space
Browse datastore
Low-level file operations
Remove file
Update VM files
Update VM metadata | +| Folder | Create Folder
Delete folder
Move folder
Rename folder| +| Host | Local operations: Reconfigure VM | +| Network | Assign network | +| Profile-driven storage | Profile-driven storage view | +| Resource | Apply recommendation
Assign VM to resource pool
Migrate powered off VM
Migrate powered on VM
Query vMotion | +| Sessions | Validate session | +| spectro-templates | Read only | +| Storage views | View | +| Tasks | Create task
Update task | +| vApp | Import
View OVF environment
Configure vAPP applications
Configure vApp instances | +| vSphere tagging | Assign or Unassign vSphere Tag
Create vSphere Tag
Delete vSphere Tag
Edit vSphere Tag | + +
+ +The following table lists Spectro role privileges for VMs by category. + +| **vSphere Object**| **Category** | **Privileges** | +|-------------------|----------------------|--------------------| +| Virtual Machines | Change Configuration | Acquire disk lease
Add existing disk
Add new disk
Add or remove device
Advanced configuration
Change CPU count
Change memory
Change Settings
Change Swapfile placement
Change resource
Change host USB device
Configure Raw device
Configure managedBy
Display connection settings
Extend virtual disk
Modify device settings
Query fault tolerance compatibity
Query unowned files
Reload from path
Remove disk
Rename
Reset guest information
Set annotation
Toggle disk change tracking
Toggle fork parent
Upgrade VM compatibility| +| | Edit Inventory | Create from existing
Create new
Move
Register
Remove
Unregister | +| | Guest Operations | Alias modification
Alias query
Modify guest operations
Invoke programs
Query guest operations | +| | Interaction | Console Interaction
Power on/off | +| | Provisioning | Allow disk access
Allow file access
Allow read-only disk access
Allow VM download
Allow VM upload
Clone template
Clone VM
Create template from VM
Customize guest
Deploy template
Mark as template
Modify customization specifications
Promote disks
Read customization specifications | +| | Service Configuration| Allow notifications
Allow polling of global event notifications
Manage service configurations
Modify service configurations
Query service configurations
Read service configurations | +| | Snapshot Management | Create snapshot
Remove snapshot
Rename snapshot
Revert to snapshot | +| | vSphere Replication | Configure replication
Manage replication
Monitor replication | +| | vSAN | Cluster
ShallowRekey | + +
+ + +
+ +
+ +
+ + +## Zone Tagging + +Zone tagging is required for dynamic storage allocation across fault domains when provisioning workloads that require persistent storage. This is required to install the Palette Platform itself and is also helpful for workloads deployed in the tenant clusters if they have persistent storage needs. Use vSphere tags on data centers (k8s-region) and compute clusters (k8s-zone) to create distinct zones in your environment. + +For example, assume your vCenter environment includes a data center dc-1 with three compute clusters: cluster-1, cluster-2, and cluster-3. The table below shows the tag category and tag value to apply to each vSphere object. + + | **vSphere Object** | **Tag Category** | **Tag Value** | +|--------------------|------------------|---------------| +| dc-1 | k8s-region | region1 | +| cluster-1 | k8s-zone | az1 | +| cluster-2 | k8s-zone | az2 | +| cluster-3 | k8s-zone | az3 | + + + +:::info + +The exact values for the k8s-region and k8s-zone tags can differ from the ones in the example above, as long as they are unique. + +::: + 
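+
+One way to create and attach these tags from the command line is with the `govc` CLI. The following is a sketch only, using the example names from the table above; the inventory paths are assumptions, so adjust them to match your vCenter hierarchy.
+
+```shell
+govc tags.category.create k8s-region
+govc tags.category.create k8s-zone
+govc tags.create -c k8s-region region1
+govc tags.create -c k8s-zone az1
+govc tags.attach region1 /dc-1
+govc tags.attach az1 /dc-1/host/cluster-1
+```
+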
+ +## Naming Conventions for vSphere Region and Zone Tags + +The following requirements apply to tags: + +- A valid tag must consist of alphanumeric characters. + + +- The tag must start and end with an alphanumeric character. + + +- The regex used for validation is ``(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?`` \ No newline at end of file diff --git a/docs/docs-content/vertex/install-palette-vertex/install-palette-vertex.md b/docs/docs-content/vertex/install-palette-vertex/install-palette-vertex.md new file mode 100644 index 0000000000..117178bb0e --- /dev/null +++ b/docs/docs-content/vertex/install-palette-vertex/install-palette-vertex.md @@ -0,0 +1,85 @@ +--- +sidebar_label: "Installation" +title: "Installation" +description: "Review Palette VerteX system requirements." +icon: "" +hide_table_of_contents: false +tags: ["vertex"] +--- + + +Palette VerteX is available as a self-hosted application that you install in your environment. The self-hosted version is a dedicated Palette VerteX environment hosted on VMware instances or in an existing Kubernetes cluster. Palette VerteX is available in the following modes: + +| **Supported Platform** | **Description** | +|------------------------|------------------------------------| +| VMware | Install Palette VerteX in a VMware environment. | +| Kubernetes | Install Palette VerteX using a Helm Chart in an existing Kubernetes cluster. | +The next sections describe specific requirements for installing Palette VerteX. + +## Proxy Requirements + +- A proxy used for outgoing connections should support both HTTP and HTTPS traffic. + + +- Allow connectivity to the domains and ports listed in the table below. + 
+ + | **Top-Level Domain** | **Port** | **Description** | + |----------------------------|----------|-------------------------------------------------| + | spectrocloud.com | 443 | Spectro Cloud content repository and pack registry | + | s3.amazonaws.com | 443 | Spectro Cloud VMware OVA files | + | gcr.io | 443 | Spectro Cloud and common third party container images | + | ghcr.io | 443 | Kubernetes VIP images | + | docker.io | 443 | Common third party content | + | googleapis.com | 443 | For pulling Spectro Cloud images | + | docker.com | 443 | Common third party container images | + | raw.githubusercontent.com | 443 | Common third party content | + | projectcalico.org | 443 | Calico container images | + | quay.io | 443 | Common 3rd party container images | + | grafana.com | 443 | Grafana container images and manifests | + | github.com | 443 | Common third party content | + + +## Size Guidelines + +This section lists resource requirements for Palette VerteX for various capacity levels. In Palette VerteX, the terms *small*, *medium*, and *large* are used to describe the instance size of worker pools that Palette VerteX is installed on. The following table lists the resource requirements for each size. + + +

:::caution

Do not exceed the recommended maximum number of deployed nodes and clusters in the environment. We have tested the performance of Palette VerteX up to these recommended maximums, and exceeding them can negatively impact performance and result in instability. The active workload limit refers to the maximum number of active nodes and pods at any given time.

:::

<br />
+ + + +| **Size** | **Nodes**| **CPU**| **Memory**| **Storage**| **MongoDB Storage Limit**| **MongoDB Memory Limit**| **MongoDB CPU Limit** |**Total Deployed Nodes**| **Deployed Clusters with 10 Nodes**| +|----------|----------|--------|-----------|------------|--------------------|-------------------|------------------|----------------------------|----------------------| +| Small | 3 | 8 | 16 GB | 60 GB | 20 GB | 4 GB | 2 | 1000 | 100 | +| Medium (Recommended) | 3 | 16 | 32 GB | 100 GB | 60 GB | 8 GB | 4 | 3000 | 300 | +| Large | 3 | 32 | 64 GB | 120 GB | 80 GB | 12 GB | 6 | 5000 | 500 | + + +#### Instance Sizing + +| **Configuration** | **Active Workload Limit** | +|---------------------|---------------------------------------------------| +| Small | Up to 1000 Nodes each with 30 Pods (30,000 Pods) | +| Medium (Recommended) | Up to 3000 Nodes each with 30 Pods (90,000 Pods)| +| Large | Up to 5000 Nodes each with 30 Pods (150,000 Pods) | + + +
+ +## Resources + +- [Install on VMware vSphere](install-on-vmware/install-on-vmware.md) + + +- [Install Using Helm Chart](install-on-kubernetes/install-on-kubernetes.md) \ No newline at end of file diff --git a/docs/docs-content/vertex/system-management/_category_.json b/docs/docs-content/vertex/system-management/_category_.json new file mode 100644 index 0000000000..455b8e4969 --- /dev/null +++ b/docs/docs-content/vertex/system-management/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 20 +} diff --git a/docs/docs-content/vertex/system-management/enable-non-fips-settings/_category_.json b/docs/docs-content/vertex/system-management/enable-non-fips-settings/_category_.json new file mode 100644 index 0000000000..3fca6fb9f9 --- /dev/null +++ b/docs/docs-content/vertex/system-management/enable-non-fips-settings/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 0 +} diff --git a/docs/docs-content/vertex/system-management/enable-non-fips-settings/allow-cluster-import.md b/docs/docs-content/vertex/system-management/enable-non-fips-settings/allow-cluster-import.md new file mode 100644 index 0000000000..1168916f40 --- /dev/null +++ b/docs/docs-content/vertex/system-management/enable-non-fips-settings/allow-cluster-import.md @@ -0,0 +1,65 @@ +--- +sidebar_label: "Allow Cluster Import" +title: "Allow Cluster Import" +description: "Learn how to import clusters to Palette VerteX." +icon: "" +hide_table_of_contents: false +sidebar_position: 20 +tags: ["vertex", "non-fips"] +--- + + + + +You can allow tenant users to import Kubernetes clusters that were not deployed through Palette, including some that *are not* FIPS compliant or are only *partially* compliant. Prior to enabling cluster import, the **Import Cluster** option is not available. + +Palette VerteX displays icons next to clusters to indicate their FIPS compliance status or when FIPS compliance cannot be confirmed. To learn about icons that Palette VerteX applies, refer to [FIPS Status Icons](../../fips/fips-status-icons.md). + + + +## Prerequisites + +- You need tenant admin permission to enable this feature. + + +- Refer to [Cluster Import Prerequisites](../../../clusters/imported-clusters/cluster-import.md#prerequisites). + + +## Allow non-FIPS Cluster Import + + +1. Log in to [Palette VerteX](https://console.spectrocloud.com/) as a tenant admin. + + +2. Navigate to the left **Main Menu** and click on **Tenant Settings**. Next, on the **Tenant Settings Menu**, select **Platform Settings**. + + +3. Enable the **Allow non-FIPS cluster import** option. When you enable this option, you are prompted to confirm importing clusters into the tenant that may not be FIPS-compliant. + +![Diagram showing the Allow non-FIPS cluster import toggle enabled.](/vertex_use-non-fips-settings_nonFips-cluster-import.png) + +To disable the setting, toggle this option off and confirm you want to disable it. + +Refer to [Import a Cluster](../../../clusters/imported-clusters/cluster-import.md) for guidance. Check out [Import Modes](../../../clusters/imported-clusters/imported-clusters.md#import-modes) to learn about various import modes and limitations to be aware of. + + +## Validate + +1. Log in to [Palette VerteX](https://console.spectrocloud.com/). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Click on **Add New Cluster**. The **Import Cluster** option is now displayed on the **Create a New Cluster** page. 
+ + +## Resources + +- [Import a Cluster](../../../clusters/imported-clusters/cluster-import.md) + + +- [Import Modes](../../../clusters/imported-clusters/imported-clusters.md#import-modes) + + +- [Cluster Import Limitations](../../../clusters/imported-clusters/imported-clusters.md#limitations) \ No newline at end of file diff --git a/docs/docs-content/vertex/system-management/enable-non-fips-settings/enable-non-fips-settings.md b/docs/docs-content/vertex/system-management/enable-non-fips-settings/enable-non-fips-settings.md new file mode 100644 index 0000000000..f3bc781139 --- /dev/null +++ b/docs/docs-content/vertex/system-management/enable-non-fips-settings/enable-non-fips-settings.md @@ -0,0 +1,31 @@ +--- +sidebar_label: "Enable non-FIPS Settings" +title: "Enable non-FIPS Settings" +description: "Enable settings in Palette VerteX that allow you to use non-FIPS resources and perform non-FIPS compliant actions." +icon: "" +hide_table_of_contents: false +tags: ["vertex", "non-fips"] +--- + + + + +Palette VerteX is FIPS-enforced by default, incorporating the Spectro Cloud Cryptographic Module into the Kubernetes Management Platform and the infrastructure components of target clusters. To learn more about our cryptographic library, check out [FIPS 140-2 Certification](../../../compliance.md#fips-140-2). + +If desired, you can allow the consumption of certain non-FIPS functionality in Palette VerteX at the tenant level. **Platform Settings** at the tenant level provide toggles to allow non-FIPS-compliant add-on packs and non-FIPS features such as scans, backup, and restore. You can also allow importing clusters created external to Palette. + + +## Resources + +- [Use non-FIPS Add-On Packs](../../system-management/enable-non-fips-settings/use-non-fips-addon-packs.md) + + +- [Use non-FIPS Features](../../system-management/enable-non-fips-settings/use-non-fips-features.md) + + +- [Allow Cluster Import](../../system-management/enable-non-fips-settings/allow-cluster-import.md) + + +- [Spectro Cloud FIPS 140-2 Certification](../../../compliance.md#fips-140-2) + + diff --git a/docs/docs-content/vertex/system-management/enable-non-fips-settings/use-non-fips-addon-packs.md b/docs/docs-content/vertex/system-management/enable-non-fips-settings/use-non-fips-addon-packs.md new file mode 100644 index 0000000000..5878f1ff58 --- /dev/null +++ b/docs/docs-content/vertex/system-management/enable-non-fips-settings/use-non-fips-addon-packs.md @@ -0,0 +1,92 @@ +--- +sidebar_label: "Use non-FIPS Add-On Packs" +title: "Use non-FIPS Add-On Packs" +description: "Add non-FIPS add-on packs to VerteX cluster profiles." +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["vertex", "non-fips"] +--- + + + + +Palette VerteX provides the following FIPS-compliant infrastructure components in Kubernetes clusters it deploys. Review [FIPS-Compliant Components](../../fips/fips-compliant-components.md) to learn more. + +
+ + +- Operating System (OS) + +- Kubernetes + +- Container Network Interface (CNI) + +- Container Storage Interface (CSI) + +As shown in the screenshot below, the FIPS-compliant icon used to indicate full FIPS compliance is displayed next to Palette VerteX infrastructure components in the cluster profile stack. To learn about other icons Palette VerteX applies, refer to [FIPS Status Icons](../../fips/fips-status-icons.md). + +![Diagram showing FIPS-compliant icons in profile stack.](/vertex_fips-status-icons_icons-in-profile-stack.png) + +You can allow tenant users to customize their cluster profiles by using add-on packs, which *may not* be FIPS compliant. Add-on packs enhance cluster functionality by adding profile layers such as system apps, authentication, security, monitoring, logging, and more. + + +## Prerequisites + +- You need tenant admin permission to enable this feature. + + +## Allow Non-FIPS Add-On Packs + + +1. Log in to [Palette VerteX](https://console.spectrocloud.com/) as a tenant admin. + + +2. Navigate to the left **Main Menu** and click on **Tenant Settings**. + + +3. On the **Tenant Settings Menu**, select **Platform Settings**. + + +4. Enable the **Allow non-FIPS add-on packs** option. When you enable this option, you are prompted to confirm the use of non-FIPS add-on packs for the tenant. + + +![Diagram showing the Allow non-FIPS add-on packs toggle enabled.](/vertex_use-non-fips-settings_nonFips-addon-packs.png) + + +To disable the setting, toggle this option off and confirm you want to disable it. + +When you or other users add a pack to a cluster profile. Palette VerteX will apply the appropriate icon next to packs and imported clusters to indicate their FIPS compliance status. + + +## Validate + + +1. Log in to [Palette VerteX](https://console.spectrocloud.com/). + + +2. Navigate to the left **Main Menu** and select **Profiles**. When you select a profile, the **Add New Pack** option is available. + + + +3. Navigate back to the **Main Menu** and re-select **Profiles**. + + +4. Click the **Add Cluster Profile** button. The **Add-on** option is available in the wizard. + + +Palette VerteX will display the appropriate FIPS status icon next to the pack layer and in the profile stack. + + +## Resources + +- [Packs List](../../../integrations/integrations.mdx) + + +- [Create an Add-on Profile](../../../cluster-profiles/create-add-on-profile.md) + + +- [FIPS Status Icons](../../fips/fips-status-icons.md) + + + diff --git a/docs/docs-content/vertex/system-management/enable-non-fips-settings/use-non-fips-features.md b/docs/docs-content/vertex/system-management/enable-non-fips-settings/use-non-fips-features.md new file mode 100644 index 0000000000..e2db8ea908 --- /dev/null +++ b/docs/docs-content/vertex/system-management/enable-non-fips-settings/use-non-fips-features.md @@ -0,0 +1,63 @@ +--- +sidebar_label: "Use non-FIPS Features" +title: "Use non-FIPS Features" +description: "Use non-FIPS features such as backup, restore, and scans." +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["vertex", "non-fips"] +--- + + + +You can allow tenant users access to Palette features that are *not* FIPS-compliant, such as tenant cluster backup and restore or various scanning capabilities for compliance, security, validation, and software bill of materials (SBOM). Prior to enabling non-FIPS features, the **Scan** and **Backups** tabs are not displayed on the **Cluster Overview** page. + +## Prerequisites + +- You need tenant admin permission to enable this feature. 
+ + +- Palette can back up clusters to several locations. To learn about backup requirements, review [Backup-Restore](../../../clusters/cluster-management/backup-restore/backup-restore.md). + + +- There are no prerequisites for restoring clusters or performing scans. + + +## Allow non-FIPS Features + + +1. Log in to [Palette VerteX](https://console.spectrocloud.com/) as a tenant admin. + + +2. Navigate to the left **Main Menu** and click on **Tenant Settings**. + + +3. On the **Tenant Settings Menu**, select **Platform Settings**. + + +4. Enable the **Allow non-FIPS features** option. When you enable this option, you are prompted to confirm the use of non-FIPS features for the tenant. + +![Diagram showing the Allow non-FIPS features toggle enabled.](/vertex_use-non-fips-settings_nonFips-features.png) + + +To disable the setting, toggle this option off and confirm you want to disable it. + +## Validate + + +1. Log in to [Palette VerteX](https://console.spectrocloud.com/). + + +2. Navigate to the left **Main Menu** and click on **Clusters**. + + +3. Select your cluster in the list. The **Scan** and **Backups** tabs are now displayed on the **Cluster Overview** page. + + +## Resources + +- [Cluster Backup and Restore](../../../clusters/cluster-management/backup-restore/backup-restore.md) + + +- [Scans](../../../clusters/cluster-management/compliance-scan.md) + diff --git a/docs/docs-content/vertex/system-management/reverse-proxy.md b/docs/docs-content/vertex/system-management/reverse-proxy.md new file mode 100644 index 0000000000..df8b318097 --- /dev/null +++ b/docs/docs-content/vertex/system-management/reverse-proxy.md @@ -0,0 +1,255 @@ +--- +sidebar_label: "Configure Reverse Proxy" +title: "Configure Reverse Proxy" +description: "Learn how to configure a reverse proxy for Palette VerteX." +icon: "" +hide_table_of_contents: false +sidebar_position: 40 +tags: ["vertex", "management"] +--- + + + +You can configure a reverse proxy for Palette VerteX. The reverse proxy can be used by host clusters deployed in a private network. Host clusters deployed in a private network are not accessible from the public internet or by users in different networks. You can use a reverse proxy to access the cluster's Kubernetes API server from a different network. + +When you configure reverse proxy server for Palette VerteX, clusters that use the [Spectro Proxy pack](../../integrations/frp.md) will use the reverse proxy server address in the kubeconfig file. Clusters not using the Spectro Proxy pack will use the default cluster address in the kubeconfig file. + + +Use the following steps to configure a reverse proxy server for Palette VerteX. + +## Prerequisites + + +- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) is installed and available. + + +- [Helm](https://helm.sh/docs/intro/install/) is installed and available. + + +- Access to the kubeconfig file of the Palette VerteX Kubernetes cluster. You can download the kubeconfig file from the Palette VerteX system console. Navigate to **Enterprise System Migration**, select the Palette VerteX cluster, and click the **Download Kubeconfig** button for the cluster. + + +- A domain name that you can use for the reverse proxy server. You will also need access to the DNS records for the domain so that you can create a CNAME DNS record for the reverse proxy server load balancer. + + +- Ensure you have an SSL certificate that matches the domain name you will assign to Spectro Proxy. You will need this to enable HTTPS encryption for the Spectro Proxy. 
Contact your network administrator or security team to obtain the SSL certificate. You need the following files: + - x509 SSL certificate file in base64 format. + + - x509 SSL certificate key file in base64 format. + + - x509 SSL certificate authority file in base64 format. + + +- The Spectro Proxy server must have internet access and network connectivity to the private network where the Kubernetes clusters are deployed. + + +## Enablement + +1. Open a terminal session and navigate to the directory where you stored the **values.yaml** for the Palette VerteX installation. + + +2. Use a text editor and open the **values.yaml** file. Locate the `frps` section and update the following values in the **values.yaml** file. Refer to the [Spectro Proxy Helm Configuration](../install-palette-vertex/install-on-kubernetes/vertex-helm-ref.md#spectro-proxy) to learn more about the configuration options. + +
+ + | **Parameter** | **Description** | **Type** | + | --- | --- | ---| + | `enabled`| Set to `true` to enable the Spectro Proxy server. | boolean | + | `frps.frpHostURL`| The domain name you will use for the Spectro Proxy server. For example, `frps.example.com`. | + | `server.crt`| The x509 SSL certificate file in base64 format. | + | `server.key`| The x509 SSL certificate key file in base64 format. | + | `ca.crt`| The x509 SSL certificate authority file in base64 format. | + +
+ + The following is an example of the `frps` section in the **values.yaml** file. The SSL certificate files are truncated for brevity. + +

   ```yaml
   frps:
     frps:
       enabled: true
       frpHostURL: "frps.vertex.example.com"
       server:
         crt: "LS0tLS1CRU...........tCg=="
         key: "LS0tLS1CRU...........tCg=="
         ca:
           crt: "LS0tLS1CRU...........tCg=="
   ```


3. Issue the `helm upgrade` command to update the Palette VerteX Kubernetes configuration. The command below assumes you are in the folder that contains the **values.yaml** file and the Palette VerteX Helm chart. Change the directory path if needed.

<br />
+ + ```bash + helm upgrade --values values.yaml hubble spectro-mgmt-plane-0.0.0.tgz --install + ``` + + +4. After the new configurations are accepted, use the following command to get the Spectro Proxy server's load balancer IP address. + +

   ```bash
   kubectl get svc --namespace proxy-system spectro-proxy-svc
   ```

5. Update the DNS records for the domain name you used for the Spectro Proxy server. Create a CNAME record that points to the Spectro Proxy server's load balancer hostname, or an A record that points to its IP address.


6. Log in to the Palette VerteX System API by using the `/v1/auth/syslogin` endpoint. Use the `curl` command below and replace the URL with the custom domain URL you assigned to Palette VerteX or use the IP address. Ensure you replace the credentials below with your system console credentials.

<br />
+ + ```bash + curl --insecure --location 'https://vertex.example.com/v1/auth/syslogin' \ + --header 'Content-Type: application/json' \ + --data '{ + "password": "**********", + "username": "**********" + }' + ``` + Output + ```json hideClipboard + { + "Authorization": "**********.", + "IsPasswordReset": true + } + ``` + +7. Using the output you received, copy the authorization value to your clipboard and assign it to a shell variable. Replace the authorization value below with the value from the output. + +
+ + ```shell hideClipboard + TOKEN=********** + ``` + +8. Next, prepare a payload for the`/v1/system/config/` endpoint. This endpoint is used to configure Palette VerteX to use a reverse proxy. The payload requires the following parameters: + +
+ + | **Parameter** | **Description** | **Type** | + | --- | --- | --- | + | `caCert`| The x509 SSL certificate authority file in base64 format. | string | + | `clientCert`| The x509 SSL certificate file in base64 format. | string | + | `clientKey`| The x509 SSL certificate key file in base64 format. | string | + | `port` | The port number for the reverse proxy server. We recommend using port `443`. | integer | + | `protocol` | The protocol to use for the reverse proxy server. We recommend using `https`. | string | + | `server`| The domain name you will use for the Spectro Proxy server. For example, `frps.example.com`. Don't include the HTTP schema in the value. | string | + + The following is an example payload. The SSL certificate files are truncated for brevity. + +
+ + ```json hideClipboard + { + "caCert": "-----BEGIN CERTIFICATE-----\n.............\n-----END CERTIFICATE-----", + "clientCert": "-----BEGIN CERTIFICATE-----\n..........\n-----END CERTIFICATE-----", + "clientKey": "-----BEGIN RSA PRIVATE KEY-----\n........\n-----END RSA PRIVATE KEY-----", + "port": 443, + "protocol": "https", + "server": "frps.vertex.example.com.com" + } + ``` + + :::info + + You can save the payload to a file and use the `cat` command to read the file contents into the `curl` command. For example, if you save the payload to a file named `payload.json`, you can use the following command to read the file contents into the `curl` command. You can also save the payload as a shell variable and use the variable in the `curl` command. + + ::: + + +
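   For instance, assuming you saved the example payload to a file named **payload.json**, you could load it into a shell variable and then pass `--data "$PAYLOAD"` in the next step instead of pasting the JSON inline.

   ```shell
   # Example only: read the request payload from payload.json into a shell variable.
   PAYLOAD="$(cat payload.json)"
   ```
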
+ +9. Issue a PUT request using the following `curl` command. Replace the URL with the custom domain URL you assigned to Palette VerteX or use the IP address. You can use the `TOKEN` variable you created earlier for the authorization header. Ensure you replace the payload below with the payload you created in the previous step. + +
+ + ```bash + curl --insecure --silent --include --output /dev/null -w "%{http_code}" --location --request PUT 'https://vertex.example.com/v1/system/config/reverseproxy' \ + --header "Authorization: $TOKEN" \ + --header 'Content-Type: application/json' \ + --data ' { + "caCert": "-----BEGIN CERTIFICATE-----\n................\n-----END CERTIFICATE-----\n", + "clientCert": "-----BEGIN CERTIFICATE-----\n.............\n-----END CERTIFICATE-----", + "clientKey": "-----BEGIN RSA PRIVATE KEY-----\n............\n-----END RSA PRIVATE KEY-----\n", + "port": 443, + "protocol": "https", + "server": "frps.vertex.example.com.com" + }' + ``` + + A successful response returns a `204` status code. + + Output + ```shell hideClipboard + 204 + ``` + +You now have a Spectro Proxy server that you can use to access Palette VerteX clusters deployed in a different network. Make sure you add the [Spectro Proxy pack](../../integrations/frp.md) to the clusters you want to access using the Spectro Proxy server. + + +## Validate + +Use the following command to validate that the Spectro Proxy server is active. + +
+ + + +1. Open a terminal session. + + +2. Log in to the Palette VerteX System API by using the `/v1/auth/syslogin` endpoint. Use the `curl` command below and replace the URL with the custom domain URL you assigned to Palette VerteX or use the IP address. Ensure you replace the credentials below with your system console credentials. + +
+ + ```bash + curl --insecure --location 'https://vertex.example.com/v1/auth/syslogin' \ + --header 'Content-Type: application/json' \ + --data '{ + "password": "**********", + "username": "**********" + }' + ``` + Output + ```json hideClipboard + { + "Authorization": "**********.", + "IsPasswordReset": true + } + ``` + +3. Using the output you received, copy the authorization value to your clipboard and assign it to a shell variable. Replace the authorization value below with the value from the output. + +
+ + ```shell hideClipboard + TOKEN=********** + ``` + +4. Query the system API endpoint `/v1/system/config/reverseproxy` to verify the current reverse proxy settings applied to Palette VerteX. Use the `curl` command below and replace the URL with the custom domain URL you assigned to Palette VerteX or use the IP address. You can use the `TOKEN` variable you created earlier for the authorization header. + +
+ + ```bash + curl --location --request GET 'https://vertex.example.com/v1/system/config/reverseproxy' \ + --header "Authorization: $TOKEN" + ``` + + If the proxy server is configured correctly, you will receive an output similar to the following containing your settings. The SSL certificate outputs are truncated for brevity. + +
+ + ```json hideClipboard + { + "caCert": "-----BEGIN CERTIFICATE-----\n...............\n-----END CERTIFICATE-----\n", + "clientCert": "-----BEGIN CERTIFICATE-----\n...........\n-----END CERTIFICATE-----", + "clientKey": "-----BEGIN RSA PRIVATE KEY-----\n........\n-----END RSA PRIVATE KEY-----\n", + "port": 443, + "protocol": "https", + "server": "frps.vertex.example.com" + } + ``` \ No newline at end of file diff --git a/docs/docs-content/vertex/system-management/ssl-certificate-management.md b/docs/docs-content/vertex/system-management/ssl-certificate-management.md new file mode 100644 index 0000000000..96f0bb272d --- /dev/null +++ b/docs/docs-content/vertex/system-management/ssl-certificate-management.md @@ -0,0 +1,85 @@ +--- +sidebar_label: "SSL Certificate Management" +title: "SSL Certificate" +description: "Upload and manage SSL certificates in Palette VerteX." +icon: "" +hide_table_of_contents: false +sidebar_position: 30 +tags: ["vertex", "management"] +--- + + +When you install Palette VerteX, a self-signed certificate is generated and used by default. You can upload your own SSL certificate to replace the default certificate. + +Palette VerteX uses SSL certificates to secure external communication. Palette VerteX's internal communication is default secured by default and uses HTTPS. External communication with Palette VerteX, such as the system console, gRPC endpoint, and API endpoint, requires you to upload an SSL certificate to enable HTTPS. + +
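If you want to review a certificate before uploading it, for example to confirm that it is PEM-encoded and contains the chain you expect, you can inspect it locally with `openssl`. The file name below is illustrative.

```shell
# Example only: print the contents of a PEM-encoded certificate in human-readable form.
openssl x509 -in certificate.pem -noout -text
```
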
+ +:::info + +Enabling HTTPS is a non-disruptive operation. You can enable HTTPS at any time without affecting the system's functionality. + +::: + + +## Upload an SSL Certificate + +You can upload an SSL certificate in Palette VerteX by using the following steps. + + +## Prerequisites + +- Access to the Palette VerteX system console. + + +- You need to have an x509 certificate and a key file in PEM format. The certificate file must contain the full certificate chain. Reach out to your network administrator or security team if you do not have these files. + + +- Ensure the certificate is created for the custom domain name you specified for your Palette VerteX installation. If you did not specify a custom domain name, the certificate must be created for the Palette VerteX system console's IP address. You can also specify a load balancer's IP address if you are using a load balancer to access Palette VerteX. + + +## Enablement + +1. Log in to the Palette VerteX system console. + + +2. Navigate to the left **Main Menu** and select **Administration**. + + +3. Select the tab titled **Certificates**. + + +4. Copy and paste the certificate into the **Certificate** field. + + +5. Copy and paste the certificate key into the **Key** field. + + +6. Copy and paste the certificate authority into the **Certificate authority** field. + + +
+ + ![A view of the certificate upload screen](/vertex_system-management_ssl-certifiacte-management_certificate-upload.png) + +
+ +7. Save your changes. + +If the certificate is invalid, you will receive an error message. Once the certificate is uploaded successfully, Palette VerteX will refresh its listening ports and start using the new certificate. + + +## Validate + +You can validate that your certificate is uploaded correctly by using the following steps. + +
+ + +1. Log out of the Palette VerteX system console. If you are already logged in, log out and close your browser session. Browsers cache connections and may not use the newly enabled HTTPS connection. Closing your existing browser session avoids issues related to your browser caching an HTTP connection. + + +2. Log back into the Palette VerteX system console. Ensure the connection is secure by checking the URL. The URL should start with `https://`. + + +Palette VerteX is now using your uploaded certificate to create a secure HTTPS connection with external clients. Users can now securely access the system console, gRPC endpoint, and API endpoint. \ No newline at end of file diff --git a/docs/docs-content/vertex/system-management/system-management.md b/docs/docs-content/vertex/system-management/system-management.md new file mode 100644 index 0000000000..dc24511fbc --- /dev/null +++ b/docs/docs-content/vertex/system-management/system-management.md @@ -0,0 +1,37 @@ +--- +sidebar_label: "System Management" +title: "System Management" +description: "Manage your Palette VerteX system settings." +icon: "" +hide_table_of_contents: false +sidebar_position: 20 +tags: ["vertex", "management"] +--- + +Palette VerteX contains many system settings you can configure to meet your organization's needs. These settings are available at the system level and are applied to all [tenants](../../glossary-all.md#tenant) in the system. + +You can access the system setting by visiting the IP address or the custom domain name assigned to your Palette VerteX cluster and appending the `/system` path to the URL. For example, if your Palette VerteX cluster is hosted at `https://vertex.abc.com`, you can access the system settings at `https://vertex.abc.com/system`. + + +
+ +![View of the VerteX system console landing page.](/vertex_system-management_overview-system-console.png) + +
+ +:::caution + +Exercise caution when changing system settings as the changes will be applied to all tenants in the system. + +::: + + +## Resources + +* [Enable non-FIPS Settings](enable-non-fips-settings/enable-non-fips-settings.md) + + +* [Tenant Management](../system-management/tenant-management.md) + + +* [SSL Certificate Management](../system-management/ssl-certificate-management.md) diff --git a/docs/docs-content/vertex/system-management/tenant-management.md b/docs/docs-content/vertex/system-management/tenant-management.md new file mode 100644 index 0000000000..1f3db4ee0d --- /dev/null +++ b/docs/docs-content/vertex/system-management/tenant-management.md @@ -0,0 +1,119 @@ +--- +sidebar_label: "Tenant Management" +title: "Tenant Management" +description: "Learn how to create and remove tenants in Palette VerteX." +icon: "" +hide_table_of_contents: false +sidebar_position: 10 +tags: ["vertex", "management"] +--- + + +Tenants are isolated environments in Palette VerteX that contain their own clusters, users, and resources. You can create multiple tenants in Palette VerteX to support multiple teams or projects. Instructions for creating and removing tenants are provided below. + + +
+ +## Create a Tenant + +You can create a tenant in Palette VerteX by following these steps. + + +## Prerequisites + +* Access to the Palette VerteX system console. + + +## Enablement + +1. Log in to the Palette VerteX system console. + + +2. Navigate to the left **Main Menu** and select **Tenant Management**. + + +3. Click **Create New Tenant**. + + +4. Fill out the **Org Name** and the properties of the admin user by providing the **First Name**, **Last Name**, and **Email**. + + +5. Confirm your changes. + + +6. From the tenant list view, find your newly created tenant and click on the **three dots Menu**. Select **Activate** to activate the tenant. + +
+ + ![View of a tenant activation option](/vertex_system-management_tenant-management_activate-tenant.png) + +
+ +7. A pop-up box will present you with an activation URL. Copy the URL and paste it into your browser to activate the tenant. + + +8. Provide the admin user with a new password. + + +9. Log in to the tenant console using the admin user credentials. + + +## Validate + +1. Log in to Palette VerteX. + + +2. Verify you can access the tenant as the admin user. + + + +## Remove a Tenant + +You can remove a tenant in Palette VerteX using the following steps. + +## Prerequisites + +* Access to the Palette VerteX system console. + +## Removal + +1. Log in to the Palette VerteX system console. + + +2. Navigate to the left **Main Menu** and select **Tenant Management**. + + +3. From the tenant list view, select the tenant you want to remove and click on the **three dots Menu**. + + +4. Select **Delete** to prepare the tenant for removal. + + +5. Click on your tenant's **three dots Menu** and select **Clean up** to remove all the tenant's resources. + +
+ + ![View of a tenant deletion option](/vertex_system-management_tenant-management_remove-tenant.png) + + +
+ + :::caution + + If you do not clean up the tenant's resources, such as clusters and Private Cloud Gateways (PCGs), the tenant will remain in a **Deleting** state. You can use **Force Cleanup & Delete** to proceed with deletion without manually cleaning up tenant resources. + + ::: + + +After the cleanup process completes, the tenant will be removed from the tenant list view. + +## Validate + + +1. Log in to the Palette VerteX system console. + + +2. Navigate to the left **Main Menu** and select **Tenant Management**. + + +3. Validate that the tenant was removed by checking the tenant list view. \ No newline at end of file diff --git a/docs/docs-content/vertex/vertex.md b/docs/docs-content/vertex/vertex.md new file mode 100644 index 0000000000..b8a4f40028 --- /dev/null +++ b/docs/docs-content/vertex/vertex.md @@ -0,0 +1,46 @@ +--- +sidebar_label: "Palette VerteX" +title: "Palette VerteX" +description: "Learn how Palette VerteX enables regulated industries to meet stringent security requirements." +hide_table_of_contents: false +sidebar_custom_props: + icon: "shield" +tags: ["vertex"] +--- + + +Palette VerteX offers regulated industries, such as government and public sector organizations that handle sensitive and classified information simplicity, security, and scale in production Kubernetes. + +## FIPS-Compliant + +Palette VerteX integrates validated Federal Information Processing Standards (FIPS) 140-2 cryptographic modules in Kubernetes clusters it deploys to ensure robust data protection for your organization’s infrastructure and applications. To learn more about our FIPS 140-2 certification, review [Spectro Cloud Cryptographic Module](https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/4349). FIPS modules, which are accessible in our private artifact repository, extend Palette’s existing security features that include security scans, powerful RBAC, and tamper-proof edge device images. Palette VerteX protects sensitive data in clusters across edge, bare metal, on-prem data centers, air-gapped environments, and cloud. + + +To learn more about FIPS in Palette VerteX, check out the [FIPS](fips/fips.md) section. + +## Access Palette VerteX + +To set up a Palette VerteX account, contact our support team by sending an email to support@spectrocloud.com. Include the following information in your email: + +- Your full name +- Organization name (if applicable) +- Email address +- Phone number (optional) +- Target Platform (VMware or Kubernetes) +- A brief description of your intended use of VerteX + + +Our dedicated support team will promptly get in touch with you to provide the necessary assistance and share the installer image, credentials, and an endpoint URL to access the FIPS registry. 
+ +## Resources + +- [FIPS](fips/fips.md) + + +- [Installation](install-palette-vertex/install-palette-vertex.md) + + +- [System Management](system-management/system-management.md) + + + diff --git a/docs/docs-content/vm-management/_category_.json b/docs/docs-content/vm-management/_category_.json new file mode 100644 index 0000000000..79a194a9b1 --- /dev/null +++ b/docs/docs-content/vm-management/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 80 +} diff --git a/docs/docs-content/vm-management/create-manage-vm/_category_.json b/docs/docs-content/vm-management/create-manage-vm/_category_.json new file mode 100644 index 0000000000..094470741d --- /dev/null +++ b/docs/docs-content/vm-management/create-manage-vm/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 10 +} diff --git a/docs/docs-content/vm-management/create-manage-vm/access-cluster-with-virtctl.md b/docs/docs-content/vm-management/create-manage-vm/access-cluster-with-virtctl.md new file mode 100644 index 0000000000..f62196882b --- /dev/null +++ b/docs/docs-content/vm-management/create-manage-vm/access-cluster-with-virtctl.md @@ -0,0 +1,82 @@ +--- +sidebar_label: "Access VM Cluster with virtctl" +title: "Set up virtctl" +description: "Set up KubeVirt virtctl to facilitate VM operations in Palette Virtual Machine Orchestrator" +icon: " " +hide_table_of_contents: false +sidebar_position: 20 +tags: ["vmo"] +--- + + + +The virtctl command-line interface (CLI) tool facilitates some of the virtual machine (VM) operations you will perform by providing convenient commands for copying and pasting into the virtual console, starting and stopping VMs, live migrating VMs, and uploading VM disk images. + +The virtctl CLI also provides a lightweight Secure Copy Protocol (SCP) client with the `virtctl scp` command, which you can use to transfer files to and from a VM. Its usage is similar to the ssh command. + + +## Prerequisites + +- An active virtual cluster with Palette Virtual Machine Orchestrator (VMO). + + +- Access to the virtual cluster. + +## Download and Connect virtctl + +1. Download the most recent virtctl artifact based on your machine type from the official [KubeVirt Assets](https://github.com/kubevirt/kubevirt/releases/tag/v0.60.0-alpha.0). Scroll down to the **Assets** section. + + +2. Assign the execute permission to the virtctl command. + +
+ + ```shell + chmod +x virtctl + ``` +
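   Optionally, you can also move the binary to a directory on your `PATH` so the command is available from any location. The destination below is an example.

   ```shell
   # Example only: make virtctl available system-wide.
   sudo mv virtctl /usr/local/bin/virtctl
   ```
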
+ +3. Next, log in to [Palette](https://console.spectrocloud.com) to connect your host cluster with the virtctl CLI. + + +4. Navigate to the left **Main Menu** and select **Clusters**. + + +5. Select the cluster you want to connect to. + + +6. From the cluster overview page, navigate to the middle column containing cluster details and locate the **Kubernetes Config File** row. + + +7. Click on the kubeconfig link to download the file. + + +8. Open a terminal window and set the KUBECONFIG environment variable to the file path of the kubeconfig file. + + Example: + ```shell + export KUBECONFIG=~/Downloads/dev-cluster.kubeconfig + ``` + +
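   Before interacting with a VM, you can optionally confirm that the CLI can reach the cluster through this kubeconfig, for example:

   ```shell
   # Prints the virtctl client version and, if the cluster is reachable, the server version.
   virtctl version
   ```
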

9. Issue the `virtctl ssh <vm-name>` or `virtctl vnc <vm-name>` command to display the login screen.

   Example:
   ```shell
   virtctl ssh ubuntu
   ```

<br />
+ +You can now issue virtctl commands against the VM in your Kubernetes cluster. + +## Validate + +Verify you have access to your virtual machine by issuing virtctl commands against it, as shown in the example below. + +
+ +```bash +virtctl guestosinfo +``` \ No newline at end of file diff --git a/docs/docs-content/vm-management/create-manage-vm/create-manage-vm.md b/docs/docs-content/vm-management/create-manage-vm/create-manage-vm.md new file mode 100644 index 0000000000..5607fd57f8 --- /dev/null +++ b/docs/docs-content/vm-management/create-manage-vm/create-manage-vm.md @@ -0,0 +1,60 @@ +--- +sidebar_label: "Create and Manage VMs" +title: "Create and Manage VMs" +description: "Learn methods to create VMs using Palette's Virtual Machine Orchestrator.." +icon: " " +hide_table_of_contents: false +tags: ["vmo"] +--- + + +Palette Virtual Machine Orchestrator (VMO) allows you to deploy and manage Virtual Machines (VMs) alongside containerized applications. + +## Prerequisites + +- Outbound internet connectivity for port 443 is allowed so that you and your applications can connect with the Spectro Cloud reverse proxy. + + +- Users or groups must be mapped to a Virtual Machine RBAC role. You can create a custom role through a manifest and use Palette's RoleBinding feature to associate users and groups with the role. Refer to the [Create Role Bindings](../../clusters/cluster-management/cluster-rbac.md#create-role-bindings) guide to learn more. + + +- A namespace for VMs. Although you can deploy VMs from the default namespace, we recommend creating at least one namespace dedicated to VMs as a way to organize and manage them. To learn how to create a namespace, check out [Create a Namespace](../../clusters/cluster-management/namespace-management.md#create-a-namespace). + + +## VM Creation + +You can create a VM three ways: + +
+ +- Deploy a VM from a template. Palette provides out-of-the-box templates, or your organization may provide templates. For the latter, refer to the [Create a VM Template](create-vm-template.md) guide. + + +- Create an empty VM and install the Operating System (OS) using a standard method, such as a Preboot Execution Environment (PXE) or optical disk image (ISO). + + +- Clone an existing VM. + +Administrators can also import VMs from their existing VMware vSphere environment into Palette. + +Although no additional components are required in VMs, the **QEMU Guest Agent** is an optional component that runs inside a VM and provides runtime information. + +Additionally, Virtio is a virtualization standard for network and disk device drivers where only the guest's device driver knows it is deployed in a virtual environment, and cooperates with the hypervisor. This enables guests to receive high performance network and disk operations and provides most of the performance benefits of paravirtualization. + +
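As a sketch of what this looks like in practice, and assuming a cloud-init enabled guest image, the QEMU guest agent can be installed and started through cloud-init user data similar to the snippet below, which mirrors the cloud-init section of the template in [Create a VM Template](create-vm-template.md).

```yaml
#cloud-config
# Example only: install and enable the QEMU guest agent at first boot.
packages:
  - qemu-guest-agent
runcmd:
  - ["sudo", "systemctl", "enable", "--now", "qemu-guest-agent"]
```
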
+ +:::caution + +We recommend installing the QEMU guest agent to display additional details in Palette Virtual Machine Orchestrator. We also recommend installing VirtIO drivers to ensure you can use the paravirtualized hardware properly. + +::: + +## Resources + +- [Standard VM Operations](standard-vm-operations/standard-vm-operations.md) + + +- [Deploy VM from a Template](standard-vm-operations/deploy-vm-from-template.md) + + +- [Create a VM Template](create-vm-template.md) diff --git a/docs/docs-content/vm-management/create-manage-vm/create-vm-template.md b/docs/docs-content/vm-management/create-manage-vm/create-vm-template.md new file mode 100644 index 0000000000..42cb44d4fc --- /dev/null +++ b/docs/docs-content/vm-management/create-manage-vm/create-vm-template.md @@ -0,0 +1,123 @@ +--- +sidebar_label: "Create a VM Template" +title: "Create a VM Template" +description: "Learn how to create a VM template using Palette Virtual Machine Orchestrator." +icon: " " +hide_table_of_contents: false +sidebar_position: 10 +tags: ["vmo"] +--- + + +Although Palette provides out-of-the box templates, we recommend that you create and manage your own templates. + +## Prerequisites + +- Valid YAML that defines your VM template. + +## Create a VM Template + +Create a template by adding a YAML file as a manifest in an add-on profile. + +

1. Log in to [Palette](https://console.spectrocloud.com).


2. From the left **Main Menu**, click **Profiles** and click the **Add Cluster Profile** button.


3. Give the profile a name, select type **Add-on**, and click **Next**.


4. On the Profile Layers page, click **Add Manifest**.


5. Give the layer a name, click **Edit manifest**, and enter a name for the first template. Click the checkmark icon.


6. In the blank manifest file at right, enter the VM definition as a YAML file. You can add multiple manifests for multiple templates in the same add-on profile. They will display as layers in the profile.


7. Click **Confirm and Create**, then click **Next**.


8. Click **Finish Configuration**.

<br />
+ +#### Example YAML for a VM template + + +```yaml +apiVersion: spectrocloud.com/v1 +kind: VmTemplate +metadata: + labels: + app.kubernetes.io/name: fedora-36 + app.kubernetes.io/instance: fedora-36-instance + app.kubernetes.io/part-of: vmtemplate + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: vmtemplate + name: fedora-36 +spec: + description: This is Fedora 36 image + displayName: Fedora 36 + icon: "https://s3.amazonaws.com/manifests.spectrocloud.com/logos/fedora.png" + running: false + template: + spec: + domain: + cpu: + cores: 1 + devices: + disks: + - name: containerdisk + disk: + bus: virtio + - name: cloudinitdisk + disk: + bus: virtio + interfaces: + - name: default + masquerade: {} + resources: + requests: + memory: 1Gi + cpu: 1 + limits: + memory: 2Gi + cpu: 2 + networks: + - name: default + pod: {} + volumes: + - name: containerdisk + containerDisk: + image: gcr.io/spectro-images-public/release/vm-dashboard/os/fedora-container-disk:36 + - name: cloudinitdisk + cloudInitNoCloud: + # user name is fedora + userData: | + #cloud-config + ssh_pwauth: True + chpasswd: { expire: False } + password: spectro + disable_root: false + packages: + qemu-guest-agent + runcmd: + - ["sudo", "systemctl", "enable", "--now", "qemu-guest-agent"] +``` + + +## Validate + +1. Navigate to the left **Main Menu** and click **Profiles**. + + +2. Verify your newly added manifest is listed. + +## Next Steps + +Try applying the template to your cluster. Navigate to **Clusters** and click `+` next to Addon Layers, then select the VMO profile you created. diff --git a/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/_category_.json b/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/_category_.json new file mode 100644 index 0000000000..3fca6fb9f9 --- /dev/null +++ b/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 0 +} diff --git a/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/clone-vm.md b/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/clone-vm.md new file mode 100644 index 0000000000..f1fa2a1e44 --- /dev/null +++ b/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/clone-vm.md @@ -0,0 +1,51 @@ +--- +sidebar_label: "Clone a VM" +title: "Clone a VM" +description: "Learn how to clone a VM from a template using Palette Virtual Machine Orchestrator." +icon: " " +hide_table_of_contents: false +sidebar_position: 40 +tags: ["vmo"] +--- + + + +A VM clone is a copy of an existing, or parent, virtual machine (VM). The cloned VM has the same configuration settings and identifiers as the parent VM. After you clone a VM, it as a separate virtual machine. + +Cloning is a quick way to create a new virtual machine that shares the same properties as the parent. You may want to clone a VM for the following reasons: + +
+ +- Software testing - developers can clone an active VM to test new changes to their code. + + +- Forensics - security administators can clone an infected machine and connect it to an air-gaped network to investigate the source of the infection while the parent VM can be destroyed or remediated. + + +## Prerequisites + +There are no requirements. + +## Clone a VM + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. From the left **Main Menu**, click **Clusters** and click on your cluster. + + +3. Select the VM to clone and click either the **three-dot Menu** or **Actions** + + +4. Power off the parent VM and click **Clone**. If you forget to power it off, the parent VM will automatically be powered off while cloning is in progress. + + +5. Give the clone a name, give an optional description, and select a namespace. + + +6. Optionally, you can enable the checkbox to start the cloned VM automatically when cloning is complete. + + +## Validate + +From the **Virtual Machines** tab, verify the cloned VM is listed and displays **Running** status. \ No newline at end of file diff --git a/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/deploy-vm-from-template.md b/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/deploy-vm-from-template.md new file mode 100644 index 0000000000..e56cac2e6f --- /dev/null +++ b/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/deploy-vm-from-template.md @@ -0,0 +1,101 @@ +--- +sidebar_label: "Deploy VM From a Template" +title: "Deploy VM From a Template" +description: "Learn how to deploy a VM from a template using Palette Virtual Machine Orchestrator" +icon: " " +hide_table_of_contents: false +sidebar_position: 0 +tags: ["vmo"] +--- + + +You can deploy a Virtual Machine (VM) using Palette's out-of-the-box templates or from templates that your organization's administrator provides. + +## Prerequisites + +- Configured Virtual Machine Orchestrator profile applied to your cluster. Review [Create a VMO Profile](../../vm-packs-profiles/create-vmo-profile.md) to configure the dashboard. + +## Deploy VM from a Template + +These steps will help guide you to deploy a VM from an out-of-the-box VM template. + +
+ +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. From the left **Main Menu**, click **Clusters** and select the cluster in which you deploy VMs. + + +3. Click the **Virtual Machines** tab. + + +4. Select the appropriate namespace from the **drop-down Menu**. + + +5. From the **Virtual Machines** tab that appears, click **New Virtual Machine**. + + +6. Click the **New Virtual Machine** button. Available templates are displayed based on supported Operating Systems (OS). + + +7. You can deploy from a template or create an empty VM as follows: + +
+ + - To deploy from a template, select one of the VM templates. These can be Palette's out-of-the-box templates or templates that you or your administrator created. + +
+ + - To create an empty VM, close the templates choice page and install the OS using a different method. + +
+ +8. Give the VM a name and specify memory and CPUs. + + +9. Optionally, you can enable the checkbox to start the VM automatically after creation. + + +10. Click the **Next** button, which displays the YAML file. Tooltip help is available when you hover over lines in the file. + + +11. Review the YAML file and click the **Next** button when you are done. + + +12. Click **Create Virtual Machine**. + + +VM status will display as **Starting** for several minutes while the required resources are built and the image is pulled from the registry. If you did not enable the checkbox to start the VM automatically, VM status displays as **Stopped** until the VM is fully deployed. + +
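If you also want to watch the deployment from a terminal, and assuming you have downloaded the cluster's kubeconfig, you can list the KubeVirt VirtualMachine resources in the namespace you selected, for example:

```shell
# Example only: replace the namespace with the one you selected for the VM.
kubectl get virtualmachines --namespace default
```
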
+ +:::caution + +VMs do not self-heal. If a VM is running on a node that fails, the VM is re-scheduled to a different node. Similar to live migration, to provide high availability, the disks should be ``ReadWriteMany`` so that they can be mounted on other nodes when the VM is restarting. + +::: + + +## Validate + +1. Log in to [Palette](https://console.spectroloud.com). + + +2. Navigate to the left **Main Menu** and select **Clusters**. + + +3. Select the host cluster that contains your VMs to view its details page. + + +4. Click on the **Virtual Machines** tab. + + +5. Review the list of VMs and ensure the new VM is displayed and has the status **Running**. + + +## Next Steps + +Try installing your applications. If you did not install the QEMU guest agent as part of the VM deployment, you can install it now. The guest agent displays additional details in the **Virtual Machines** > **Details** tab. + +You can update the VM configuration from the VM console or from tabs when you click on the VM. Learn about updates you can make in the [Update VM Configuration](update-vm-configuration.md) guide. \ No newline at end of file diff --git a/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/migrate-vm-to-different-node.md b/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/migrate-vm-to-different-node.md new file mode 100644 index 0000000000..1cfcb32b0f --- /dev/null +++ b/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/migrate-vm-to-different-node.md @@ -0,0 +1,170 @@ +--- +sidebar_label: "Migrate a VM" +title: "Migrate a VM to a Different Node" +description: "Learn how to migrate a VM to another physical host in the cluster using Palette." +icon: " " +hide_table_of_contents: false +sidebar_position: 20 +tags: ["vmo"] +--- + + + +Palette supports virtual machine (VM) migration to another physical host in the cluster. This is known as *live migration*. During live migration, the VM and its memory, storage, and CPU resources are moved from one cluster compute node to another without any noticeable downtime. + +Successful live migrations rely on appropriately configured storage and networking, and live migration must be enabled as a feature gate. Live migration is enabled by default in the ``feature-gates`` section of the KubeVirt configuration file that is part of the **Virtual Machine Orchestrator** pack. Refer to [Feature Gates](../../vm-management.md#feature-gates) for more information. + +Live migration is used with rolling Kubernetes upgrades and workload balancing. To avoid interrupting a VM when a node is placed into maintenance or upgraded, all VM instances require a ``LiveMigrate`` eviction strategy. + + +## Prerequisites + +- All VM instances must have an eviction strategy set as `evictionStrategy: LiveMigrate` to ensure that a VM is not interrupted if the node is placed into maintenance. This is configured automatically in the KubeVirt configuration file. If needed, you can override the default setting by configuring `spec.template.spec.evictionStrategy`. + + +- VMs that use Persistent Volumes must have shared ``ReadWriteMany`` (``RWX``) access. For more information, refer to the [Persistent Volume Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) Kubernetes resource. VMs that do not use persistent storage, such as containerDisks, do not require modifications for live migration. + + +- A VM’s pod network cannot use a Bridge interface. 
Disable the default Bridge interface on the pod network. However, other interfaces such as those that Multus grants, may use a bridge interface for live migration. + + +## Migrate VM to a Different Node + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. From the left **Main Menu**, choose **Clusters** and click on your cluster. + + +3. Click on the **Virtual Machines** tab. + + +4. Select the VM to migrate and click either the **three-dot Menu** or **Actions**. + + +5. Click **Migrate Node to Node**. + + + +## Validate + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. From the left **Main Menu**, click **Clusters** and select the cluster. + + +3. Navigate to the **Virtual Machines** tab, and click the VM you migrated. + + +4. Click the **Details** tab, and verify that the name and IP address of the new node is changed. + + +## Evacuate a Host + +Compute nodes can be placed into maintenance mode using Palette or manually using the `cordon` and `drain` commands. The `cordon` command marks the node as un-schedulable and the `drain`command evacuates all the VMs and pods from it. This process is useful in case you need to perform hardware maintenance on the node - for example to replace a disk or network interface card (NIC) card, perform memory maintenance, or if there are any issues with a particular node that need to be resolved. To learn more, check out the [Safely Drain a Node](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/#use-kubectl-drain-to-remove-a-node-from-service) Kubernetes resource. + + +## Prerequisites + +- Ensure `LiveMigrate` is set as the eviction strategy for all affected VMs. When the host is put in maintenance mode, this feature allows for a smooth and uninterrupted migration process. + + +## Evacuate VMs in Palette + + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. From the left **Main Menu**, choose **Clusters** and click on the **Nodes** tab. + + +3. Click the **three-dot Menu** in the row of the node you want to evacuate and select **Turn on maintenance mode**. This evacuates all workloads from the node to other nodes in the worker pool. + + +4. Turn off maintenance mode by clicking the **three-dot Menu** in the row of the evacuated node and select **Turn off maintenance mode**. + +
+ + :::caution + + Maintenance mode reduces cluster capacity. Be sure to turn off maintenance mode after maintenance completes. + + ::: + + +## Validate + +You can validate evacuation completed by following the steps below. + +
+ +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. From the left **Main Menu**, choose **Clusters**. + + +3. Verify the **Health** column displays the **Maintenance mode: Completed** icon. + + + +## Evacuate VMs Manually + +
+ +1. Obtain the kubeconfig file from Palette, and set the KUBECONFIG environment variable to access it so you can issue kubectl commands to the cluster. To learn how, refer to [Set up Kubectl](../../../clusters/cluster-management/palette-webctl.md#set-up-kubectl). + + +2. Issue the following command to mark the node as *un-schedulable*. This alerts the Kubernetes scheduler not to schedule any new pods on that node but allows existing pods on the node to continue to operate. + +

   Example:
   ```bash
   kubectl cordon <node-name>
   ```

   **node-name**: The name of the node that should be marked as *un-schedulable*.


3. Issue the following command to gracefully remove all pods from the node that is undergoing maintenance. When you drain a node, all pods and VMs will be safely evicted from the node.

   Example:
   ```bash
   kubectl drain <node-name>
   ```

   **node-name**: The name of the node that you wish to drain.

<br />

   :::info

   The kubectl `drain` command should only be issued to a single node at a time.

   :::


## Validate


1. Log in to a machine that has kubectl access to the Kubernetes cluster.


2. Issue the following command and verify that the pods are rescheduled on different nodes by checking the node name and IP address of each pod in the output.
+ + ```bash + kubectl get pods --output wide + ``` + + +## Resources + +- [Persistent Volume Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) + + +- [Safely Drain a Node](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/#use-kubectl-drain-to-remove-a-node-from-service) diff --git a/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/standard-vm-operations.md b/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/standard-vm-operations.md new file mode 100644 index 0000000000..c608e6cdc2 --- /dev/null +++ b/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/standard-vm-operations.md @@ -0,0 +1,74 @@ +--- +sidebar_label: "Standard VM Operations" +title: "Standard VM Operations" +description: "Learn about standard VM operations that you can perform using Palette Virtual Machine Orchestrator." +icon: " " +hide_table_of_contents: false +tags: ["vmo"] +--- + + + +Palette Virtual Machine Orchestrator (VMO) supports standard VM power operations: + +
+ +- **Start** + + +- **Stop** + + +- **Pause** + + +- **Restart** + + +From the **Virtual Machines** tab, you can select a VM to view its details and perform standard VM operations, such as live migration (vMotion), snapshots, and cloning. VM operations are accessible from the **three-dot Menu** at the right of each listed VM. + +When you select a VM from the **Clusters** > **Virtual Machines** tab, the following tabs display. Tabs are specific to the selected VM. + +

- **Overview**: Provides general information about the VM, such as its IP address, operating system, creation date and time zone, status, active users, whether the guest agent is installed or not, the quantity of Network Interface Cards (NIC) and disks, and any recent events.


- **Details**: Provides additional VM details such as labels associated with the VM, pod information, scheduling and resource requirements, and CPU and memory. If the QEMU Guest Agent is not installed, **Not Available** displays in place of details that would otherwise be available to you.


- **YAML**: You can review and change the VM configuration from here.


- **Events**: Displays streaming events in the VM. Any standard operations you perform on the VM are captured here.


- **Console**: Allows you to access and interact with the VM through its console. If you are not using a template, you can configure the VM using the console.


- **Network Interfaces**: Allows you to add and manage network interfaces. By default, the Pod Networking interface is a masquerade type interface, or in simple terms, it's a one-to-many IP address translation. You can change this to be a Bridge or other interface type.


- **Disks**: Allows you to add and manage disks. You can update the disk size, specify the type `Disk`, `CD-ROM`, or `LUN`, and specify the interface `virtio`, `sata`, or `scsi`. By default, `spectro-storage-class` is applied to the disk.


- **Snapshots**: Allows you to take a new snapshot of a VM's disk file at a given point in time and manage existing snapshots.


## Resources

- [Deploy VM From a Template](deploy-vm-from-template.md)


- [Update VM Configuration](update-vm-configuration.md)


- [Migrate VM to a Different Node](migrate-vm-to-different-node.md)


- [Take a VM Snapshot](take-snapshot-of-vm.md)


- [Clone a VM](clone-vm.md)
\ No newline at end of file

diff --git a/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/take-snapshot-of-vm.md b/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/take-snapshot-of-vm.md
new file mode 100644
index 0000000000..7b01c75d58
--- /dev/null
+++ b/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/take-snapshot-of-vm.md
@@ -0,0 +1,82 @@
---
sidebar_label: "Take a VM Snapshot"
title: "Take a Snapshot of the VM"
description: "Learn how to snapshot a VM using Palette Virtual Machine Orchestrator."
icon: " "
hide_table_of_contents: false
sidebar_position: 30
tags: ["vmo"]
---


A snapshot is a copy of a virtual machine's (VM) disk file at a given point in time. Snapshots provide a change log for the virtual disk and are used to restore a VM to a particular point in time.

You can take a snapshot of a VM that is online (**Running** state) or offline (**Stopped** state). When you take a snapshot of an active VM, the controller checks for the QEMU guest agent in the VM. If the guest agent is present, the controller freezes the VM file system before it takes the snapshot and unfreezes the file system afterwards. This ensures crash consistency.
+ +:::info + +For optimal snapshots, we recommend taking snapshots of online VMs that have the QEMU Guest Agent installed. If the guest agent is not installed, a best effort snapshot is taken. + +To check whether the VM has the ``qemu-guest-agent`` active, look for ``AgentConnected`` in the **Virtual Machines > Snapshots** tab. The ``vmSnapshot Status`` will display if the snapshot was taken online and with or without guest agent participation. + +::: + +

You can take a snapshot of an online VM that has hotplugged disks. Only persistent hotplugged disks are included in the snapshot, and only disks with a snapshot-supported storage class defined are included. If no eligible disk is found, the **Snapshot** action is not possible.

## Prerequisites

- A deployed VM.


## Take a Snapshot

1. Log in to [Palette](https://console.spectrocloud.com).


2. From the left **Main Menu**, click **Clusters** and select your cluster.


3. Navigate to **Virtual Machines > Snapshots**, and click the **Take snapshot** button.


:::caution

In some situations, such as with the Fedora operating system, SELinux on the guest prevents the QEMU guest agent from quiescing the target filesystem. As a workaround, you can do one of the following:

- Generate an appropriate local security module that permits `qemu-ga` to operate correctly. This is the preferred workaround.


- Turn off SELinux **Enforcing** mode before the snapshot by issuing the `setenforce 0` command as the root user. Enforcing can be re-enabled after the snapshot using the `setenforce 1` command.

:::


The **Snapshots** tab displays the ``vmSnapshot Status`` parameter with snapshot phases for the VM: **InProgress**, **Succeeded**, or **Failed**.

The default time allowed for a snapshot is five minutes. If the snapshot has not successfully completed within that time, its status will display as **Failed**. The VM will be unfrozen and the snapshot content will be cleaned up if necessary. The snapshot will remain in the **Failed** state until you delete it. You can change the default snapshot time to meet your workload requirements.
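
Under the hood, KubeVirt represents each snapshot as a `VirtualMachineSnapshot` resource, and the snapshot deadline can be adjusted on that resource. The following is a minimal sketch only; the VM name and deadline value are hypothetical, and the API version may differ depending on the KubeVirt release bundled with the pack.

```yaml
apiVersion: snapshot.kubevirt.io/v1alpha1
kind: VirtualMachineSnapshot
metadata:
  name: my-vm-snapshot
spec:
  source:
    apiGroup: kubevirt.io
    kind: VirtualMachine
    name: my-vm
  # Extends the default five-minute deadline before the snapshot is marked Failed.
  failureDeadline: 10m
```
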
+ +:::caution + +Snapshots should not be used as a backup method, as running a VM on a snapshot for extended periods of time can cause instability and data loss. + +::: + +## Validate + +1. From the **Snapshots** tab, verify the ``vmSnapshot Status`` parameter displays **Succeeded**. + + +2. If the snapshot status displays as **Failed**, delete the snapshot and take a new one. You may need to change the default snapshot time in the VM configuration. + + + + + + + diff --git a/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/update-vm-configuration.md b/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/update-vm-configuration.md new file mode 100644 index 0000000000..6ca957136b --- /dev/null +++ b/docs/docs-content/vm-management/create-manage-vm/standard-vm-operations/update-vm-configuration.md @@ -0,0 +1,105 @@ +--- +sidebar_label: "Update VM Configuration" +title: "Update VM Configuration" +description: "Learn how to add disk storage and network interfaces to a VM using Palette Virtual Machine Orchestrator." +icon: " " +hide_table_of_contents: false +sidebar_position: 10 +tags: ["vmo"] +--- + + + + +You can add storage and additional network interfaces to your virtual machines (VMs). + + +## Add Disk Storage + +KubeVirt allows hot plugging additional storage into a running VM. Both block and file system volume types are supported. + +## Prerequisites + +- A deployed VM. + +## Add a Disk + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. From the left **Main Menu**, click **Clusters** and click on your cluster. + + +3. Navigate to **Virtual Machines** > **Disks** tabs and click the **Add disk** button. + + +4. Review the parameters and update as needed. You can specify the disk size, disk type (Disk, CD-ROM, or LUN), and network interface. + + The interface type determines out-of-the-box operating system (OS) support and disk performance. Choose from the following: + +
+ + - **virtio**: Optimized for best performance, but the operating system may require additional Virtio drivers. + +

   - **sata**: Most operating systems support Serial ATA (SATA). However, it offers lower performance.

   - **scsi**: A paravirtualized Small Computer System Interface (SCSI) HDD driver that offers similar functionality to the virtio-block device but with some additional enhancements. In particular, this driver supports adding hundreds of devices and names devices using the standard SCSI device naming scheme.


5. Click **Add** when you are done.

## Validate

The **Disks** tab lists the newly added disk as ``PersistingHotplug``.
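
For reference, the disk and interface type you configure in the wizard correspond to entries in the VM specification. The following excerpt is a sketch only, with hypothetical disk and PVC names; the `bus` value matches the interface you chose above.

```yaml
# Excerpt of a VirtualMachine spec: a PVC-backed disk attached over the virtio bus.
spec:
  template:
    spec:
      domain:
        devices:
          disks:
            - name: data-disk
              disk:
                bus: virtio   # or sata / scsi
      volumes:
        - name: data-disk
          persistentVolumeClaim:
            claimName: data-disk-pvc
```
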

## Add Network Interfaces

You can add additional network interfaces to a VM. By default, VMs use the native networking already configured in the pod. Typically, this means using the Bridge option, and your VM has the same IP address as the pod. This approach makes interoperability possible. The VM can integrate with different use cases, such as sidecar containers and pod masquerading.

When using pod masquerading, you choose a CIDR for which VMs are not assigned a private IP, and instead use Network Address Translation (NAT) behind the pod IP.

Multus is a secondary network that uses Multus-CNI. Multus allows you to attach multiple network interfaces to pods in Kubernetes. If you use Multus as your network, ensure that Multus is installed across your cluster and that you have created a default ``NetworkAttachmentDefinition`` CRD, as shown in the sketch after the steps below. For more information, refer to the [Multus CNI](/integrations/multus-cni) guide.


## Prerequisites

- A deployed VM.

## Add an Interface

1. Log in to [Palette](https://console.spectrocloud.com).


2. From the left **Main Menu**, click **Clusters** and click on your cluster.


3. Navigate to **Virtual Machines > Network Interfaces** and click the **Add network interface** button.


4. Review the parameters and update as needed. Interface types are: **Masquerade**, **Bridge**, and **SR-IOV**.


5. Click **Add** when you are done.
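
If you use Multus, a `NetworkAttachmentDefinition` similar to the following sketch must exist before the secondary interface can attach. The bridge name, subnet, and namespace are hypothetical and must match your environment.

```yaml
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
  name: vm-bridge-network
  namespace: default
spec:
  config: |
    {
      "cniVersion": "0.3.1",
      "name": "vm-bridge-network",
      "type": "bridge",
      "bridge": "br0",
      "ipam": {
        "type": "host-local",
        "subnet": "192.168.10.0/24"
      }
    }
```
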

:::info

Multus allows hot plugging network interfaces only when interfaces use the **virtio** model connected through bridge binding.

:::

## Validate

The **Network Interfaces** tab lists the newly added interface.

## Resources

- [Multus CNI](../../../integrations/multus-cni.md)

diff --git a/docs/docs-content/vm-management/create-manage-vm/vm-oversubscription.md b/docs/docs-content/vm-management/create-manage-vm/vm-oversubscription.md
new file mode 100644
index 0000000000..35f5c0bc15
--- /dev/null
+++ b/docs/docs-content/vm-management/create-manage-vm/vm-oversubscription.md
@@ -0,0 +1,90 @@
---
sidebar_label: "VM Performance"
title: "VM Performance"
description: "Learn how to improve VM performance by maximizing virtual machine CPU and memory using Palette."
icon: " "
hide_table_of_contents: false
sidebar_position: 30
tags: ["vmo"]
---


Palette Virtual Machine Orchestrator (VMO) allows administrators to oversubscribe the physical resources on a host to maximize the number of active workloads.

VM workloads typically have varying resource demands and peak utilization patterns. By oversubscribing resources, it is possible to allocate them flexibly and take advantage of the fact that not all VMs will require their maximum allocation simultaneously.

The hypervisor automatically overcommits CPU and memory. This means that more virtualized CPU and memory can be allocated to VMs than there are physical resources on the system.

## CPU Overcommit

KubeVirt offers the `cpuAllocationRatio` in its Custom Resource Definitions (CRD). This ratio is used to normalize the amount of CPU time the pod will request based on the number of virtual CPUs (vCPUs).

The pod CPU request is calculated as `pod CPU request = number of vCPUs * 1/cpuAllocationRatio`, so when `cpuAllocationRatio` is set to 1, the full amount of vCPU time is requested for the pod.

The `cpuAllocationRatio` is global, so setting it to greater than 1 has the effect of requesting less CPU from Kubernetes for each VM.

Certain workloads that require predictable latency and enhanced performance would benefit from obtaining dedicated CPU resources. KubeVirt relies on the Kubernetes CPU manager to pin vCPUs to the physical host's CPUs. To learn more, refer to the [Dedicated CPU Resources](https://kubevirt.io/user-guide/virtual_machines/dedicated_cpu_resources/) and [Resources Requests and Limits](https://kubevirt.io/user-guide/virtual_machines/virtual_hardware/#resources-requests-and-limits) KubeVirt documentation.
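
In upstream KubeVirt, the ratio lives under the developer configuration of the KubeVirt custom resource; in Palette, you would adjust the equivalent value through the **Virtual Machine Orchestrator** pack manifest. The following sketch assumes the upstream field path and an example ratio of 10.

```yaml
apiVersion: kubevirt.io/v1
kind: KubeVirt
metadata:
  name: kubevirt
  namespace: kubevirt
spec:
  configuration:
    developerConfiguration:
      # With a ratio of 10, a VM with 4 vCPUs requests only 400m of CPU from Kubernetes.
      cpuAllocationRatio: 10
```
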
+ +:::caution + +- We do not recommend overcommitting CPUs in a production environment without extensive testing. Applications that use 100 percent of processing resources may become unstable in overcommitted environments. + + +- Ensure you don't overcommit guest VMs on more than the physical number of processing cores. For example, a guest VM with four vCPUs should only be deployed on a host physical machine with a quad-core processor instead of a dual-core processor. + + We recommend no more than 10 total allocated vCPUs per physical processor core. + +::: + +
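
For the latency-sensitive workloads mentioned above, dedicated CPUs are requested in the VM instance specification. The following excerpt is a sketch; the core count and memory request are hypothetical.

```yaml
# Excerpt of a VirtualMachineInstance spec that requests pinned (dedicated) CPUs.
spec:
  domain:
    cpu:
      cores: 4
      dedicatedCpuPlacement: true
    resources:
      requests:
        memory: 4Gi
```
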

## Memory Overcommit

KubeVirt allows you to assign more or less memory to a VM than the VM requests from Kubernetes. You may want to overcommit VM memory if you have a cluster or a few nodes that are dedicated to running VMs. In this case, overcommitting memory makes use of all the memory in the nodes regardless of reserved or requested memory from the system.

To learn about options for memory overcommitment, refer to the [Node Overcommit](https://kubevirt.io/user-guide/operations/node_overcommit/) KubeVirt resource.

You can make several changes to reduce the memory footprint and overcommit the per-VMI memory overhead.
+ +- Enable guest overhead overcommit by setting `spec.domain.resources.overcommitGuestOverhead` to true. + + +- Enable guest memory by setting `spec.domain.memory.guest` to a value higher than `spec.domain.resources.requests.memory`, as shown in the example. + +```yaml + apiVersion: kubevirt.io/v1alpha3 + kind: VirtualMachineInstance + metadata: + name: testvmi-nocloud + spec: + terminationGracePeriodSeconds: 30 + domain: + resources: + overcommitGuestOverhead: true + requests: + memory: 1024M + memory: + guest: 2048M +``` + +
+ +- Enable implicit memory overcommit by setting `spec.configuration.developerConfiguration.memoryOvercommit` in the KubeVirt CRD to a percentage of the desired memory overcommit. + +## Resources + +- [Dedicated CPU Resources](https://kubevirt.io/user-guide/virtual_machines/dedicated_cpu_resources/) + + +- [Resources Requests and Limits](https://kubevirt.io/user-guide/virtual_machines/virtual_hardware/#resources-requests-and-limits) + + +- [Node Overcommit](https://kubevirt.io/user-guide/operations/node_overcommit/) \ No newline at end of file diff --git a/docs/docs-content/vm-management/vm-management.md b/docs/docs-content/vm-management/vm-management.md new file mode 100644 index 0000000000..cb071dbf8a --- /dev/null +++ b/docs/docs-content/vm-management/vm-management.md @@ -0,0 +1,97 @@ +--- +sidebar_label: "Virtual Machine Orchestrator" +title: "Virtual Machine Orchestrator" +description: "Learn about the Palette's Virtual Machine Orchestrator solution for managing containerized and virtualized applications." +hide_table_of_contents: false +sidebar_custom_props: + icon: "server" +tags: ["vmo"] +--- + +Palette Virtual Machine Orchestrator (VMO) provides a unified platform for managing containerized and virtualized applications. This solution allows organizations to onboard, deploy, manage, and scale VMs within the same cluster as their containerized applications. Palette VM Orchestrator simplifies managing infrastructure, improves resource utilization, and removes the cost of having a hypervisor. + + +![A drawing of VMs deployed to Palette](/docs_vm-mangement_vmo-diagram.png) + + +## Use Cases + +Palette VM Orchestrator is particularly suitable in the following scenarios: + +- Organizations that want to remove their virtualization infrastructure due to an aging environment or to reduce costs. By using Palette VM Orchestrator, legacy applications and modern, containerized applications can be deployed on VMs. + +- Edge locations with a few VMs deployed and where a hypervisor is no longer desired. + + +## Prerequisites + +Palette Virtual Machine Orchestrator requires the following: + +- Palette version 3.3.0 or higher. + + +- For data centers, production VMs are supported on bare metal Kubernetes clusters deployed on Canonical MAAS. To learn how to configure MAAS and create MAAS clusters in Palette, refer to the [Install and Manage MAAS Gateway](../clusters/data-center/maas/install-manage-maas-pcg.md) guide. + +- To use VMO on Edge, contact our support team by sending an email to [support@spectrocloud.com](mailto:support@spectrocloud.com) + +- VMs with Persistent Volume Claim (PVC) must have a StorageClass that supports ``ReadWriteMany`` (``RWX``) access mode for seamless live migration to a different node - either when triggered manually or during a Kubernetes upgrades. + + :::caution + + In environments that use nested virtualization, where VMs operate inside of VMs due to lack of hardware to host VMs, it is technically possible to operate VMs in Kubernetes by setting the KubeVirt resource ``useEmulation`` to true. However, we do not recommend this approach. + + ::: + + +## Get Started With VM Orchestrator + +To get started, review [Virtual Machine Orchestrator Pack](vm-packs-profiles/vm-packs-profiles.md) to learn about its components. 

Review [Create a VMO Profile](vm-packs-profiles/create-vmo-profile.md) and [Add Roles and Role Bindings](vm-packs-profiles/add-roles-and-role-bindings.md) to learn how to create the cluster profile and add roles and permissions that allow users to create and manage Virtual Machines (VMs).

Palette VM Orchestrator provides various methods to quickly deploy VMs from out-of-the-box templates or from your organization's templates. To learn more about using and creating templates, review [Deploy VM From a Template](create-manage-vm/standard-vm-operations/deploy-vm-from-template.md) and [Create a VM Template](create-manage-vm/create-vm-template.md).


## Feature Gates

Palette VM Orchestrator utilizes open-source KubeVirt as a component of the **Virtual Machine Orchestrator** pack to manage VMs and enables the following KubeVirt feature gates by default:

- LiveMigration
- Snapshot
- HotplugVolumes
- VMExport
- ExpandDisks
- HotplugNICs
- VMLiveUpdateFeatures

KubeVirt offers other feature gates you may find useful, which you can enable using [Kubernetes feature gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/). To enable more KubeVirt feature gates, you can modify the ``kubevirt.kubevirtResource.additonalFeatureGates`` parameter in the **Virtual Machine Orchestrator** manifest.

For more information on KubeVirt feature gates, refer to the [KubeVirt user guide](https://kubevirt.io/user-guide/operations/activating_feature_gates/).

## Resources

- [Virtual Machine Orchestrator Pack](vm-packs-profiles/vm-packs-profiles.md)


- [Create a VMO Profile](vm-packs-profiles/create-vmo-profile.md)


- [Add Roles and Role Bindings](vm-packs-profiles/add-roles-and-role-bindings.md)


- [Create and Manage VMs](create-manage-vm/create-manage-vm.md)


- [Standard VM Operations](create-manage-vm/standard-vm-operations/standard-vm-operations.md)


- [Deploy VM from a Template](create-manage-vm/standard-vm-operations/deploy-vm-from-template.md)


- [Create a VM Template](create-manage-vm/create-vm-template.md)


- [VM Roles and Permissions](vm-roles-permissions.md)


- [KubeVirt user guide](https://kubevirt.io/user-guide/operations/activating_feature_gates/)
\ No newline at end of file

diff --git a/docs/docs-content/vm-management/vm-packs-profiles/_category_.json b/docs/docs-content/vm-management/vm-packs-profiles/_category_.json
new file mode 100644
index 0000000000..3fca6fb9f9
--- /dev/null
+++ b/docs/docs-content/vm-management/vm-packs-profiles/_category_.json
@@ -0,0 +1,3 @@
{
  "position": 0
}

diff --git a/docs/docs-content/vm-management/vm-packs-profiles/add-roles-and-role-bindings.md b/docs/docs-content/vm-management/vm-packs-profiles/add-roles-and-role-bindings.md
new file mode 100644
index 0000000000..16c648771e
--- /dev/null
+++ b/docs/docs-content/vm-management/vm-packs-profiles/add-roles-and-role-bindings.md
@@ -0,0 +1,77 @@
---
sidebar_label: "Add Roles and Role Bindings"
title: "Add Roles and Role Bindings"
description: "Learn how to configure user roles and cluster role bindings for Virtual Machines managed by Palette Virtual Machine Orchestrator."
icon: " "
hide_table_of_contents: false
sidebar_position: 10
tags: ["vmo"]
---


You must configure permissions for actions that users can perform on Virtual Machines (VMs) deployed using Palette Virtual Machine Orchestrator (VMO), such as cloning, updating, and migrating VMs.
You can do this by creating roles and cluster role bindings to determine access permissions. Refer to [VM User Roles and Permissions](../vm-roles-permissions.md) for a list of Cluster Roles and equivalent Palette Roles. To learn more about Cluster RBAC in Palette, review the [RBAC and NS Support](../../clusters/cluster-management/cluster-rbac.md) guide.


## Prerequisites

- A cluster profile with the **Virtual Machine Orchestrator** add-on pack configured. Check out the [Create a VMO Profile](../vm-packs-profiles/create-vmo-profile.md) guide to learn more.

- Additional cluster roles, based on the user's persona, must be associated with the user by specifying a cluster role binding or a namespace-restricted role binding:

  - ``spectro-vm-admin``
  - ``spectro-vm-power-user``
  - ``spectro-vm-user``
  - ``spectro-vm-viewer``

  Alternatively, you can use the standard Kubernetes roles ``cluster-admin``, ``admin``, ``edit``, and ``view`` instead of defining bindings based on ``spectro-vm-*`` roles.

- Assigned permissions to access Palette clusters.

## Add Roles and Role Bindings

1. Log in to [Palette](https://console.spectrocloud.com).

2. From the left **Main Menu**, click **Clusters** and select your cluster.


3. Click on **Settings** and choose **RBAC** to add role bindings. Refer to [Create a Role Binding](../../clusters/cluster-management/cluster-rbac.md#create-role-bindings) for guidance, and see [VM User Roles and Permissions](../vm-roles-permissions.md) for a list of Cluster Roles and equivalent Palette Roles.

   If you have OpenID Connect (OIDC) configured at the Kubernetes layer of your cluster profile, you can create a role binding that maps individual users or groups assigned within the OIDC provider's configuration to a role. To learn more, review [Use RBAC with OIDC](../../integrations/kubernetes.md#use-rbac-with-oidc).

4. Click **Confirm** to update the cluster.

The cluster status displays as **Upgrading** on the **Cluster Overview** page. Upgrading can take several minutes depending on your environment. You can track events from the **Events** tab.

## Validate

You can verify that the role creation and role binding were successful by following the steps below.

1. Log in to [Palette](https://console.spectrocloud.com).

2. Navigate to the left **Main Menu** and select **Clusters**.

3. Select the cluster you created the role binding in to view its details page.

4. Download the **kubeconfig** file for the cluster or use the web shell to access the host cluster.

5. Use the following commands to review details about the role and to ensure the role binding was successful.

#### Cluster Role

```shell
kubectl get clusterrole <role-name> --output yaml
```

#### Role

```shell
kubectl get role <role-name> --namespace <namespace> --show-kind --export
```

## Next Steps

Now you are ready to deploy a VM. Review the [Deploy VM From a Template](../create-manage-vm/standard-vm-operations/deploy-vm-from-template.md) guide to get started with the deployment process.
+ +## Resources + +- [VM User Roles and Permissions](../vm-roles-permissions.md) diff --git a/docs/docs-content/vm-management/vm-packs-profiles/configure_OIDC.md b/docs/docs-content/vm-management/vm-packs-profiles/configure_OIDC.md new file mode 100644 index 0000000000..cd9d4a5599 --- /dev/null +++ b/docs/docs-content/vm-management/vm-packs-profiles/configure_OIDC.md @@ -0,0 +1,60 @@ +--- +sidebar_label: "Configure OIDC" +title: "Configure OIDC" +description: "Learn how to configure OIDC so Palette displays the Virtual Machine Dashboard." +icon: " " +hide_table_of_contents: false +sidebar_position: 15 +tags: ["vmo", "oidc"] +--- + + + + +Palette displays the Virtual Machine dashboard based on the OpenID Connect (OIDC) Identity Provider option that you select in the Kubernetes layer of the infrastructure profile. + + +## Prerequisites + +- A configured infrastructure profile. For more information, review [Create a Cluster Profile](../../cluster-profiles/task-define-profile.md). + + +## Enable OIDC + +1. Log in to [Palette](https://console.spectrocloud.com/). + +2. From the left **Main Menu** click **Profiles**. + +3. Select the cluster profile to update. Palette displays profile details and the profile stack. + +4. Select the Kubernetes layer in the profile stack, and choose an OIDC Identity Provider option. Refer to [Configure OIDC Identify Provider](../../integrations/kubernetes.md#configure-oidc-identity-provider) to learn more about OIDC options. + + Selecting **None** or **Palette** will display the Virtual Machine dashboard in a tab. + + Selecting **Inherit from Tenant** or **Custom** will display a link to the dashboard on the cluster overview page. + + :::caution + + We do not recommend choosing **None** in a production environment, as it may disable authentication for add-ons that rely on OIDC. + + ::: + +5. Click **Confirm Updates**. + +6. Save your changes. + + +## Validate + +1. From the left **Main Menu** click **Profiles**. + +2. Select the cluster profile you updated. Palette displays profile details and the profile stack. + +3. Select the Kubernetes layer. Palette displays the OIDC Identity Provider you selected - either **None**, **Palette**, **Inherit from Tenant**, or **Custom**. + + +## Next Steps + +You are now ready to create the VMO profile. Refer to [Create the VMO Profile](../vm-packs-profiles/create-vmo-profile.md) for guidance. + + diff --git a/docs/docs-content/vm-management/vm-packs-profiles/create-vmo-profile.md b/docs/docs-content/vm-management/vm-packs-profiles/create-vmo-profile.md new file mode 100644 index 0000000000..ba0406fd12 --- /dev/null +++ b/docs/docs-content/vm-management/vm-packs-profiles/create-vmo-profile.md @@ -0,0 +1,94 @@ +--- +sidebar_label: "Create a VMO Profile" +title: "Create a VMO Profile" +description: "Learn how to create a cluster profile to utilize Palette Virtual Machine Orchestrator capabilities." +icon: " " +hide_table_of_contents: false +sidebar_position: 5 +tags: ["vmo"] +--- + + +The **Virtual Machine Orchestrator** pack conveniently includes several components and automatically installs the [Spectro Proxy](../../integrations/frp.md) pack when you use the default profile configuration. To learn about pack components, refer to [Virtual Machine Orchestrator Pack](../vm-packs-profiles/vm-packs-profiles.md). + + +## Prerequisites + +- A Palette permission key `create` for the resource `clusterProfile`. + + +## Create the Profile + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. 
Select **Profiles** in the left **Main Menu** and click the **Add Cluster Profile** button. + + +3. Enter basic information for the profile: name, version if desired, and optional description. + + +4. Select type **Add-on**, and click **Next**. + + +5. In the following screen, click **Add New Pack**. + + +6. Use the information below to find the **Virtual Machine Orchestrator** pack: + - **Pack Type**: System App + - **Registry**: Public Repo + - **Pack Name**: Virtual Machine Orchestrator + - **Pack Version**: 1.0 or higher + + +7. Review the **Access** configuration panel at right. The default setting is **Proxied**, which automatically adds the **Spectro Proxy** pack when you create the cluster, allowing access to the Spectro VM Dashboard from anywhere. Check out the [Spectro Proxy](../../integrations/frp.md) guide to learn more. Changing the default may require some additional configuration. + + The **Direct** option is intended for a private configuration where a proxy is not implemented or not desired. + + :::caution + + We recommend using the pack defaults. Default settings provide best practices for your clusters. Changing the default settings can introduce misconfigurations. Carefully review the changes you make to a pack. + + ::: + +8. Click **Confirm & Create**. + + +9. In the following screen, click **Next**. + + +10. Review the profile and click **Finish Configuration**. + + +11. Apply the profile to your cluster. + + +## Validate + +You can validate the profile is created. + +1. Log in to [Palette](https://console.spectrocloud.com). + + +2. Navigate to **Profiles** from the left **Main Menu**. + + +3. Locate the newly created profile in the list. + + +4. From the left **Main Menu**, click **Clusters** and select your cluster. + + +5. Based on your Single Sign-On (SSO) settings, the **Virtual Machines** tab may display on the **Cluster Overview** page, or the **Connect** button may display next to **Virtual Machines Dashboard** in cluster details. + + +## Next Steps + +You will need to configure roles and role bindings to give users access virtual clusters. You can use VM user roles and permissions or standard Kubernetes roles. For configuration guidance, refer to [Add Roles and Role Bindings](add-roles-and-role-bindings.md). The [VM User Roles and Permissions](../vm-roles-permissions.md) reference lists Cluster Roles and equivalent Palette Roles. + +If you have OpenID Connect (OIDC) configured at the Kubernetes layer of your cluster profile, you can create a role binding that maps individual users or groups assigned within the OIDC provider's configuration to a role. To learn more, review [Use RBAC with OIDC](../../integrations/kubernetes.md#use-rbac-with-oidc). + + +## Resources + +- [Add Roles and Role Bindings](add-roles-and-role-bindings.md) diff --git a/docs/docs-content/vm-management/vm-packs-profiles/vm-packs-profiles.md b/docs/docs-content/vm-management/vm-packs-profiles/vm-packs-profiles.md new file mode 100644 index 0000000000..1b12965712 --- /dev/null +++ b/docs/docs-content/vm-management/vm-packs-profiles/vm-packs-profiles.md @@ -0,0 +1,44 @@ +--- +sidebar_label: "Virtual Machine Orchestrator Pack" +title: "Virtual Machine Orchestrator Pack" +description: "Learn about components of the Virtual Machine Orchestrator pack." +icon: " " +hide_table_of_contents: false +tags: ["vmo"] +--- + +The **Virtual Machine Orchestrator** pack provides a single-pack experience that consolidates all the dependencies needed to deploy and manage VMs in your Kubernetes host cluster. 
You use the **Virtual Machine Orchestrator** pack to create a VMO cluster profile. The pack's components are described below. All the components are enabled by default in the `charts:` section of the pack YAML configuration file.

- **Spectro VM Dashboard**: Enables access to a web console so you can manage and monitor your VMs. The console is accessible from the **Virtual Machines** tab that appears on the cluster overview page when using Palette Virtual Machine Orchestrator (VMO). The dashboard provides a web interface to create and manage VMs in your Kubernetes cluster.


- **KubeVirt**: Allows you to create VMs within a Kubernetes cluster using open-source [KubeVirt](https://kubevirt.io). KubeVirt provides feature gates you can enable in the Virtual Machine Orchestrator pack YAML file. To learn which feature gates Palette enables by default and how you can enable additional feature gates, check out the [Feature Gates](../vm-management.md#feature-gates) section.

  KubeVirt extends Kubernetes with additional virtualization resource types using the Kubernetes Custom Resource Definitions (CRD) API. KubeVirt also includes controllers and agents that provide VM management capabilities on the cluster. Through KubeVirt, you can use the Kubernetes API to manage VM resources similar to the way you manage Kubernetes resources.


- **KubeVirt CDI**: Provides persistent storage for Kubernetes clusters. It enables Persistent Volume Claims (PVCs) to be used as disks for KubeVirt VMs.


- **Volume Snapshot Controller**: A Kubernetes plugin that watches VolumeSnapshot CRD objects and manages the creation and deletion of volume snapshots. A snapshot represents a point-in-time copy of a volume.


- **Multus CNI**: A Container Network Interface (CNI) plugin that enables multiple network interfaces to attach to Kubernetes pods. In this context, it is used to attach VM networks to the launched VM.


:::info

The **Spectro Proxy** pack enables the use of a reverse proxy with a Kubernetes cluster and is automatically installed when you create the cluster with the default **Proxied** setting for **Access** during cluster profile creation. Check out the [Spectro Proxy](../../integrations/frp.md) pack documentation to learn more.

:::


Administrators can configure the out-of-the-box add-on packs, cluster profiles, and VM templates that include commonly used operating systems, or they can define their own VM templates to share with users.


## Resources


- [Spectro Proxy](../../integrations/frp.md)

- [Feature Gates](../vm-management.md#feature-gates)

diff --git a/docs/docs-content/vm-management/vm-roles-permissions.md b/docs/docs-content/vm-management/vm-roles-permissions.md
new file mode 100644
index 0000000000..d25d475f6a
--- /dev/null
+++ b/docs/docs-content/vm-management/vm-roles-permissions.md
@@ -0,0 +1,43 @@
---
sidebar_label: "VM User Roles and Permissions"
title: "VM User Roles and Permissions"
description: "Learn about roles and permissions to apply to VMs when using Palette Virtual Machine Orchestrator."
icon: " "
hide_table_of_contents: false
sidebar_position: 20
tags: ["vmo"]
---


You must configure roles and role bindings before any user, including you as the administrator, can access Palette Virtual Machine Orchestrator (VMO). There are two sets of roles: Cluster Roles and Palette Roles, along with the required bindings configuration.

Palette provides the following four out-of-the-box Cluster roles for Palette Virtual Machine Orchestrator.
The table also lists the corresponding Palette roles. + +

| Cluster Role | Description | Restrictions | Palette Role |
|-----------|-------------|-----------|-----------|
| ``spectro-vm-admin`` | Has admin privileges to manage the Kubernetes cluster, VMs, and templates. | None | Cluster Admin or Editor <br /> Cluster Profile Admin or Editor <br /> Virtual Machine Admin |
| ``spectro-vm-power-user`` | Can perform most VM operations, but does not handle infrastructure aspects. | Cannot manage or administer the Kubernetes cluster. <br /> Cannot manage or update VM templates. | Cluster Viewer <br /> Virtual Machine Power User |
| ``spectro-vm-user`` | Primarily uses VMs created by others. | Cannot launch new VMs or clone existing ones. <br /> Cannot delete VMs. <br /> Cannot migrate VMs from one node to another. | Cluster Viewer <br /> Virtual Machine User |
| ``spectro-vm-viewer`` | A view-only role. | Cannot perform any of the operations offered to the above users. | Cluster Viewer <br /> Virtual Machine Viewer |
+ +:::caution + +These roles are currently only relevant to access Palette Virtual Machine Orchestrator APIs. To access the Virtual Machines console, users must have permissions to access the host clusters. These permissions can be granted through the [default Kubernetes roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#default-roles-and-role-bindings) Admin/Editor/Viewer. + +::: + +
+ +You can create additional roles based on the permissions granularity that Palette offers. Palette provides the ability to specify bindings to configure granular Role-Based Access Control (RBAC) rules. + +
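
The out-of-the-box roles are bound to users or groups with standard Kubernetes RBAC objects. As a minimal sketch, the following cluster role binding grants the ``spectro-vm-user`` role to a hypothetical group; in Palette, you would create the equivalent binding from the cluster's **RBAC** settings.

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: vm-users-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: spectro-vm-user
subjects:
  # The group name is hypothetical and would come from your identity provider.
  - apiGroup: rbac.authorization.k8s.io
    kind: Group
    name: vm-users
```
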
+ + +You can configure namespaces and RBAC from within a cluster or from a Palette workspace that contains a cluster group. In a cluster group, all RoleBindings must occur at the namespace level. For details, review the [Cluster RBAC](../clusters/cluster-management/cluster-rbac.md) and [workspace RBAC](../workspace/workspace.md#role-based-access-controlrbac) guides. + +Palette leverages Regex Pattern matching so you can select multiple namespaces to apply role bindings. Check out [Regex for Namespaces](../workspace/workload-features.md#regex-for-namespaces) to learn more. diff --git a/docs/docs-content/workspace/_category_.json b/docs/docs-content/workspace/_category_.json new file mode 100644 index 0000000000..3111907aaf --- /dev/null +++ b/docs/docs-content/workspace/_category_.json @@ -0,0 +1,3 @@ +{ + "position": 90 +} diff --git a/docs/docs-content/workspace/adding-a-new-workspace.md b/docs/docs-content/workspace/adding-a-new-workspace.md new file mode 100644 index 0000000000..d50b761265 --- /dev/null +++ b/docs/docs-content/workspace/adding-a-new-workspace.md @@ -0,0 +1,73 @@ +--- +sidebar_label: "Adding a Workspace" +title: "Adding a workspace" +description: "How to create multi-cluster workspace in Palette" +icon: "" +hide_table_of_contents: false +sidebar_position: 0 +tags: ["workspace"] +--- + + +Palette enables multi-cluster management and governance capabilities by introducing Workspaces. This section explains how a workspace can be created in the Palette console. + +## Prerequisites + + * One or more running workload clusters within the project. + * Cluster must not be imported with read-only mode. + * RBAC should not be set at cluster level but to be included at workspace level. + * Palette Virtual Clusters cannot be part of the workspace. + +## Create Your Workspace + +1. Add the Basic Information +Provide the basic information for the workspace such as: + +* Unique Name +* Optional Description +* Optional Tag + + +2.Associate Clusters + + * Select the cluster(s) to be added to the workspace. (See [New Clusters](../clusters/clusters.md) to learn how to add a new Cluster.) Palette clusters, as well as brownfield clusters, can be added to your workspace. + + + * Configure the Cluster Role Binding (optional). Role bindings can be created on all workspace clusters. + - As step 2 of the new Workspace creation, select **Add Cluster Role Binding**. + - Provide the name of the role for which the cluster role binding needs to be created. The role should be pre-existing or an in-built system role. Palette does not create cluster roles. + - Subjects for the cluster role binding can be groups, users, or service accounts. + + | **Subject Type** | **Subject Name** | **Subject Namespace** | + | ---------------- | ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | + | **User** | a valid path segment name | NA | + | **Group** | a valid path segment name | NA | + | **Service Account** | a valid path segment name | Granting super-user access to all service accounts
cluster-wide is strongly discouraged. Hence, grant a role to all service accounts in a namespace. |


3. Associate Namespaces

* Enter one or more namespaces that need to be part of the workspace. The combination of workspace and cluster is unique across workspaces in a project. Palette ensures that all the namespaces are created for all the clusters in the workspaces, in case they are not pre-existing.


* Add the resource quota for the namespaces by specifying CPU and Memory limits (optional).


* Configure the Role Binding (optional). The following information is required for each role binding:
  * Select a namespace name, or use a Regex pattern for namespaces to select multiple namespaces.
  * The specific name of a pre-existing role.
  * Select the Subjects from the dropdown list (User, Group, or ServiceAccount). For the subject selected, provide a valid path segment name. For the ServiceAccount subject, select a namespace name, as granting super-user access to all service accounts cluster-wide is strongly discouraged due to security concerns.
  * Confirm the information provided to complete the configuration of the role binding.

4. Settings


* [Schedule Backups](../clusters/cluster-management/backup-restore/backup-restore.md) - set the backup and restore policies.

* [Container Image](workload-features.md#restrict-container-images-to-a-workspace) - list the container images to be restricted within a Workspace namespace.



Review and finish the configuration and complete the deployment.


diff --git a/docs/docs-content/workspace/workload-features.md b/docs/docs-content/workspace/workload-features.md
new file mode 100644
index 0000000000..df934a66d0
--- /dev/null
+++ b/docs/docs-content/workspace/workload-features.md
@@ -0,0 +1,576 @@
---
sidebar_label: "Workspace Management"
title: "The additional features to optimize workspace performance"
description: "How to get a unified view of workloads in logically grouped namespaces and clusters"
icon: ""
hide_table_of_contents: false
sidebar_position: 10
tags: ["workspace"]
---



# Manage Palette Workspace

Palette supports several day 2 operations to manage the end-to-end lifecycle of Kubernetes clusters through Workspaces. It also provides several capabilities across new and imported clusters to keep your clusters secure, compliant, and up to date, and to perform ongoing management operations like Backup and Restore. Additionally, you can have visibility into the workloads running inside your cluster and into cluster costs.

The following sections describe these capabilities in detail:

------------------------


## Workload Visibility

Workspace provides visibility into workloads deployed across clusters.

|**Resource**|**Description available from Workspace**|
|---|-----|
|**Namespaces**|Cluster-specific namespaces with CPU and Memory utilization.|
|**Pods**|Lists all the pods running in a particular namespace, along with cluster names and the detailed health status, age, and resource utilization of each pod.|
|**Deployments**|Enumerates all the running deployments specific to clusters belonging to the Workspace, with the namespace to which each deployment belongs, pod details, replicas, and age.|
|**DaemonSets**|Describes DaemonSet resource utilization, with details on namespaces, pods, and the age of individual DaemonSets.|
|**StatefulSets**|Enumerates all the active StatefulSets specific to clusters belonging to the Workspace, with the corresponding namespace, pod details, replicas, and age.|
|**Jobs**|A Job creates one or more Pods and will continue to retry execution of the Pods until a specified number of them successfully terminate.|
|**CronJobs**|Cron Jobs are regularly scheduled actions or jobs such as backups, report generation, and so on. Each of these jobs will recur as scheduled.|
|**RoleBinding**|A role binding grants the permissions defined in a role to a user or set of users.|
|**ClusterRoleBinding**|A cluster role binding grants the permissions defined in a role across an entire cluster.|


## Workspace Backup and Restore

Palette users can create cluster backups from within a workspace (usually consisting of multiple clusters) and restore them at a later time as desired. Palette allows granular controls within a workspace so users can perform specific tasks within the workspace without having the ability to update workspace details. To provide granular access within a workspace for specific actions, Palette provides the following two Roles:

## Workspace Operator

Users assigned the **Workspace Operator** Role can only perform Backup and Restore actions within the Workspace.

## Workspace Admin

A Role that has all administrative permissions and privileges within the Workspace.

## Create your Workspace Roles

To create your **Workspace Role**, follow the steps below:

1. Log in to the Palette Management Console as **Tenant Admin**.


2. Go to the **Users and Teams** option.


3. From the listed users, select the user to be assigned Workspace Roles. See [User Creation](../user-management/new-user.md) for more information.


4. Select the **Workspace Roles** tab and click **+ New Workspace Role** to create a new role.


5. Fill in the following information in the **Add Roles to User-Name** wizard:
   * Project
   * Workspace
   * Choose the role from the options:
     * Workspace Admin
     * Workspace Operator


6. Confirm the information provided to complete the wizard.


7. The user assigned the Workspace Role can take Workspace-wide Backups and Restores in compliance with their permissions and privileges.

Palette supports backups to the following locations:
+ +#### Amazon Web Services (AWS) S3 Buckets: [Prerequisites](#amazon-web-services-aws-s3-buckets-prerequisitesbucketasbackuplocation-configure-your-backup)bucketasbackuplocation), [Configure your Backup](#configure-your-backup-in-aws-s3) + +#### Google Cloud Platform (GCP) Buckets: [Prerequisites](#google-cloud-platform-gcp-buckets-prerequisites-configure-your-backup), [Configure your Backup](#configure-your-backup-in-gcp-bucket) + +#### MinIO S3 Buckets: [Prerequisites](#minio-s3-buckets-prerequisites-configure-your-backup), [Configure your Backup](#configure-your-backup-in-minio) + +#### Azure Blob: [Prerequisites](#azure-blob-prerequisites-configure-your-backup), [Configure your Backup](#configure-your-backup-in-azure-azure-blob) + +## Prerequisites + +## For an Amazon Web Services (AWS) Bucket as Backup Location + +* The AWS S3 permissions listed in the next section need to be configured in the AWS account to provision Backup through Palette. + +* Pre-create a bucket at the AWS or MinIO object-store. + +## For a Google Cloud Platform (GCP) Backup Location + +* GCP service account with a **Storage Admin** role. + +* Pre-create a bucket at the GCP object storage. + +## For MinIO S3 Backup + +* S3 bucket with Read/Write Access + +* A unique access key (username) and corresponding secret key (password) from MinIO Console. + +* Service provider certificate (Optional) + +#### For Azure Blob Backup + +* An active Azure cloud account with the following pieces of information noted down: + * Tenant Id + * Client Id + * Subscription Id + * Client Secret created + + +* An [Azure storage account](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-create?tabs=azure-portal) created with the following information to be noted down for Palette use: + * Storage Name: Custom name given to the Azure storage created. + * Stock-keeping unit + + +* A container to be created in the Azure Storage account + +## Backup Locations + +AWS Simple Cloud Storage (S3) and other S3 compliant object stores such as MinIO and GCP Buckets are currently supported as backup locations. These locations can be configured and managed under the **Project** > **Settings** option and can be selected as a backup location, while backing up any cluster in the project. + +### Configure your Backup in AWS S3 + +The following details are required to configure a backup location in AWS: + +1. **Location Name** - Name of your choice. + + +2. **Location Provider** - AWS (This is currently the only choice on the UI. Choose this option when backing up to AWS S3 or any S3 compliance object store). + + +3. **Certificate** - Required for MinIO. + + +4. **S3 Bucket** - S3 bucket name must be pre-created on the object-store. + + +5. **Configuration** - region={region-name},s3ForcePathStyle={true/false},s3Url={S3 URL}. S3 URL need not be provided for AWS S3. + + +6. **Account Information** - Details of the account which hosts the S3 bucket to be specified as Credentials or STS. + * Credentials - Provide access key and secret key. + * STS - Provide the ARN and External ID of the IAM role that has permission to perform all S3 operations. The STS role provided in the backup location should have a trust set up with the account used to launch the cluster itself and should have the permission to assume the role. + + +7. Palette mandates the AWS S3 Permissions while users use the static role to provision worker nodes. 
+ +### AWS S3 Permissions + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeVolumes", + "ec2:DescribeSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:CreateSnapshot", + "ec2:DeleteSnapshot" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:PutObject", + "s3:AbortMultipartUpload", + "s3:ListMultipartUploadParts" + ], + "Resource": [ + "arn:aws:s3:::BUCKET-NAME/*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::BUCKET-NAME" + ] + } + ] + } + + ``` + +### Trust Setup Example + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::141912899XX99:root" + }, + "Action": "sts:AssumeRole", + "Condition": {} + } + ] + } + ``` + +## Configure your Backup in GCP Bucket + +These locations can be configured and managed from the **Settings** option under **Project** and can be selected as a backup location while backing up any cluster in the project. + +The following details are required to configure a backup location in GCP: + +1. **Location Name** - Name of your choice. + + +2. **Location Provider** - Google Cloud (Choose this option when backing up to the GCP bucket object store). + + +3. **Bucket** - The name of the bucket name pre-created on the object store. + + +4. **JSON Credentials** - For external authentication of the GCP storage. + + +## Configure your Backup in MinIO + +The following details are required to configure a backup location in AWS: + +1. **Location Name**: Name of your choice. + + +2. **Location Provider**: Minio + + +3. **Certificate**: Optionally required for MinIO. + + +4. **S3 Bucket**: S3 bucket name must be pre-created on the MinIO object-store. + + +5. **Region**: Region in which the S3 bucket is created. Example: us-east-1 + + +6. **S3 URL**: Url of the MinIO object storage console. Example: `http://12.123.234.567:0000` + + +7. **Force S3 path style** : To force S3 pathstyle addressing or else the url will be converted to virtual-hosted style addressing with bucket name appended to the url.This is an optional setting. + + +8. **Authenticate** using MinIo access key and secret access key. + + +9. Click **Create** to complete the location creation wizard. + +## Configure your Backup in Azure: Azure Blob + +The following details are required to configure a backup location in Azure: + +1. **Location Name**: A custom name for the storage location getting created. + + +2. **Location Provider:** Select **Azure** from the drop-down. + + +3. **Container Name:** The container created in Azure Storage. + + +4. **Storage Name**: Name of the Azure storage created. + + +5. **Stock-Keeping Unit**: Information from the Azure storage. + + +6. **Resource Group:** Azure Resource Group name + + +7. **Tenant ID:** Azure Account Credential. + + +8. **Client ID:** Azure Account Credential. + + +9. **Subscription ID**: Azure Account Credential. + + +10. **Client Secret:** Secret created in the Azure console needs to be validated. + + +11. Click **Create** to complete the location creation wizard. + + +## Add a Backup Location + +Go to **Project Settings** > **Backup locations** > **Add a New Backup location**. + +## Create a Workspace Backup + +Backups can be scheduled or initiated in an on demand basis, during the workspace creation. The following information is required for configuring a Workspace Backup, on demand- + +1. 
**Backup Prefix / Backup Name**: For a scheduled backup, a name is generated internally; add a prefix of your choice to append to the generated name. For an on-demand backup, you can use a name of your choice.


2. Select the Backup location.


3. **Backup Schedule** - Create a backup schedule of your choice from the dropdown list, applicable only to scheduled backups.


4. **Expiry Date** - Select an expiry date for the backups. The backup will be automatically removed on the expiry date.


5. **Include all disks** - Optionally, back up persistent disks as part of the backup.


6. **Include Cluster Resources** - Select or deselect as needed.


|On Demand Backup |
|-------------------|
|Select the **Workspace to Backup** > **Settings** > **Schedule Backups**|


|Scheduled Backup |
|-----------------|
|**Workspace Creation** > **Policies** > **Backup Policies**|


## Backup Scheduling Options

Both cluster and workspace backups support the following scheduling options:

* Customize your backup for the exact month, day, hour, and minute of your choice.
* Every week on Sunday at midnight
* Every two weeks at midnight
* Every month on the 1st at midnight
* Every two months on the 1st at midnight

## Restore a Backup

Backups created manually or as part of the schedule are listed under the Backup/Restore page of the cluster.

1. A restore operation can be initiated by selecting the restore option for a specific backup.


2. Next, you will be prompted to select a target cluster where you would like the backup to be restored. The progress of the restore operation can be tracked from the target cluster's Backup/Restore page.


3. Finally, restore operations can be performed on a cluster running in the same project.



## Restore Your Backup

To initiate a restore operation:

1. Log in to the Palette console as the **Project Admin** and go to the **Workspaces** page.


2. Select the **Workspace Name** to be restored.


3. From the selected Workspace overview, select **Backups** from the top menu.


4. The Backup option lists all the backups scheduled for the selected Workspace. Next to the name of the backup, click the **three-dot Menu** to open the restore wizard.


5. Click on the **Restore Backup** option to complete the wizard:
   * Choose the namespaces to be restored.
   * Three options are available to filter the resources to be restored:
     * **Include Cluster Resources** - To restore all the cluster-scoped resources.
     * **Preserve Node Ports** - To preserve ports for NodePort services running in the cluster.
     * **Restore PVs** - To restore the persistent volumes.

   **Note**: Select the **Include Cluster Resources** and **Restore PVs** options together.


6. Select the resources according to your requirements to complete the wizard.


## Workspace Quota

Palette optionally lets you limit resource usage within the workspace. The quota is specified in terms of the maximum CPU and memory. Resource utilization within the namespace should stay below the quota allocated across all the clusters.

## To Set Your Resource Quota

1. During [Step: 3 Associate Namespaces](adding-a-new-workspace#3-associate-namespaces) of Namespace creation, the **Workspace Quota** can be set by giving the **Maximum CPU** and **Maximum Memory**. Then, all the clusters launched within the Namespace can use the set quota.


2. A Namespace Quota can be set for an already deployed workspace from:
   `Workspace Settings -> Namespaces -> Workspace Quota`

### Workspace Quota Notes

* The quota allocated to the workspace scope is split across all the namespaces under that workspace per their resource requirements.


* Palette allows quotas to be allocated to individual namespaces under a specific workspace. In that case, individual clusters belonging to that namespace can utilize the quota per their resource requirements. When a namespace is allocated a quota, each cluster belonging to that namespace is allocated that resource quota individually.

  **Example**: If the namespace palette-ns belongs to two (2) clusters, p1 and p2, and palette-ns is allocated a quota of 1 CPU and 1 GB memory, each of p1 and p2 gets allocated 1 CPU and 1 GB memory individually.


* Palette allows a quota to be allocated to individual clusters under a specific workspace. In that case, the allocated quota should not exceed the namespace quota.


* To set an unlimited quota, set the quota value to -1.
  * If -1 is set as the quota for a cluster, then you cannot set a quota for the workspace to which the cluster belongs.
  * If -1 is set as the quota for a Workspace, then you cannot set a quota for the clusters belonging to that Workspace.
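
Conceptually, a per-namespace quota corresponds to a Kubernetes `ResourceQuota` object applied to that namespace in each cluster. The following sketch is only an illustration under that assumption; the namespace name and limits are hypothetical.

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: palette-ns-quota
  namespace: palette-ns
spec:
  hard:
    # Aggregate CPU and memory limits allowed for workloads in the namespace.
    limits.cpu: "1"
    limits.memory: 1Gi
```
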
+
+
+## Regex for Namespaces
+
+Palette leverages regex pattern matching to select multiple namespaces and apply role binding to them concurrently. When many namespaces need to be configured for role binding, you can provide a regex pattern that matches multiple namespaces instead of specifying a single namespace. All the namespaces matching the given regex pattern are then selected together for role binding.
+
+## Use Cases
+
+1. A regex pattern that starts and ends with " / " will select all the namespaces matching the given regex pattern.
+
+   **Example:** `/^palette-ns/`
+
+
+2. A regex pattern that starts with the negation symbol (`~`) will select all the namespaces that *do not match* the given regex expression.
+
+   **Example:** `~/^(kube|cluster|capi|jet|cert)[-].+/`
+
+**Note**: Do not add a space between the `~` operator and the expression.
+
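+To make the two patterns above concrete, here is a minimal JavaScript sketch. The namespace names are hypothetical, and the handling of the `~` operator shown here simply mirrors the inversion described above rather than Palette's internal implementation:
+
+```js
+// Hypothetical namespace names, for illustration only.
+const namespaces = ["palette-ns-frontend", "palette-ns-backend", "kube-system", "cert-manager"];
+
+// /^palette-ns/ selects every namespace whose name starts with "palette-ns".
+const selected = namespaces.filter((ns) => /^palette-ns/.test(ns));
+console.log(selected); // ["palette-ns-frontend", "palette-ns-backend"]
+
+// ~/^(kube|cluster|capi|jet|cert)[-].+/ keeps only the namespaces that do NOT
+// match the expression following the ~ operator.
+const pattern = /^(kube|cluster|capi|jet|cert)[-].+/;
+const notMatching = namespaces.filter((ns) => !pattern.test(ns));
+console.log(notMatching); // ["palette-ns-frontend", "palette-ns-backend"]
+```
+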
+
+
+
+## Workspace Role Binding
+
+Workspace Role Binding is a Project scope operation. There are two available options for setting up Role Binding for a Workspace:
+
+* **Cluster** to create a RoleBinding with cluster-wide scope (ClusterRoleBinding).
+
+
+* **Namespaces** to create a RoleBinding within namespace scope (RoleBinding).
+
+Palette users can choose role creation based on their resource requirements.
+
+## Configure cluster role bindings
+
+* Log in to Palette as a Project admin and select the Workspace for which the Role Binding needs to be configured.
+
+
+* Select Settings -> Cluster
+
+
+* Select the clusters from the workspace to role bind.
+
+
+* Click on “Add new binding” to open the “Add Cluster Role Binding” wizard. Fill in the following details:
+  * Role Name: Define a custom role name to identify the cluster role.
+  * Subjects: Subjects are a group of users, services, or teams using the Kubernetes API. It defines the operations a user, service, or team can perform. There are three types of subjects:
+    * Subject Type:
+      * Users: These are global and meant for humans or processes living outside the cluster.
+      * Groups: Set of users.
+      * Service Accounts: Kubernetes uses service accounts to authenticate and authorize requests by pods to the Kubernetes API server. These are namespaced and meant for intra-cluster processes running inside pods.
+    * Subject Name: Custom name to identify a subject.
+A single RoleBinding can have multiple subjects.
+
+
+* “Confirm” the information to complete the creation of the ClusterRoleBinding.
+
+## Configure role bindings: Namespace Scope
+
+Users can now allocate CPU and Memory [quotas](#workspace-quota) for each **namespace** at the cluster level.
+
+* Log in to Palette as a Project admin and select the Workspace for which the Role Binding needs to be configured.
+
+
+* Select Cluster Settings -> Namespace.
+
+
+* Create a namespace with a custom name and add it to the namespace list by clicking “add to the list”.
+
+
+* [Allocate resources](workload-features.md#workspace-quota) to the created namespace (CPU and Memory).
+
+
+* Click on “Add new binding” to open the “Add ClusterRoleBinding” wizard. Fill in the following details:
+  * Namespace: Select the namespace from the drop-down menu. The list will display the namespaces created during the previous step.
+  * Role Type: Select the role type from the drop-down. Either Role or Cluster Role.
+
+:::info
+A RoleBinding may reference any Role in the same namespace. Alternatively, a RoleBinding can reference a ClusterRole and bind that ClusterRole to the namespace of the RoleBinding. For example, if you want to bind a ClusterRole to all the namespaces in your cluster, you use a ClusterRoleBinding.
+:::
+
+* Role Name: Define a custom role name to identify the cluster role.
+
+
+* Subjects: Subjects are a group of users, services, or teams using the Kubernetes API. It defines the operations a user, service, or group can perform. There are three types of subjects:
+  * Subject Type:
+    * Users: These are global and meant for humans or processes living outside the cluster.
+    * Groups: Set of users.
+    * Service Accounts: Kubernetes uses service accounts to authenticate and authorize requests by pods to the Kubernetes API server. These are namespaced and meant for intra-cluster processes running inside pods.
+  * Subject Name: Custom name to identify a subject.
+A single RoleBinding can have multiple subjects.
+
+
+* “Confirm” the information to complete the creation of the RoleBinding.
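+For orientation only, the details collected by these wizards map onto familiar Kubernetes binding fields. The object below is a hypothetical JavaScript sketch (the field values are invented for illustration and do not represent a Palette API); it shows one role name bound to the three subject types described above:
+
+```js
+// Hypothetical shape of the information gathered by the "Add Cluster Role Binding" wizard.
+const binding = {
+  roleName: "workspace-admin", // custom name identifying the cluster role
+  subjects: [
+    { type: "User", name: "dev-lead@example.com" }, // global, lives outside the cluster
+    { type: "Group", name: "platform-team" }, // a set of users
+    { type: "ServiceAccount", name: "ci-runner" }, // namespaced, used by pods inside the cluster
+  ],
+};
+
+// A single binding can carry multiple subjects.
+console.log(binding.subjects.map((subject) => subject.type)); // ["User", "Group", "ServiceAccount"]
+```
+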
+
+
+
+
+
+
+## Restricted Container Images
+
+Palette users can restrict specific container images from being deployed into a particular namespace. This prevents tenants from accidentally installing a delisted or unwanted container in that namespace.
+
+ +## Restrict container images to a workspace + + To restrict a container image for a particular namespace within the workspace: + +1. During [Step: 4 Settings](adding-a-new-workspace.md#4-settings) of workspace creation, select the **Container Images** tab from the left ribbon. + + +2. Click on **+ Add New Container Image** and provide the **Namespace** and **Restricted Images**. Multiple images can be restricted within a namespace by separating them with commas. +
+
+## Restrict container images to a deployed workspace
+
+You can add a list of restricted images to an already deployed workspace as follows:
+
+1. Go to **Workspace Settings** > **Container Images**.
+
+
+2. Click on **Add New Container Image** and provide the **Namespace** and **Restricted Images**. Multiple images can be restricted within a namespace by separating them with commas.
+
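+For example, entering a hypothetical value such as `nginx:1.25, redis:6.2, busybox:1.36` in the **Restricted Images** field (the image names are purely illustrative) restricts all three images in the selected namespace.
+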
+
+
+
+
+
+
+
diff --git a/docs/docs-content/workspace/workspace.md b/docs/docs-content/workspace/workspace.md
new file mode 100644
index 0000000000..7b2b7518a4
--- /dev/null
+++ b/docs/docs-content/workspace/workspace.md
@@ -0,0 +1,45 @@
+---
+sidebar_label: "Workspaces"
+title: "Creating Workspaces for Spectro Cloud Clusters"
+description: "The methods of creating Workspaces"
+hide_table_of_contents: false
+sidebar_custom_props:
+  icon: "workspaces"
+tags: ["workspace"]
+---
+
+
+Palette extends its multi-cluster management and governance capabilities by introducing **Workspaces**. Workspaces enable the logical grouping of clusters and namespaces to provide application or team-specific governance and visibility into workloads, cost, and usage metrics. For example, the application or team workload may be deployed into namespaces across clusters to achieve High Availability (HA), Disaster Recovery (DR), organization-specific placement policies, etc. Grouping such namespaces and clusters into a workspace provides central management and governance in a multi-cluster distributed environment. The following sections describe various aspects of multi-cluster management via workspaces.
+
+## Namespace Management
+
+Workspaces automate the creation and deletion of namespaces common to all clusters within the workspace. A workspace can hold a set of namespaces. Spectro Cloud Palette will periodically reconcile the workspace definition and add or remove namespaces, as required, from all clusters that are part of the workspace.
+
+## Quota Control
+
+Usage quota in terms of CPU and memory usage limits is specified within the namespaces. Spectro Cloud Palette sets the specified limits across all the clusters in the namespaces.
+
+## Role Based Access Control (RBAC)
+
+Role bindings and cluster role bindings are specified within workspaces. Furthermore, these role bindings and cluster role bindings are created in every cluster within the workspaces, thus enabling centralized RBAC.
+
+## Utilization
+
+Spectro Cloud Palette reports detailed resource utilization of workloads deployed in all the namespaces in the workspace across clusters. In addition, the CPU and memory usage trends within the workspace provide valuable insights into the consumption patterns of an application distributed across clusters.
+
+## Cost Attribution
+
+Spectro Cloud Palette computes utilization costs for workloads deployed in all the namespaces that are part of the workspace across all the clusters based on the detailed resource utilization data. This can be used for internal charge-back or show-back purposes to determine the cost incurred by an application or team.
+
+## Workload Visibility
+
+Workspaces provide a workload browser to view all the workloads such as pods, deployments, jobs, stateful sets, etc., deployed in all the namespaces that are part of the workspace across all the clusters. The workload browser aggregates resources across clusters from relevant namespaces and presents them with centralized visibility.
+
+## Backup and Restore
+
+A workspace-based backup is similar to a cluster backup, with the additional coverage of multiple clusters, should the workspace include more than one. The prerequisites and detailed instructions to back up and restore clusters are specified on the [Backup and Restore](../clusters/cluster-management/backup-restore/backup-restore.md) page.
+ + +## Regex for Namespaces + +Palette leverages [Regex Pattern matching](workload-features.md#regex-for-namespaces) to select multiple namespaces to apply Role binding concurrently. When we have many namespaces to be configured for role binding, the user can provide a Regex pattern matching multiple namespaces instead of giving a single namespace. This will help select all the namespaces matching the given Regex pattern to be selected together for role binding. > diff --git a/docsearch.config.json b/docsearch.config.json new file mode 100644 index 0000000000..bf928db07a --- /dev/null +++ b/docsearch.config.json @@ -0,0 +1,28 @@ +{ + "index_name": "prod-docusaurus-librarium", + "start_urls": ["https://docs.spectrocloud.com/"], + "sitemap_urls": ["https://docs.spectrocloud.com/sitemap.xml"], + "sitemap_alternate_links": true, + "stop_urls": ["/api"], + "selectors": { + "lvl0": { + "selector": "(//ul[contains(@class,'menu__list')]//a[contains(@class, 'menu__link menu__link--sublist menu__link--active')]/text() | //nav[contains(@class, 'navbar')]//a[contains(@class, 'navbar__link--active')]/text())[last()]", + "type": "xpath", + "global": true, + "default_value": "Documentation" + }, + "lvl1": "article h1", + "lvl2": "article h2", + "lvl3": "article h3", + "lvl4": "article h4", + "lvl5": "article h5", + "lvl6": "article h6", + "text": "article p, article li" + }, + "strip_chars": " .,;:#", + "custom_settings": { + "separatorsToIndex": "_", + "attributesForFaceting": ["language", "version", "type", "docusaurus_tag"], + "attributesToRetrieve": ["hierarchy", "content", "anchor", "url", "url_without_anchor", "type"] + } +} diff --git a/docusaurus.config.js b/docusaurus.config.js new file mode 100644 index 0000000000..e2f0721710 --- /dev/null +++ b/docusaurus.config.js @@ -0,0 +1,381 @@ +// @ts-check +// Note: type annotations allow type checking and IDEs autocompletion +require("dotenv").config(); +const lightCodeTheme = require("prism-react-renderer/themes/oceanicNext"); +const darkCodeTheme = require("prism-react-renderer/themes/dracula"); +const redirects = require("./redirects"); +const { + pluginPacksAndIntegrationsData +} = require("./plugins/packs-integrations"); +const { + pluginImportFontAwesomeIcons +} = require("./plugins/font-awesome"); + +/** @type {import('@docusaurus/types').Config} */ +const config = { + title: "Spectro Cloud", + tagline: "Spectro Cloud", + favicon: "img/favicon.png", + url: "https://docs.spectrocloud.com", + baseUrl: "/", + organizationName: "Spectro Cloud", + // Usually your GitHub org/user name. + projectName: "Spectro Cloud docs", + // Usually your repo name. + + onBrokenLinks: "throw", + onBrokenMarkdownLinks: "throw", + // Even if you don't use internalization, you can use this field to set useful + // metadata like html lang. For example, if your site is Chinese, you may want + // to replace "en" with "zh-Hans". 
+ i18n: { + defaultLocale: "en", + locales: ["en"], + }, + staticDirectories: ["static", "static/assets/docs/images", "static/assets"], + headTags: [ + { + tagName: "script", + attributes: { + type: "text/plain", + "data-usercentrics": "FullStory", + src: "/scripts/fullstory.js", + }, + }, + { + tagName: "link", + attributes: { + rel: "preconnect", + href: "https://api.usercentrics.eu", + }, + }, + { + tagName: "link", + attributes: { + rel: "preload", + href: "https://app.usercentrics.eu/browser-ui/latest/loader.js", + as: "script", + }, + }, + { + tagName: "script", + attributes: { + src: "https://app.usercentrics.eu/browser-ui/latest/loader.js", + "data-settings-id": "0IhiFXOBwy0Z2U", + id: "usercentrics-cmp", + async: "true", + }, + }, + { + tagName: "link", + attributes: { + rel: "preconnect", + href: "https://www.googletagmanager.com", + }, + }, + { + tagName: "script", + attributes: { + type: "text/javascript", + "data-usercentrics": "Google Tag Manager", + src: "/scripts/googleTagManager.js", + }, + }, + ], + stylesheets: [], + presets: [ + [ + "classic", + /** @type {import('@docusaurus/preset-classic').Options} */ + { + docs: { + path: "docs/docs-content", + showLastUpdateAuthor: false, + showLastUpdateTime: true, + routeBasePath: "/", + lastVersion: "current", + includeCurrentVersion: true, + versions: { + current: { + label: "latest", + }, + }, + // exclude: ["api/v1/palette-apis-3-4"], + + sidebarPath: require.resolve("./sidebars.js"), + async sidebarItemsGenerator({ defaultSidebarItemsGenerator, ...args }) { + const { docs } = args; + const filteredDocs = docs.filter((doc) => { + return true; + }); + const sidebarItems = await defaultSidebarItemsGenerator({ + ...args, + docs: filteredDocs, + }); + // This is an override to the default sidebar items generator. + // This injects the "Privacy Settings" link at the bottom of the sidebar. + sidebarItems.push( + { + type: 'html', + value: 'Privacy Settings', + }, + ); + return sidebarItems; + }, + editUrl: "https://github.com/spectrocloud/librarium/blob/master", + }, + sitemap: { + changefreq: "weekly", + priority: 0.5, + ignorePatterns: ["/tags/**"], + filename: "sitemap.xml", + }, + theme: { + customCss: require.resolve("./src/css/custom.scss"), + }, + }, + ], + ], + plugins: [ + "docusaurus-plugin-sass", + [ + "@docusaurus/plugin-content-docs", + { + id: "api", + path: "docs/api-content/api-docs", + routeBasePath: "api", + docItemComponent: "@theme/ApiItem", + lastVersion: "current", + includeCurrentVersion: true, + versions: { + current: { + label: "latest" + }, + }, + sidebarPath: require.resolve("./apisidebar.js"), + }, + ], + [ + "docusaurus-plugin-openapi-docs", + { + // Visit https://docusaurus-openapi.tryingpan.dev/#config to learn more about this plugin's config options. 
+ id: "apidocs", + docsPluginId: "api", + config: { + palette: { + specPath: "docs/api-content/api-docs/v1/api.json", + outputDir: "docs/api-content/api-docs/v1", + downloadUrl: + "https://github.com/spectrocloud/librarium/blob/master/docs/api-content/api-docs/palette-apis.json", + sidebarOptions: { + groupPathsBy: "tag", + categoryLinkSource: "tag", + }, + template: "api.mustache", + // Customize API MDX with mustache template + hideSendButton: true, + }, + }, + }, + ], + process.env.NODE_ENV !== "production" && [ + () => ({ + name: "plugin-enable-source-map", + configureWebpack() { + return { + devtool: "source-map", + }; + }, + }), + { + id: "enable-source-map", + }, + ], + pluginPacksAndIntegrationsData, + pluginImportFontAwesomeIcons, + function () { + return { + name: "plugin-watch-custom-plugin-path", + getPathsToWatch() { + return ["plugins/font-awesome.js", "plugins/packs-integrations.js"]; + }, + }; + }, + [ + "@docusaurus/plugin-ideal-image", + { + quality: 50, + max: 1035, + steps: 4, + disableInDev: false, + }, + ], + [ + require.resolve("docusaurus-plugin-image-zoom"), + { + id: "docusaurus-plugin-image-zoom", + }, + ], + [ + "@docusaurus/plugin-client-redirects", + { + redirects: [...redirects], + }, + ], + ].filter(Boolean), + scripts: [ + { + src: `https://w.appzi.io/w.js?token=${process.env.APPZI_TOKEN}`, + defer: true, + }, + ], + themes: ["docusaurus-theme-openapi-docs"], + customFields: { + // Put your custom environment here + mendableKey: process.env.MENDABLE_API_KEY, + }, + themeConfig: + /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ + { + colorMode: { + respectPrefersColorScheme: true, + }, + docs: { + sidebar: { + hideable: false, + autoCollapseCategories: true, + }, + }, + tableOfContents: { + minHeadingLevel: 2, + maxHeadingLevel: 6, + }, + // Replace with your project's social card + image: "img/spectro-cloud-social-card.png", + navbar: { + title: "", + logo: { + href: "/", + target: "self", + width: 144, + height: 36, + alt: "Spectro cloud logo", + src: "img/spectrocloud-logo-light.svg", + srcDark: "img/spectrocloud-logo-dark.svg", + }, + items: [ + { + to: "/", + label: "Docs", + position: "left", + activeBaseRegex: "^(?!/api/).*$", + }, + { + to: "/api/introduction", + label: "API", + position: "left", + }, + { + href: "https://github.com/spectrocloud/librarium", + position: "right", + className: "header-github-link", + "aria-label": "GitHub repository", + }, + { + type: "docsVersionDropdown", + position: "left", + docsPluginId: "default" + }, + { + type: "docsVersionDropdown", + position: "left", + docsPluginId: "api", + }, + ], + hideOnScroll: true, + }, + languageTabs: [ + { + highlight: "bash", + language: "curl", + logoClass: "bash", + }, + { + highlight: "python", + language: "python", + logoClass: "python", + variant: "requests", + }, + { + highlight: "go", + language: "go", + logoClass: "go", + }, + { + highlight: "javascript", + language: "nodejs", + logoClass: "nodejs", + variant: "axios", + }, + { + highlight: "java", + language: "java", + logoClass: "java", + variant: "unirest", + }, + ], + algolia: { + // The application ID provided by Algolia + appId: process.env.ALGOLIA_APP_ID, + // Public API key: it is safe to commit it + apiKey: process.env.ALGOLIA_SEARCH_KEY, + indexName: "prod-docusaurus-librarium", + // Optional: see doc section below + contextualSearch: true, + // Optional: Specify domains where the navigation should occur through window.location instead on history.push. 
Useful when our Algolia config crawls multiple documentation sites and we want to navigate with window.location.href to them. + externalUrlRegex: "external\\.com|domain\\.com", + // Optional: Replace parts of the item URLs from Algolia. Useful when using the same search index for multiple deployments using a different baseUrl. You can use regexp or string in the `from` param. For example: localhost:3000 vs myCompany.com/docs + replaceSearchResultPathname: { + from: "/docs/", + // or as RegExp: /\/docs\// + to: "/", + }, + // Optional: Algolia search parameters + searchParameters: {}, + // Optional: path for search page that enabled by default (`false` to disable it) + searchPagePath: "search", + }, + sidebar: { + hideable: true, + }, + prism: { + defaultLanguage: "json", + theme: lightCodeTheme, + darkTheme: darkCodeTheme, + additionalLanguages: ["hcl", "bash", "json", "powershell", "go", "javascript", "rust"], + magicComments: [ + { + className: "theme-code-block-highlighted-line", + line: "highlight-next-line", + block: { + start: "highlight-start", + end: "highlight-end", + }, + }, + { + className: "code-block-error-line", + line: "This will error", + }, + ], + }, + zoom: { + selector: ".markdown-image", + background: { + light: "rgb(255, 255, 255)", + dark: "rgb(50, 50, 50)", + }, + config: {}, + }, + }, +}; +module.exports = config; \ No newline at end of file diff --git a/gatsby-browser.js b/gatsby-browser.js deleted file mode 100644 index 5fb6263fbb..0000000000 --- a/gatsby-browser.js +++ /dev/null @@ -1,6 +0,0 @@ -const React = require("react"); -const Persistent = require("./src/shared/layouts/Persistent").default; - -module.exports.wrapRootElement = ({ element, props }) => { - return {element}; -}; diff --git a/gatsby-config.js b/gatsby-config.js deleted file mode 100644 index 878244632c..0000000000 --- a/gatsby-config.js +++ /dev/null @@ -1,203 +0,0 @@ -require("dotenv").config(); -const queries = require("./src/shared/utils/algolia"); -const path = require("path"); - -const config = require("./config"); - -const plugins = [ - { - resolve: "gatsby-plugin-sitemap", - options: { - exclude: [`/glossary/`, `/glossary/*`], - }, - }, - "gatsby-plugin-styled-components", - "gatsby-plugin-react-helmet", - "gatsby-plugin-force-trailing-slashes", - { - resolve: `gatsby-plugin-react-helmet-canonical-urls`, - options: { - siteUrl: config.gatsby.siteUrl, - noQueryString: true, - }, - }, - "gatsby-plugin-antd", - { - resolve: `gatsby-source-filesystem`, - options: { - name: `shared`, - path: path.resolve(__dirname, "./src/shared/"), - }, - }, - { - resolve: `gatsby-source-filesystem`, - options: { - name: `assets`, - path: path.resolve(__dirname, "./assets/"), - }, - }, - { - resolve: "gatsby-source-filesystem", - options: { - name: "docs", - path: path.resolve(__dirname, "./content/docs"), - }, - }, - { - resolve: "gatsby-source-filesystem", - options: { - name: "api", - path: path.resolve(__dirname, "./content/api"), - }, - }, - { - resolve: `gatsby-plugin-root-import`, - options: { - assets: path.resolve(__dirname, "./assets/"), - }, - }, - `gatsby-transformer-sharp`, - "gatsby-plugin-sharp", - { - resolve: `gatsby-plugin-webfonts`, - options: { - fonts: { - google: [ - { - family: `Poppins`, - variants: [`300`, `400`, `500`, `600`, `700`, "latin"], - }, - ], - }, - }, - }, - { - resolve: "gatsby-plugin-mdx", - options: { - plugins: [ - `gatsby-remark-relative-images`, - `gatsby-remark-video`, - `gatsby-remark-images`, - `gatsby-remark-images-medium-zoom`, - 
"gatsby-remark-image-attributes", - "gatsby-remark-copy-linked-files", - ], - gatsbyRemarkPlugins: [ - { - resolve: `gatsby-remark-relative-images`, - options: { - staticFolderName: "./assets/docs/images/", - }, - }, - { - resolve: "gatsby-remark-video", - options: { - width: 800, - height: "auto", - preload: "auto", - muted: false, - autoplay: false, - playsinline: true, - controls: true, - loop: false, - }, - }, - - { - resolve: "gatsby-remark-images", - options: { - maxWidth: 1035, - quality: 100, - linkImagesToOriginal: false, - disableBgImageOnAlpha: true, - }, - }, - { - resolve: "gatsby-remark-image-attributes", - }, - { - resolve: "gatsby-remark-copy-linked-files", - }, - ], - extensions: [".mdx", ".md"], - }, - }, - { - resolve: "gatsby-plugin-google-tagmanager", - options: { - id: "GTM-T2F9ZMS", - includeInDevelopment: false, - }, - }, - { - resolve: "gatsby-plugin-react-svg", - options: { - rule: { - include: /icons/, - }, - }, - }, -]; - -// check and add algolia -if ( - config.header.search && - config.header.search.enabled && - config.header.search.algoliaAppId && - config.header.search.algoliaAdminKey -) { - plugins.push({ - resolve: `gatsby-plugin-algolia`, - options: { - appId: config.header.search.algoliaAppId, // algolia application id - apiKey: config.header.search.algoliaAdminKey, // algolia admin key to index - queries, - chunkSize: 10000, // default: 1000 - enablePartialUpdates: true, - matchFields: ["slug", "modifiedTime"], - }, - }); -} -// check and add pwa functionality -if (config.pwa && config.pwa.enabled && config.pwa.manifest) { - plugins.push({ - resolve: `gatsby-plugin-manifest`, - options: { ...config.pwa.manifest }, - }); - plugins.push({ - resolve: "gatsby-plugin-offline", - options: { - appendScript: require.resolve(`./src/custom-sw-code.js`), - }, - }); -} else { - plugins.push("gatsby-plugin-remove-serviceworker"); -} - -// check and remove trailing slash -if (config.gatsby && !config.gatsby.trailingSlash) { - plugins.push("gatsby-plugin-remove-trailing-slashes"); -} - -module.exports = { - pathPrefix: config.gatsby.pathPrefix, - siteMetadata: { - title: config.siteMetadata.title, - description: config.siteMetadata.description, - docsLocation: config.siteMetadata.docsLocation, - apiLocation: config.siteMetadata.apiLocation, - ogImage: config.siteMetadata.ogImage, - favicon: config.siteMetadata.favicon, - logo: { - link: config.header.logoLink ? 
config.header.logoLink : "/", - image: config.header.logo, - }, // backwards compatible - headerTitle: config.header.title, - githubUrl: config.header.githubUrl, - helpUrl: config.header.helpUrl, - tweetText: config.header.tweetText, - headerLinks: config.header.links, - siteUrl: config.gatsby.siteUrl, - }, - plugins: plugins, -}; diff --git a/gatsby-node.js b/gatsby-node.js deleted file mode 100644 index be1cf6b69d..0000000000 --- a/gatsby-node.js +++ /dev/null @@ -1,227 +0,0 @@ -const componentWithMDXScope = require("gatsby-plugin-mdx/component-with-mdx-scope"); -const redirects = require("./src/shared/utils/redirects"); -const path = require("path"); -const startCase = require("lodash.startcase"); - -exports.createPages = async ({ graphql, actions }) => { - const { createPage, createRedirect } = actions; - - redirects.forEach((redirect) => { - createRedirect(redirect); - }); - - const result = await graphql(` - { - allMdx { - edges { - node { - fields { - id - isDocsPage - slug - } - } - } - } - } - `); - - result.data.allMdx.edges.forEach(({ node }) => { - if (node.fields.slug === "/glossary") { - return; - } - - let component = path.resolve("./src/templates/docs.js"); - // if (node.fields.slug.startsWith('/glossary')) { - // component = path.resolve('../glossary/src/templates/docs.js'); - // } - - if (node.fields.slug.startsWith("/api")) { - component = path.resolve("./src/templates/api.js"); - } - - const slug = node.fields.slug ? node.fields.slug : "/"; - - // Disable glossary pages - if (node.fields.slug.startsWith("/glossary/")) { - return; - } - - createPage({ - path: slug, - component, - context: { - id: node.fields.id, - }, - }); - }); -}; - -exports.onCreateWebpackConfig = ({ actions, getConfig }) => { - actions.setWebpackConfig({ - resolve: { - modules: [path.resolve(__dirname, "src"), "node_modules"], - alias: { - $components: path.resolve(__dirname, "src/components"), - buble: "@philpl/buble", // to reduce bundle size - }, - }, - }); -}; - -exports.onCreateBabelConfig = ({ actions }) => { - actions.setBabelPlugin({ - name: "@babel/plugin-proposal-export-default-from", - }); -}; - -exports.onCreateNode = ({ node, getNode, actions }) => { - const { createNodeField } = actions; - - if (node.internal.type === `Mdx`) { - const isDocsPage = !!node.fileAbsolutePath.includes("/content/docs/"); - const isApiPage = !!node.fileAbsolutePath.includes("/content/api/"); - const parent = getNode(node.parent); - - let value = parent.relativePath.replace(parent.ext, ""); - - const slugs = value.split("/").map((slugPart, index, slugs) => { - const [_, ...rest] = slugPart.split("-"); - if (index === slugs.length - 1) { - createNodeField({ - name: `index`, - node, - value: _, - }); - } - - if (rest.length === 0) { - return _; - } - - return rest.join("-"); - }); - - value = slugs.join("/"); - if (value === "index") { - value = ""; - } - - let prefix = "/glossary"; - if (isDocsPage) { - prefix = ""; - } - - if (isApiPage) { - prefix = "/api"; - } - - createNodeField({ - name: `slug`, - node, - value: `${prefix}/${value}`, - }); - - createNodeField({ - name: "id", - node, - value: node.id, - }); - - createNodeField({ - name: "title", - node, - value: node.frontmatter.title || startCase(parent.name), - }); - - createNodeField({ - name: "icon", - node, - value: node.frontmatter.icon, - }); - - createNodeField({ - name: "hiddenFromNav", - node, - value: node.frontmatter.hiddenFromNav, - }); - - createNodeField({ - name: "hideToC", - node, - value: node.frontmatter.hideToC, - }); - - 
createNodeField({ - name: "hideToCSidebar", - node, - value: node.frontmatter.hideToCSidebar, - }); - - createNodeField({ - name: "fullWidth", - node, - value: node.frontmatter.fullWidth, - }); - - createNodeField({ - name: "isDocsPage", - node, - value: isDocsPage, - }); - - createNodeField({ - name: "isApiPage", - node, - value: isApiPage, - }); - - createNodeField({ - name: "isIntegration", - node, - value: node.frontmatter.isIntegration, - }); - - createNodeField({ - name: "category", - node, - value: node.frontmatter.category, - }); - - createNodeField({ - name: "logoUrl", - node, - value: node.frontmatter.logoUrl, - }); - - createNodeField({ - name: "api", - node, - value: node.frontmatter.api, - }); - - createNodeField({ - name: "hideMenuSidebar", - node, - value: node.frontmatter.hideMenuSidebar, - }); - - if (node.frontmatter.api) { - const fileAbsolutePaths = node.fileAbsolutePath.split("/content/api/"); - const versionDirectory = fileAbsolutePaths[1].split("/").shift(); - const endpointsPath = [ - fileAbsolutePaths[0], - "api", - "content", - versionDirectory, - "api.json", - ].join("/"); - - createNodeField({ - name: "version", - node, - value: versionDirectory, - }); - } - } -}; diff --git a/gatsby-ssr.js b/gatsby-ssr.js deleted file mode 100644 index 93c41e70c6..0000000000 --- a/gatsby-ssr.js +++ /dev/null @@ -1,48 +0,0 @@ -import React from "react"; -import Persistent from "./src/shared/layouts/Persistent"; - -const fsScript = ` -window['_fs_debug'] = false; -window['_fs_host'] = 'fullstory.com'; -window['_fs_script'] = 'edge.fullstory.com/s/fs.js'; -window['_fs_org'] = '${process.env.GATSBY_FULLSTORY_ORGID}'; -window['_fs_namespace'] = 'FS'; -(function(m,n,e,t,l,o,g,y){ - if (e in m) {if(m.console && m.console.log) { m.console.log('FullStory namespace conflict. 
Please set window["_fs_namespace"].');} return;} - g=m[e]=function(a,b,s){g.q?g.q.push([a,b,s]):g._api(a,b,s);};g.q=[]; - o=n.createElement(t);o.async=1;o.crossOrigin='anonymous';o.src='https://'+_fs_script; - y=n.getElementsByTagName(t)[0];y.parentNode.insertBefore(o,y); - g.identify=function(i,v,s){g(l,{uid:i},s);if(v)g(l,v,s)};g.setUserVars=function(v,s){g(l,v,s)};g.event=function(i,v,s){g('event',{n:i,p:v},s)}; - g.anonymize=function(){g.identify(!!0)}; - g.shutdown=function(){g("rec",!1)};g.restart=function(){g("rec",!0)}; - g.log = function(a,b){g("log",[a,b])}; - g.consent=function(a){g("consent",!arguments.length||a)}; - g.identifyAccount=function(i,v){o='account';v=v||{};v.acctId=i;g(o,v)}; - g.clearUserCookie=function(){}; - g.setVars=function(n, p){g('setVars',[n,p]);}; - g._w={};y='XMLHttpRequest';g._w[y]=m[y];y='fetch';g._w[y]=m[y]; - if(m[y])m[y]=function(){return g._w[y].apply(this,arguments)}; - g._v="1.3.0"; -})(window,document,window['_fs_namespace'],'script','user'); -`; - -const HeadComponents = [ - process.env.GATSBY_APPZI_TOKEN && ( - - - ); -}; diff --git a/src/shared/utils/redirects.js b/src/shared/utils/redirects.js deleted file mode 100644 index 3904fc4a55..0000000000 --- a/src/shared/utils/redirects.js +++ /dev/null @@ -1,291 +0,0 @@ -const redirects = [ - { - fromPath: `/api/`, - toPath: `/api/introduction`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/api`, - toPath: `/api/introduction`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/nested-clusters`, - toPath: `/clusters/palette-virtual-clusters`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/sandbox-clusters`, - toPath: `/clusters/palette-virtual-clusters`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/sandbox-clusters/cluster-quickstart`, - toPath: `/clusters/palette-virtual-clusters/virtual-cluster-quickstart`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/devx/sandbox-clusters`, - toPath: `/devx/palette-virtual-clusters`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/terraform/resources`, - toPath: `https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/terraform/datasources`, - toPath: `https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/edge/virtualized`, - toPath: `/clusters/edge`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/troubleshooting/palette-namespaces-podes`, - toPath: `/architecture/palette-namespaces-podes`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/troubleshooting/Network-Communications-and-Ports`, - toPath: `/architecture/networking-ports`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/troubleshooting/SAAS-Network-Communications-and-Ports`, - toPath: `/architecture/networking-ports`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/troubleshooting/orchestration-spectrocloud`, - toPath: `/architecture/orchestration-spectrocloud`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/devx/registries/helm-registry`, - toPath: `/registries-and-packs/helm-charts`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/devx/registries/oci-registry`, - toPath: 
`/registries-and-packs/oci-registry`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/palette-virtual-clusters/virtual-cluster-quickstart`, - toPath: `/clusters/palette-virtual-clusters/add-virtual-cluster-to-host-cluster/`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/introduction/architecture-overview`, - toPath: `/architecture/architecture-overview`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/introduction/what-is`, - toPath: `/introduction`, - redirectInBrowser: true, - isPermanent: true, - }, - { - - fromPath: `/getting-started/free-cloud-credit`, - toPath: `/getting-started/palette-freemium`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/public-cloud/eks`, - toPath: `/clusters/public-cloud/aws/eks`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/public-cloud/aks`, - toPath: `/clusters/public-cloud/azure/eks`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/integrations/minio-operator`, - toPath: `/integrations/minio`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/knowledgebase/how-to/reverse-proxy-dashboard`, - toPath: `/clusters/cluster-management/kubernetes-dashboard`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/devx/cluster-groups`, - toPath: `/clusters/cluster-groups`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/devx/cluster-groups/ingress-cluster-group`, - toPath: `/clusters/cluster-groups/ingress-cluster-group`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/devx/dev-land-explore`, - toPath: `/devx`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/cluster-management/.ssh`, - toPath: `/clusters/cluster-management/ssh-keys`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/edge/install/installer-image`, - toPath: `/clusters/edge/site-deployment/site-installation`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/edge/native`, - toPath: `/clusters/edge/site-deployment`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/edge/installer-image`, - toPath: `/clusters/edge/site-deployment/site-installation`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/edge/site-deployment/installer`, - toPath: `/clusters/edge/site-deployment/site-installation`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/knowledgebase/tutorials/terraform-tutorial`, - toPath: `/terraform`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/knowledgebase/tutorials/cks-tutorial`, - toPath: `/introduction`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/knowledgebase/tutorials/dev-engine`, - toPath: `/knowledgebase/tutorials`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/knowledgebase/tutorials/dev-engine/deploy-app`, - toPath: `/knowledgebase/tutorials`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/kubernetes-knowlege-hub/how-to/deploy-stateless-frontend-app`, - toPath: `/kubernetes-knowlege-hub/tutorials/deploy-stateless-frontend-app`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/devx/resource-quota`, - toPath: `/devx/manage-dev-engine/resource-quota`, - redirectInBrowser: true, - isPermanent: true, - },{ - fromPath: `/devx/registries`, - toPath: 
`/devx/manage-dev-engine/registries`, - redirectInBrowser: true, - isPermanent: true, - },{ - fromPath: `/devx/virtual-clusters`, - toPath: `/devx/palette-virtual-clusters`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/edge/edgeforge-workflow/build-kairos-os`, - toPath: `/clusters/edge/edgeforge-workflow/palette-canvos`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/edge/edgeforge-workflow/build-images`, - toPath: `/clusters/edge/edgeforge-workflow/palette-canvos`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/integrations/ubuntu-k3s`, - toPath: `/integrations/ubuntu`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/brownfield-clusters`, - toPath: `/clusters/imported-clusters`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/integrations/oidc-eks`, - toPath: `/integrations/kubernetes#configurecustomoidc`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/clusters/palette-virtual-clusters/add-virtual-cluster-to-host-cluster`, - toPath: `/clusters/palette-virtual-clusters/deploy-virtual-cluster`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/vm-management/vm-packs-profiles/enable-vm-dashboard`, - toPath: `/vm-management/vm-packs-profiles/add-roles-and-role-bindings`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/vm-management/vm-packs-profiles/vm-dashboard`, - toPath: `/vm-management/create-manage-vm/standard-vm-operations`, - redirectInBrowser: true, - isPermanent: true, - }, - { - fromPath: `/vm-management/vm-packs-profiles/create-vm-dashboard-profile`, - toPath: `/vm-management/vm-packs-profiles/create-vmo-profile`, - redirectInBrowser: true, - isPermanent: true, - } -]; - -module.exports = redirects; diff --git a/src/templates/api.js b/src/templates/api.js deleted file mode 100644 index a517f987d8..0000000000 --- a/src/templates/api.js +++ /dev/null @@ -1,286 +0,0 @@ -import React, { useMemo } from "react"; -import { graphql } from "gatsby"; -import Layout from "shared/layouts/Default"; -import DocsLayout from "shared/layouts/Docs"; -import ApiSidebar from "shared/components/common/ApiSidebar"; -import Swagger from "shared/components/common/Swagger"; - -// TODO use graphql to get api.jsons -import v1 from "../../content/api/v1/api.json"; -import { GenericSeoSchema } from "shared/utils/markupSchemas"; - -const APIS = { - v1, -}; - -function fillInApiObj(index, arr, operations, obj) { - const elem = arr[index]; - if (index === arr.length - 1) { - obj[elem] = obj[elem] || {}; - obj[elem]["operations"] = operations; - return; - } - if (!obj[elem]) { - obj[elem] = { - status: false, - }; - } - fillInApiObj(index + 1, arr, operations, obj[elem]); -} - -function formatApi(endPoints) { - const apiObj = {}; - for (let index = 0; index < endPoints.length; index++) { - let endPoint = endPoints[index].path; - let operations = endPoints[index].operations; - let arr = endPoint.split("/"); - arr.shift(); - fillInApiObj(0, arr, operations, apiObj); - } - return apiObj; -} - -export default function MDXLayout({ data = {} }) { - const { - allMdx, - mdx, - site: { - siteMetadata: { apiLocation }, - }, - } = data; - - const menu = useMemo(() => { - return DocsLayout.calculateMenuTree(allMdx.edges, { base: "/api", trailingSlash: true }); - }, [allMdx.edges]); - - const api = APIS[mdx?.fields?.version || "v1"]; - - const apiEndpoints = useMemo( - () => - Object.keys(api.paths).map((path) => { 
- return { - path, - operations: Object.keys(api.paths[path]) - .filter((method) => method !== "parameters") - .map((method) => { - const apiMethod = api.paths[path][method]; - const parameters = apiMethod?.parameters; - return { - method, - ...apiMethod, - parameters: parameters?.filter((parameter) => parameter.name !== "body") || [], - pathParameters: api.paths[path]?.parameters || [], - }; - }), - }; - }), - [api.paths] - ); - - const apiObj = formatApi(apiEndpoints); - - if (Array.isArray(mdx.frontmatter?.paths)) { - let urlPathName = mdx.frontmatter?.paths[0]; - if (urlPathName) { - const pathName = urlPathName.split("/"); - const lastUrlPathname = pathName[pathName.length - 1]; - if (apiObj.v1[lastUrlPathname]) apiObj.v1[lastUrlPathname].status = true; - } - } - - function renderAPIDoc() { - // TODO refactor this function - const paths = mdx.frontmatter?.paths; - if (!paths || !mdx?.fields?.version) { - return null; - } - - const api = APIS[mdx?.fields?.version]; - - function renderProperties(defObject) { - // if there are no properties, render the format or type (this seems to apply only for timestamps) - if (!defObject?.properties) { - return defObject?.format || defObject.type; - } - - return Object.keys(defObject?.properties).reduce((propertiesAcc, property) => { - const definitionProperty = defObject.properties[property]; - const definitionPropertyRef = definitionProperty?.$ref || definitionProperty?.items?.$ref; - - const propertyName = definitionProperty?.description?.includes("Deprecated") - ? `${property} deprecated` - : property; - // if the property contains a ref, call again extractDefinition - if (definitionPropertyRef) { - return { - ...propertiesAcc, - [propertyName]: - definitionProperty.type === "array" - ? [extractDefinition(definitionPropertyRef)] - : extractDefinition(definitionPropertyRef), - }; - } else { - // if property value is an array, render what type the elements are - if (definitionProperty.type === "array") { - return { - ...propertiesAcc, - [propertyName]: [definitionProperty?.items.type || definitionProperty.type], - }; - } else { - // if the property value is an object that contains the properties key - // call again renderProperties function in case it has refs inside - // otherwise render the property type - return { - ...propertiesAcc, - [propertyName]: definitionProperty?.properties - ? renderProperties(definitionProperty) - : definitionProperty.type, - }; - } - } - }, {}); - } - - function extractDefinition(ref) { - const definitionArray = ref?.split("/") || []; - const def = definitionArray[definitionArray.length - 1]; - const defObject = api.definitions[def]; - - // the response has no schema - if (!defObject) { - return null; - } - - // the response schema is type array - encounter only 2 times and seems to always have the items prop - if (defObject?.type === "array") { - return { - items: extractDefinition(defObject.items.$ref), - }; - } - - return renderProperties(defObject); - } - - const endpoints = Object.keys(api.paths) - .filter((path) => paths.some((entry) => path.startsWith(entry))) - .map((path) => { - return { - path, - operations: Object.keys(api.paths[path]) - .filter((method) => method !== "parameters") - .map((method) => { - const apiMethod = api.paths[path][method]; - const parameters = apiMethod?.parameters; - const responses = apiMethod?.responses; - const bodyParameter = parameters?.find((parameter) => parameter.name === "body"); - let body; - - if (bodyParameter) { - body = bodyParameter.schema?.$ref - ? 
extractDefinition(bodyParameter.schema?.$ref) - : renderProperties(bodyParameter.schema); - } - - return { - method, - ...apiMethod, - body: JSON.stringify(body, null, 2), - parameters: parameters?.filter((parameter) => parameter.name !== "body") || [], - pathParameters: api.paths[path]?.parameters || [], - responseMessages: Object.keys(responses || {}).map((response) => { - return { - code: response, - ...responses[response], - schema: JSON.stringify( - extractDefinition(responses[response]?.schema?.$ref), - null, - 2 - ), - }; - }), - }; - }), - }; - }); - - return ; - } - - return ( - - } - extraMenu={} - > - - - - ); -} - -export const pageQuery = graphql` - query ($id: String!) { - site { - siteMetadata { - title - apiLocation - } - } - mdx(fields: { id: { eq: $id } }) { - fields { - id - title - slug - version - } - body - tableOfContents - parent { - ... on File { - relativePath - } - } - frontmatter { - metaTitle - metaDescription - fullWidth - hideToC - paths - hideToCSidebar - hideMenuSidebar - api - } - } - allMdx(filter: { fields: { isApiPage: { eq: true } } }) { - edges { - node { - tableOfContents - fields { - slug - title - icon - index - hiddenFromNav - api - version - } - } - } - } - } -`; diff --git a/src/templates/docs.js b/src/templates/docs.js deleted file mode 100644 index 8114c401bf..0000000000 --- a/src/templates/docs.js +++ /dev/null @@ -1,88 +0,0 @@ -import React, { useMemo } from "react"; -import { graphql } from "gatsby"; - -import Layout from "shared/layouts/Default"; -import DocsLayout from "shared/layouts/Docs"; -import { GenericSeoSchema } from "shared/utils/markupSchemas"; - -export default function MDXLayout({ data = {}, children, ...rest }) { - const { - allMdx, - mdx, - site: { - siteMetadata: { docsLocation }, - }, - } = data; - - const menu = useMemo(() => { - return DocsLayout.calculateMenuTree( - allMdx.edges.filter((edge) => !!edge.node.fields.isDocsPage), - { trailingSlash: true } - ); - }, [allMdx.edges]); - - return ( - - - - - ); -} - -export const pageQuery = graphql` - query ($id: String!) { - site { - siteMetadata { - title - docsLocation - } - } - mdx(fields: { id: { eq: $id } }) { - fields { - id - title - slug - } - body - tableOfContents - parent { - ... 
on File { - relativePath - } - } - frontmatter { - metaTitle - metaDescription - fullWidth - hideToC - hideToCSidebar - hideMenuSidebar - } - } - allMdx { - edges { - node { - tableOfContents - fields { - slug - title - icon - index - hiddenFromNav - isDocsPage - } - } - } - } - } -`; diff --git a/src/theme/CodeBlock/Container/index.js b/src/theme/CodeBlock/Container/index.js new file mode 100644 index 0000000000..ce665eb41b --- /dev/null +++ b/src/theme/CodeBlock/Container/index.js @@ -0,0 +1,21 @@ +import React from 'react'; +import clsx from 'clsx'; +import {ThemeClassNames, usePrismTheme} from '@docusaurus/theme-common'; +import {getPrismCssVariables} from '@docusaurus/theme-common/internal'; +import styles from './styles.module.css'; +export default function CodeBlockContainer({as: As, ...props}) { + const prismTheme = usePrismTheme(); + const prismCssVariables = getPrismCssVariables(prismTheme); + return ( + + ); +} diff --git a/src/theme/CodeBlock/Container/styles.module.css b/src/theme/CodeBlock/Container/styles.module.css new file mode 100644 index 0000000000..3c799683f2 --- /dev/null +++ b/src/theme/CodeBlock/Container/styles.module.css @@ -0,0 +1,7 @@ +.codeBlockContainer { + background: var(--prism-background-color); + color: var(--prism-color); + margin-bottom: var(--ifm-leading); + box-shadow: var(--ifm-global-shadow-lw); + border-radius: var(--ifm-code-border-radius); +} diff --git a/src/theme/CodeBlock/Content/Element.js b/src/theme/CodeBlock/Content/Element.js new file mode 100644 index 0000000000..dbe5604970 --- /dev/null +++ b/src/theme/CodeBlock/Content/Element.js @@ -0,0 +1,17 @@ +import React from 'react'; +import clsx from 'clsx'; +import Container from '@theme/CodeBlock/Container'; +import styles from './styles.module.css'; +//
<pre> tags in markdown map to CodeBlocks. They may contain JSX children. When
+// the children is not a simple string, we just return a styled block without
+// actually highlighting.
+export default function CodeBlockJSX({children, className}) {
+  return (
+    
+      {children}
+    
+  );
+}
diff --git a/src/theme/CodeBlock/Content/String.js b/src/theme/CodeBlock/Content/String.js
new file mode 100644
index 0000000000..3f3604927c
--- /dev/null
+++ b/src/theme/CodeBlock/Content/String.js
@@ -0,0 +1,93 @@
+import React from "react";
+import clsx from "clsx";
+import { useThemeConfig, usePrismTheme } from "@docusaurus/theme-common";
+import {
+  parseCodeBlockTitle,
+  parseLanguage,
+  parseLines,
+  containsLineNumbers,
+  useCodeWordWrap,
+} from "@docusaurus/theme-common/internal";
+import Highlight, { defaultProps } from "prism-react-renderer";
+import Line from "@theme/CodeBlock/Line";
+import CopyButton from "@theme/CodeBlock/CopyButton";
+import WordWrapButton from "@theme/CodeBlock/WordWrapButton";
+import Container from "@theme/CodeBlock/Container";
+import styles from "./styles.module.css";
+export default function CodeBlockString({
+  children,
+  className: blockClassName = "",
+  metastring,
+  title: titleProp,
+  showLineNumbers: showLineNumbersProp,
+  language: languageProp,
+  hideClipboard = false,
+}) {
+  const {
+    prism: { defaultLanguage, magicComments },
+  } = useThemeConfig();
+  const language = languageProp ?? parseLanguage(blockClassName) ?? defaultLanguage;
+  const prismTheme = usePrismTheme();
+  const wordWrap = useCodeWordWrap();
+  // We still parse the metastring in case we want to support more syntax in the
+  // future. Note that MDX doesn't strip quotes when parsing metastring:
+  // "title=\"xyz\"" => title: "\"xyz\""
+  const title = parseCodeBlockTitle(metastring) || titleProp;
+  const { lineClassNames, code } = parseLines(children, {
+    metastring,
+    language,
+    magicComments,
+  });
+  const showLineNumbers = showLineNumbersProp ?? containsLineNumbers(metastring);
+  return (
+    
+      {title && 
{title}
} +
+ + {({ className, tokens, getLineProps, getTokenProps }) => ( +
+              
+                {tokens.map((line, i) => (
+                  
+                ))}
+              
+            
+ )} +
+
+ {(wordWrap.isEnabled || wordWrap.isCodeScrollable) && ( + wordWrap.toggle()} + isEnabled={wordWrap.isEnabled} + /> + )} + {!hideClipboard && } +
+
+
+ ); +} diff --git a/src/theme/CodeBlock/Content/styles.module.css b/src/theme/CodeBlock/Content/styles.module.css new file mode 100644 index 0000000000..c2103cd7ea --- /dev/null +++ b/src/theme/CodeBlock/Content/styles.module.css @@ -0,0 +1,80 @@ +.codeBlockContent { + position: relative; + /* rtl:ignore */ + direction: ltr; + border-radius: inherit; +} + +.codeBlockTitle { + border-bottom: 1px solid var(--ifm-color-emphasis-300); + font-size: var(--ifm-code-font-size); + font-weight: 500; + padding: 0.75rem var(--ifm-pre-padding); + border-top-left-radius: inherit; + border-top-right-radius: inherit; +} + +.codeBlock { + --ifm-pre-background: var(--prism-background-color); + margin: 0; + padding: 0; +} + +.codeBlockTitle + .codeBlockContent .codeBlock { + border-top-left-radius: 0; + border-top-right-radius: 0; +} + +.codeBlockStandalone { + padding: 0; +} + +.codeBlockLines { + font: inherit; + /* rtl:ignore */ + float: left; + min-width: 100%; + padding: var(--ifm-pre-padding); +} + +.codeBlockLinesWithNumbering { + display: table; + padding: var(--ifm-pre-padding) 0; +} + +@media print { + .codeBlockLines { + white-space: pre-wrap; + } +} + +.buttonGroup { + display: flex; + column-gap: 0.2rem; + position: absolute; + /* rtl:ignore */ + right: calc(var(--ifm-pre-padding) / 2); + top: calc(var(--ifm-pre-padding) / 2); +} + +.buttonGroup button { + display: flex; + align-items: center; + background: var(--prism-background-color); + color: var(--prism-color); + border: 1px solid var(--ifm-color-emphasis-300); + border-radius: var(--ifm-global-radius); + padding: 0.4rem; + line-height: 0; + transition: opacity var(--ifm-transition-fast) ease-in-out; + opacity: 0; +} + +.buttonGroup button:focus-visible, +.buttonGroup button:hover { + opacity: 1 !important; +} + +:global(.theme-code-block:hover) .buttonGroup button { + opacity: 0.4; +} diff --git a/src/theme/CodeBlock/CopyButton/index.js b/src/theme/CodeBlock/CopyButton/index.js new file mode 100644 index 0000000000..57cf0dd921 --- /dev/null +++ b/src/theme/CodeBlock/CopyButton/index.js @@ -0,0 +1,54 @@ +import React, {useCallback, useState, useRef, useEffect} from 'react'; +import clsx from 'clsx'; +// @ts-expect-error: TODO, we need to make theme-classic have type: module +import copy from 'copy-text-to-clipboard'; +import {translate} from '@docusaurus/Translate'; +import IconCopy from '@theme/Icon/Copy'; +import IconSuccess from '@theme/Icon/Success'; +import styles from './styles.module.css'; +export default function CopyButton({code, className}) { + const [isCopied, setIsCopied] = useState(false); + const copyTimeout = useRef(undefined); + const handleCopyCode = useCallback(() => { + copy(code); + setIsCopied(true); + copyTimeout.current = window.setTimeout(() => { + setIsCopied(false); + }, 1000); + }, [code]); + useEffect(() => () => window.clearTimeout(copyTimeout.current), []); + return ( + + ); +} diff --git a/src/theme/CodeBlock/CopyButton/styles.module.css b/src/theme/CodeBlock/CopyButton/styles.module.css new file mode 100644 index 0000000000..d5268e900b --- /dev/null +++ b/src/theme/CodeBlock/CopyButton/styles.module.css @@ -0,0 +1,40 @@ +:global(.theme-code-block:hover) .copyButtonCopied { + opacity: 1 !important; +} + +.copyButtonIcons { + position: relative; + width: 1.125rem; + height: 1.125rem; +} + +.copyButtonIcon, +.copyButtonSuccessIcon { + position: absolute; + top: 0; + left: 0; + fill: currentColor; + opacity: inherit; + width: inherit; + height: inherit; + transition: all var(--ifm-transition-fast) ease; +} + 
+.copyButtonSuccessIcon { + top: 50%; + left: 50%; + transform: translate(-50%, -50%) scale(0.33); + opacity: 0; + color: #00d600; +} + +.copyButtonCopied .copyButtonIcon { + transform: scale(0.33); + opacity: 0; +} + +.copyButtonCopied .copyButtonSuccessIcon { + transform: translate(-50%, -50%) scale(1); + opacity: 1; + transition-delay: 0.075s; +} diff --git a/src/theme/CodeBlock/Line/index.js b/src/theme/CodeBlock/Line/index.js new file mode 100644 index 0000000000..f36761806a --- /dev/null +++ b/src/theme/CodeBlock/Line/index.js @@ -0,0 +1,34 @@ +import React from 'react'; +import clsx from 'clsx'; +import styles from './styles.module.css'; +export default function CodeBlockLine({ + line, + classNames, + showLineNumbers, + getLineProps, + getTokenProps, +}) { + if (line.length === 1 && line[0].content === '\n') { + line[0].content = ''; + } + const lineProps = getLineProps({ + line, + className: clsx(classNames, showLineNumbers && styles.codeLine), + }); + const lineTokens = line.map((token, key) => ( + + )); + return ( + + {showLineNumbers ? ( + <> + + {lineTokens} + + ) : ( + lineTokens + )} +
+
+ ); +} diff --git a/src/theme/CodeBlock/Line/styles.module.css b/src/theme/CodeBlock/Line/styles.module.css new file mode 100644 index 0000000000..7c28ed9aa3 --- /dev/null +++ b/src/theme/CodeBlock/Line/styles.module.css @@ -0,0 +1,45 @@ +/* Intentionally has zero specificity, so that to be able to override +the background in custom CSS file due bug https://github.com/facebook/docusaurus/issues/3678 */ +:where(:root) { + --docusaurus-highlighted-code-line-bg: rgb(72 77 91); +} + +:where([data-theme='dark']) { + --docusaurus-highlighted-code-line-bg: rgb(100 100 100); +} + +:global(.theme-code-block-highlighted-line) { + background-color: var(--docusaurus-highlighted-code-line-bg); + display: block; + margin: 0 calc(-1 * var(--ifm-pre-padding)); + padding: 0 var(--ifm-pre-padding); +} + +.codeLine { + display: table-row; + counter-increment: line-count; +} + +.codeLineNumber { + display: table-cell; + text-align: right; + width: 1%; + position: sticky; + left: 0; + padding: 0 var(--ifm-pre-padding); + background: var(--ifm-pre-background); + overflow-wrap: normal; +} + +.codeLineNumber::before { + content: counter(line-count); + opacity: 0.4; +} + +:global(.theme-code-block-highlighted-line) .codeLineNumber::before { + opacity: 0.8; +} + +.codeLineContent { + padding-right: var(--ifm-pre-padding); +} diff --git a/src/theme/CodeBlock/WordWrapButton/index.js b/src/theme/CodeBlock/WordWrapButton/index.js new file mode 100644 index 0000000000..3a4136b630 --- /dev/null +++ b/src/theme/CodeBlock/WordWrapButton/index.js @@ -0,0 +1,27 @@ +import React from 'react'; +import clsx from 'clsx'; +import {translate} from '@docusaurus/Translate'; +import IconWordWrap from '@theme/Icon/WordWrap'; +import styles from './styles.module.css'; +export default function WordWrapButton({className, onClick, isEnabled}) { + const title = translate({ + id: 'theme.CodeBlock.wordWrapToggle', + message: 'Toggle word wrap', + description: + 'The title attribute for toggle word wrapping button of code block lines', + }); + return ( + + ); +} diff --git a/src/theme/CodeBlock/WordWrapButton/styles.module.css b/src/theme/CodeBlock/WordWrapButton/styles.module.css new file mode 100644 index 0000000000..fdbc894646 --- /dev/null +++ b/src/theme/CodeBlock/WordWrapButton/styles.module.css @@ -0,0 +1,8 @@ +.wordWrapButtonIcon { + width: 1.2rem; + height: 1.2rem; +} + +.wordWrapButtonEnabled .wordWrapButtonIcon { + color: var(--ifm-color-primary); +} diff --git a/src/theme/CodeBlock/index.js b/src/theme/CodeBlock/index.js new file mode 100644 index 0000000000..7f4410efa3 --- /dev/null +++ b/src/theme/CodeBlock/index.js @@ -0,0 +1,32 @@ +import React, {isValidElement} from 'react'; +import useIsBrowser from '@docusaurus/useIsBrowser'; +import ElementContent from '@theme/CodeBlock/Content/Element'; +import StringContent from '@theme/CodeBlock/Content/String'; +/** + * Best attempt to make the children a plain string so it is copyable. If there + * are react elements, we will not be able to copy the content, and it will + * return `children` as-is; otherwise, it concatenates the string children + * together. + */ +function maybeStringifyChildren(children) { + if (React.Children.toArray(children).some((el) => isValidElement(el))) { + return children; + } + // The children is now guaranteed to be one/more plain strings + return Array.isArray(children) ? 
+}
+export default function CodeBlock({children: rawChildren, ...props}) {
+  // The Prism theme on SSR is always the default theme but the site theme can
+  // be in a different mode. React hydration doesn't update DOM styles that come
+  // from SSR. Hence force a re-render after mounting to apply the current
+  // relevant styles.
+  const isBrowser = useIsBrowser();
+  const children = maybeStringifyChildren(rawChildren);
+  const CodeBlockComp =
+    typeof children === 'string' ? StringContent : ElementContent;
+  return (
+    <CodeBlockComp key={String(isBrowser)} {...props}>
+      {children}
+    </CodeBlockComp>
+  );
+}
diff --git a/src/theme/DocCard/index.js b/src/theme/DocCard/index.js
new file mode 100644
index 0000000000..e74cd63d28
--- /dev/null
+++ b/src/theme/DocCard/index.js
@@ -0,0 +1,85 @@
+import React from "react";
+import clsx from "clsx";
+import Link from "@docusaurus/Link";
+import { findFirstCategoryLink, useDocById } from "@docusaurus/theme-common/internal";
+import isInternalUrl from "@docusaurus/isInternalUrl";
+import { translate } from "@docusaurus/Translate";
+import styles from "./styles.module.css";
+import { Tooltip } from "antd";
+function CardContainer({ href, children }) {
+  return (
+    <Link href={href} className={clsx("card padding--lg", styles.cardContainer)}>
+      {children}
+    </Link>
+  );
+}
+function CardLayout({ href, icon, title, description }) {
+  return (
+    <CardContainer href={href}>
+      {title && title.length > 40 ? (
+        <Tooltip title={title}>
+          <h2 className={clsx("text--truncate", styles.cardTitle)}>
+            {icon} {`${title.substring(0, 40)} ...`}
+          </h2>
+        </Tooltip>
+      ) : (
+        <h2 className={clsx("text--truncate", styles.cardTitle)} title={title}>
+          {icon} {title}
+        </h2>
+      )}
+      {description && (
+        <p className={clsx("text--truncate", styles.cardDescription)} title={description}>
+          {description}
+        </p>
+      )}
+    </CardContainer>
+  );
+}
+function CardCategory({ item }) {
+  const href = findFirstCategoryLink(item);
+  // Unexpected: categories that don't have a link have been filtered upfront
+  if (!href) {
+    return null;
+  }
+  return (
+    <CardLayout
+      href={href}
+      icon="🗃️"
+      title={item.label}
+      description={item.description ?? translate(
+        {
+          message: "{count} items",
+          id: "theme.docs.DocCard.categoryDescription",
+          description: "The default description for a category card in the generated index about how many items this category includes",
+        },
+        { count: item.items.length }
+      )}
+    />
+  );
+}
+function CardLink({ item }) {
+  const icon = isInternalUrl(item.href) ? "📄️" : "🔗";
+  const doc = useDocById(item.docId ?? undefined);
+  return (
+    <CardLayout
+      href={item.href}
+      icon={icon}
+      title={item.label}
+      description={item.description ?? doc?.description}
+    />
+  );
+}
+export default function DocCard({ item }) {
+  switch (item.type) {
+    case "link":
+      return <CardLink item={item} />;
+    case "category":
+      return <CardCategory item={item} />;
+    default:
+      throw new Error(`unknown item type ${JSON.stringify(item)}`);
+  }
+}
diff --git a/src/theme/DocCard/styles.module.css b/src/theme/DocCard/styles.module.css
new file mode 100644
index 0000000000..4f7ad27f40
--- /dev/null
+++ b/src/theme/DocCard/styles.module.css
@@ -0,0 +1,27 @@
+.cardContainer {
+  --ifm-link-color: var(--ifm-color-emphasis-800);
+  --ifm-link-hover-color: var(--ifm-color-emphasis-700);
+  --ifm-link-hover-decoration: none;
+
+  box-shadow: 0 1.5px 3px 0 rgb(0 0 0 / 15%);
+  border: 1px solid var(--ifm-color-emphasis-200);
+  transition: all var(--ifm-transition-fast) ease;
+  transition-property: border, box-shadow;
+}
+
+.cardContainer:hover {
+  border-color: var(--ifm-color-primary);
+  box-shadow: 0 3px 6px 0 rgb(0 0 0 / 20%);
+}
+
+.cardContainer *:last-child {
+  margin-bottom: 0;
+}
+
+.cardTitle {
+  font-size: 1.2rem;
+}
+
+.cardDescription {
+  font-size: 0.8rem;
+}
diff --git a/src/theme/DocSidebarItem/Category/Category.module.css b/src/theme/DocSidebarItem/Category/Category.module.css
new file mode 100644
index 0000000000..3d4cf4a60c
--- /dev/null
+++ b/src/theme/DocSidebarItem/Category/Category.module.css
@@ -0,0 +1,24 @@
+.categoryItem {
+  width: 20px;
+  height: 20px;
+  display: flex;
+  align-items: center;
+  justify-content: center;
+  margin-right: 16px;
+  color: #aeb1be;
+}
+
+.categoryItem svg {
+  stroke-width: 0;
+  fill: #aeb1be;
+  stroke: #aeb1be;
+}
+
+.categoryItem.active {
+  color: var(--ifm-color-primary);
+}
+
+.categoryItem.active svg {
+  fill: var(--ifm-color-primary);
+  stroke: var(--ifm-color-primary);
+}
diff --git a/src/theme/DocSidebarItem/Category/index.js b/src/theme/DocSidebarItem/Category/index.js
new file mode 100644
index 0000000000..88ce33e4d3
--- /dev/null
+++ b/src/theme/DocSidebarItem/Category/index.js
@@ -0,0 +1,183 @@
+import React, { useEffect, useMemo } from "react";
+import clsx from "clsx";
+import {
+  ThemeClassNames,
+  useThemeConfig,
+  usePrevious,
+  Collapsible,
+  useCollapsible,
+} from "@docusaurus/theme-common";
+import {
+  isActiveSidebarItem,
+  findFirstCategoryLink,
+  useDocSidebarItemsExpandedState,
+  isSamePath,
+} from "@docusaurus/theme-common/internal";
+import Link from "@docusaurus/Link";
+import { translate } from "@docusaurus/Translate";
+import useIsBrowser from "@docusaurus/useIsBrowser";
+import DocSidebarItems from "@theme/DocSidebarItems";
+import IconMapper from "@site/src/components/IconMapper/IconMapper";
+import styles from "./Category.module.css";
+// If we navigate to a category and it becomes active, it should automatically
+// expand itself
+function useAutoExpandActiveCategory({ isActive, collapsed, updateCollapsed }) {
+  const wasActive = usePrevious(isActive);
+  useEffect(() => {
+    const justBecameActive = isActive && !wasActive;
+    if (justBecameActive && collapsed) {
+      updateCollapsed(false);
+    }
+  }, [isActive, wasActive, collapsed, updateCollapsed]);
+}
+/**
+ * When a collapsible category has no link, we still link it to its first child
+ * during SSR as a temporary fallback. This allows navigating inside the
+ * category even when JS fails to load, is delayed, or is simply disabled;
+ * React hydration becomes an optional progressive enhancement.
+ * see https://github.com/facebookincubator/infima/issues/36#issuecomment-772543188
+ * see https://github.com/facebook/docusaurus/issues/3030
+ */
+function useCategoryHrefWithSSRFallback(item) {
+  const isBrowser = useIsBrowser();
+  return useMemo(() => {
+    if (item.href) {
+      return item.href;
+    }
+    // In these cases, it's not necessary to render a fallback
+    // We skip the "findFirstCategoryLink" computation
+    if (isBrowser || !item.collapsible) {
+      return undefined;
+    }
+    return findFirstCategoryLink(item);
+  }, [item, isBrowser]);
+}
+function CollapseButton({ categoryLabel, onClick }) {
+  return (