From ee8425a7341128895a9f223a77dc7ed9e0d0c297 Mon Sep 17 00:00:00 2001 From: caroldelwing Date: Tue, 7 May 2024 14:41:34 -0400 Subject: [PATCH] docs: document Crossplane PAD-82 (#2713) * docs: create new automation section, crossplane page, crossplane aws guide * docs: add crossplane azure guide * docs: add gcp crossplane guide, fix indentation * docs: move palette cli under automation, fix broken links * docs: fix more palette cli broken links * docs: fix more broken links * docs: add redirects, add crossplane to vale, apply vale and Tyler suggestions * docs: address suggestions * Apply suggestions from code review Co-authored-by: Yuliia Horbenko <31223054+yuliiiah@users.noreply.github.com> * docs: apply more suggestions from review * docs: more review suggestions * docs: fix palette cli link --------- Co-authored-by: Yuliia Horbenko <31223054+yuliiiah@users.noreply.github.com> --- docs/docs-content/automation/_category_.json | 3 + docs/docs-content/automation/automation.md | 29 + .../automation/crossplane/_category_.json | 3 + .../automation/crossplane/crossplane.md | 39 + .../deploy-cluster-aws-crossplane.md | 667 ++++++++++++++++++ .../deploy-cluster-azure-crossplane.md | 579 +++++++++++++++ .../deploy-cluster-gcp-crossplane.md | 479 +++++++++++++ .../palette-cli}/_category_.json | 0 .../palette-cli/commands/_category_.json | 3 + .../palette-cli/commands/commands.md | 0 .../palette-cli/commands/docs.md | 2 +- .../palette-cli/commands/ec.md | 9 +- .../palette-cli/commands/login.md | 0 .../palette-cli/commands/pcg.md | 12 +- .../palette-cli/commands/pde.md | 0 .../palette-cli/commands/project.md | 0 .../palette-cli/install-palette-cli.md | 4 +- .../palette-cli/palette-cli.md | 2 - .../automation/terraform/_category_.json | 3 + .../automation/terraform/terraform.md | 53 ++ .../clusters/pcg/deploy-app-pcg.md | 6 +- .../clusters/pcg/deploy-pcg/maas.md | 8 +- .../clusters/pcg/deploy-pcg/openstack.md | 8 +- .../clusters/pcg/deploy-pcg/vmware.md | 8 +- docs/docs-content/clusters/pcg/pcg.md | 2 +- docs/docs-content/devx/devx.md | 4 +- .../airgap-install/install.md | 6 +- .../install-on-vmware/install.md | 6 +- docs/docs-content/palette-cli/_category_.json | 3 - docs/docs-content/spectro-downloads.md | 2 +- docs/docs-content/terraform.md | 58 -- .../airgap-install/install.md | 6 +- .../install-on-vmware/install.md | 6 +- redirects.js | 60 ++ .../IconMapper/dynamicFontAwesomeImports.js | 4 +- .../config/vocabularies/Internal/accept.txt | 3 +- 36 files changed, 1967 insertions(+), 110 deletions(-) create mode 100644 docs/docs-content/automation/_category_.json create mode 100644 docs/docs-content/automation/automation.md create mode 100644 docs/docs-content/automation/crossplane/_category_.json create mode 100644 docs/docs-content/automation/crossplane/crossplane.md create mode 100644 docs/docs-content/automation/crossplane/deploy-cluster-aws-crossplane.md create mode 100644 docs/docs-content/automation/crossplane/deploy-cluster-azure-crossplane.md create mode 100644 docs/docs-content/automation/crossplane/deploy-cluster-gcp-crossplane.md rename docs/docs-content/{palette-cli/commands => automation/palette-cli}/_category_.json (100%) create mode 100644 docs/docs-content/automation/palette-cli/commands/_category_.json rename docs/docs-content/{ => automation}/palette-cli/commands/commands.md (100%) rename docs/docs-content/{ => automation}/palette-cli/commands/docs.md (97%) rename docs/docs-content/{ => automation}/palette-cli/commands/ec.md (94%) rename docs/docs-content/{ => 
automation}/palette-cli/commands/login.md (100%) rename docs/docs-content/{ => automation}/palette-cli/commands/pcg.md (93%) rename docs/docs-content/{ => automation}/palette-cli/commands/pde.md (100%) rename docs/docs-content/{ => automation}/palette-cli/commands/project.md (100%) rename docs/docs-content/{ => automation}/palette-cli/install-palette-cli.md (88%) rename docs/docs-content/{ => automation}/palette-cli/palette-cli.md (94%) create mode 100644 docs/docs-content/automation/terraform/_category_.json create mode 100644 docs/docs-content/automation/terraform/terraform.md delete mode 100644 docs/docs-content/palette-cli/_category_.json delete mode 100644 docs/docs-content/terraform.md

diff --git a/docs/docs-content/automation/_category_.json b/docs/docs-content/automation/_category_.json new file mode 100644 index 0000000000..6374007d98 --- /dev/null +++ b/docs/docs-content/automation/_category_.json @@ -0,0 +1,3 @@
{
  "position": 210
}

diff --git a/docs/docs-content/automation/automation.md b/docs/docs-content/automation/automation.md new file mode 100644 index 0000000000..26959c7b06 --- /dev/null +++ b/docs/docs-content/automation/automation.md @@ -0,0 +1,29 @@
---
sidebar_label: "Automation"
title: "Automation"
description: "Learn how to use automation tools with Palette and Palette VerteX."
hide_table_of_contents: false
sidebar_position: 0
sidebar_custom_props:
  icon: "terminal"
tags: ["automation"]
---

This section contains documentation and guides for the tools essential to automating tasks with Palette:

- Palette CLI - Enables users to interact with Palette and create and manage resources, such as projects, virtual
  clusters, and more. The Palette CLI is the primary method for installing a
  [self-hosted Palette](../enterprise-version/enterprise-version.md) instance and deploying a
  [Private Cloud Gateway](../clusters/pcg/pcg.md).

- Palette Terraform Provider - Allows users to use [Terraform](https://www.terraform.io) for automating the deployment
  and management of Palette resources such as cluster profiles, cloud accounts, clusters, and more.

- Palette Crossplane Provider - Allows users to use [Crossplane](https://docs.crossplane.io/v1.15/) to provision and
  manage Palette resources through standard Kubernetes APIs.

## Resources

- [Palette CLI](./palette-cli/palette-cli.md)
- [Palette Terraform Provider](./terraform/terraform.md)
- [Palette Crossplane Provider](./crossplane/crossplane.md)

diff --git a/docs/docs-content/automation/crossplane/_category_.json b/docs/docs-content/automation/crossplane/_category_.json new file mode 100644 index 0000000000..c3460c6dbd --- /dev/null +++ b/docs/docs-content/automation/crossplane/_category_.json @@ -0,0 +1,3 @@
{
  "position": 30
}

diff --git a/docs/docs-content/automation/crossplane/crossplane.md b/docs/docs-content/automation/crossplane/crossplane.md new file mode 100644 index 0000000000..9738d81c1d --- /dev/null +++ b/docs/docs-content/automation/crossplane/crossplane.md @@ -0,0 +1,39 @@
---
sidebar_label: "Crossplane"
title: "Crossplane"
description: "Learn how to use Crossplane with Palette and Palette VerteX."
hide_table_of_contents: false
sidebar_position: 0
tags: ["crossplane", "iac", "automation", "infrastructure as code"]
---

Palette supports the open-source Cloud Native Computing Foundation (CNCF) project
[Crossplane](https://www.crossplane.io/). Crossplane transforms Kubernetes clusters into universal control planes,
extending the Kubernetes API and enabling infrastructure resource provisioning and management across major
infrastructure providers.

These resources, called
[Managed Resources (MR)](https://docs.crossplane.io/latest/concepts/managed-resources/#managed-resource-fields) within
the Crossplane environment, are essentially Kubernetes Custom Resource Definitions (CRDs) that represent infrastructure
resources as native Kubernetes objects. Because they are Kubernetes objects, you can interact with them using standard
commands like `kubectl describe`. When users create a managed resource, Crossplane interacts with the infrastructure
provider API to request the creation of the resource within the provider's environment.
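For illustration, below is a minimal sketch of a Palette managed resource. It uses the `Profile` kind that the cluster
deployment guides below create; the field values are placeholders rather than a working configuration.

```yaml
apiVersion: cluster.palette.crossplane.io/v1alpha1
kind: Profile
metadata:
  name: example-cluster-profile
spec:
  forProvider:
    cloud: "aws" # Target infrastructure provider
    description: "Example cluster profile"
    type: "cluster" # Profile type
  providerConfigRef:
    name: default # ProviderConfig that holds the Palette credentials
```

Applying a manifest like this with `kubectl apply` asks Crossplane to create the corresponding resource in Palette, and
deleting the object asks Crossplane to remove it.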
## Palette Provider

You can use the Palette Crossplane Provider to interact with the Palette API and create resources declaratively.

Refer to the [Palette Provider](https://marketplace.upbound.io/providers/crossplane-contrib/provider-palette/v0.12.0)
page for a complete list of managed resources and examples.

## Get Started

For examples of end-to-end cluster provisioning with Crossplane, review the following guides:

- [Deploy an AWS IaaS Cluster with Crossplane](./deploy-cluster-aws-crossplane.md)
- [Deploy an Azure IaaS Cluster with Crossplane](./deploy-cluster-azure-crossplane.md)
- [Deploy a GCP IaaS Cluster with Crossplane](./deploy-cluster-gcp-crossplane.md)

## Resources

- [Crossplane Documentation](https://docs.crossplane.io/latest/)

diff --git a/docs/docs-content/automation/crossplane/deploy-cluster-aws-crossplane.md b/docs/docs-content/automation/crossplane/deploy-cluster-aws-crossplane.md new file mode 100644 index 0000000000..5021a2bfa9 --- /dev/null +++ b/docs/docs-content/automation/crossplane/deploy-cluster-aws-crossplane.md @@ -0,0 +1,667 @@
---
sidebar_label: "Deploy an AWS IaaS Cluster with Crossplane"
title: "Deploy an AWS IaaS Cluster with Crossplane"
description: "Learn how to deploy an AWS IaaS cluster using the Spectro Cloud Crossplane provider."
hide_table_of_contents: false
sidebar_position: 20
tags: ["crossplane", "aws", "iac", "infrastructure as code"]
---

Palette supports using [Crossplane](https://www.crossplane.io) to create and manage Kubernetes
[host clusters](../../glossary-all.md#host-cluster) across major infrastructure providers. This section guides you
through using Crossplane to deploy a Palette-managed Kubernetes cluster in AWS.

## Prerequisites

- A [Palette](https://www.spectrocloud.com/get-started) account and API key. Refer to the
  [Create API Key](../../user-management/authentication/api-key/create-api-key.md) page for instructions on creating an
  API key.
- A public [AWS](https://repost.aws/knowledge-center/create-and-activate-aws-account) cloud account with the required
  [IAM Policies](../../clusters/public-cloud/aws/required-iam-policies.md).
- An SSH key pair available in the region where you want to deploy the cluster. Check out the
  [Create EC2 SSH Key Pair](https://docs.aws.amazon.com/ground-station/latest/ug/create-ec2-ssh-key-pair.html) guide
  for guidance.
- The AWS account must be registered in Palette. Follow the
  [Add an AWS Account to Palette](../../clusters/public-cloud/aws/add-aws-accounts.md) guide to register your account in
  Palette.
- A Kubernetes cluster with at least 2 GB of RAM. This guide uses a [kind](https://kind.sigs.k8s.io) cluster as an
  example. Refer to the [kind Quick Start](https://kind.sigs.k8s.io/docs/user/quick-start/) to learn how to install kind
  and create a cluster.
- The following software is required and must be installed:
  - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)
  - [Helm](https://helm.sh/docs/intro/install/) version v3.2.0 or later
  - [curl](https://curl.se/docs/install.html)
  - A text editor such as Vi or [Nano](https://www.nano-editor.org). This guide uses Vi as an example.

## Deploy an AWS IaaS Cluster with Crossplane

1. Open up a terminal session and set the kubectl context to your kind cluster. Replace `<CLUSTER_NAME>` with the
   name of your cluster.

   ```bash
   kubectl cluster-info --context <CLUSTER_NAME>
   ```

   ```text hideClipboard
   Kubernetes control plane is running at https://127.0.0.1:65306
   CoreDNS is running at https://127.0.0.1:65306/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

   To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
   ```

2. Next, add the Crossplane Helm chart repository. This will enable Helm to install all the Crossplane Kubernetes
   components.

   ```bash
   helm repo add \
     crossplane-stable https://charts.crossplane.io/stable
   helm repo update
   ```

   ```text hideClipboard
   "crossplane-stable" has been added to your repositories
   Hang tight while we grab the latest from your chart repositories...
   ...Successfully got an update from the "ngrok" chart repository
   ...Successfully got an update from the "crossplane-stable" chart repository
   ...Successfully got an update from the "stable" chart repository
   Update Complete. ⎈Happy Helming!⎈
   ```

3. Install the Crossplane components using the `helm install` command.

   ```bash
   helm install crossplane \
     crossplane-stable/crossplane \
     --namespace crossplane-system \
     --create-namespace
   ```

   You can verify the installation with the `kubectl get pods` command. The output must contain two Crossplane pods in
   the _Running_ status.

   ```bash
   kubectl get pods --namespace crossplane-system
   ```

   ```text hideClipboard
   NAME                                     READY   STATUS    RESTARTS   AGE
   crossplane-869d89c8f8-7jc6c              1/1     Running   0          20s
   crossplane-rbac-manager-784b496b-8mr6z   1/1     Running   0          20s
   ```

4. Once Crossplane is installed, create a folder to store the Kubernetes configuration files.

   ```bash
   mkdir crossplane-aws
   ```

5. Use a text editor of your choice to create a file for the Palette Crossplane provider configuration.

   ```bash
   vi crossplane-aws/provider-palette.yaml
   ```

6. Paste the following Kubernetes configuration into the text editor window that opens. Press the `Escape` key, type
   `:wq`, and press `Enter` to save the file and exit.

   ```yaml
   apiVersion: pkg.crossplane.io/v1
   kind: Provider
   metadata:
     name: provider-palette
   spec:
     package: crossplane-contrib/provider-palette:v0.12.0
   ```

7. Issue the command below to install the Palette Crossplane provider. Crossplane will install the CRDs that allow you
   to create Palette resources directly inside Kubernetes.

   ```bash
   kubectl apply --filename crossplane-aws/provider-palette.yaml
   ```

   You can check the installation with the `kubectl get providers` command.

   ```bash
   kubectl get providers
   ```

   ```text hideClipboard
   NAME               INSTALLED   HEALTHY   PACKAGE                                       AGE
   provider-palette   True        True      crossplane-contrib/provider-palette:v0.12.0   61s
   ```
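   As an optional sanity check, you can also list the Custom Resource Definitions registered by the provider. This is
   a sketch of one way to do it; the exact set of CRDs returned depends on the provider version.

   ```bash
   # List the CRDs installed by the Palette Crossplane provider
   kubectl get crds | grep palette.crossplane.io
   ```

8. Create a file to store a Kubernetes Secret containing your Palette API key.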
The Palette provider requires + credentials to create and manage resources. + + ```bash + vi crossplane-aws/secret-aws.yaml + ``` + +9. Paste the following Kubernetes configuration into the text editor window that opens. Replace `` with + your Palette API key and change the values of `project_name` and `host` according to your environment. Save the file + and exit. + + ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: palette-creds + namespace: crossplane-system + type: Opaque + stringData: + credentials: | + { + "api_key": "", + "project_name": "Default", + "host": "console.spectrocloud.com" + } + ``` + +10. Create the Kubernetes secret. + + ```shell + kubectl apply --filename crossplane-aws/secret-aws.yaml + ``` + +11. Next, create a file to store the `ProviderConfig` object. This object configures the Palette Crossplane provider + with the secret containing the Palette API key. + + ```bash + vi crossplane-aws/providerconfig-aws.yaml + ``` + +12. Paste the content below into the text editor window, save the file and exit. + + ```yaml + apiVersion: palette.crossplane.io/v1beta1 + kind: ProviderConfig + metadata: + name: default + spec: + credentials: + source: Secret + secretRef: + name: palette-creds + namespace: crossplane-system + key: credentials + ``` + +13. Create the Kubernetes `ProviderConfig` object. + + ```shell + kubectl apply --filename crossplane-aws/providerconfig-aws.yaml + ``` + +14. Once the Palette Crossplane provider is installed and set up, create a file to store the AWS + [cluster profile](../../profiles/cluster-profiles/cluster-profiles.md) configuration. + + ```bash + vi crossplane-aws/cluster-profile-aws.yaml + ``` + +15. Paste the Kubernetes configuration below into the text editor window that opens. Save the file and exit. + + ```yaml + apiVersion: cluster.palette.crossplane.io/v1alpha1 + kind: Profile + metadata: + name: aws-crossplane-cluster-profile + namespace: crossplane-system + spec: + forProvider: + cloud: "aws" + description: "AWS Crossplane cluster profile" + type: "cluster" + pack: + - name: "ubuntu-aws" + tag: "22.04" + registryUid: "5eecc89d0b150045ae661cef" + values: + "# Spectro Golden images includes most of the hardening as per CIS Ubuntu Linux 22.04 LTS Server L1 v1.0.0 + standards\n\n# Uncomment below section to\n# 1. Include custom files to be copied over to the nodes + and/or\n# 2. Execute list of commands before or after kubeadm init/join is + executed\n#\n#kubeadmconfig:\n# preKubeadmCommands:\n# - echo \"Executing pre kube admin config + commands\"\n# - update-ca-certificates\n# - 'systemctl restart containerd; sleep 3'\n# - 'while [ ! 
-S + /var/run/containerd/containerd.sock ]; do echo \"Waiting for containerd...\"; sleep 1; + done'\n# postKubeadmCommands:\n# - echo \"Executing post kube admin config commands\"\n# files:\n# - + targetPath: /usr/local/share/ca-certificates/mycom.crt\n# targetOwner: + \"root:root\"\n# targetPermissions: \"0644\"\n# content: |\n# -----BEGIN + CERTIFICATE-----\n# MIICyzCCAbOgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl\n# cm5ldGVzMB4XDTIwMDkyMjIzNDMyM1oXDTMwMDkyMDIzNDgyM1owFTETMBEGA1UE\n# AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMdA\n# nZYs1el/6f9PgV/aO9mzy7MvqaZoFnqO7Qi4LZfYzixLYmMUzi+h8/RLPFIoYLiz\n# qiDn+P8c9I1uxB6UqGrBt7dkXfjrUZPs0JXEOX9U/6GFXL5C+n3AUlAxNCS5jobN\n# fbLt7DH3WoT6tLcQefTta2K+9S7zJKcIgLmBlPNDijwcQsbenSwDSlSLkGz8v6N2\n# 7SEYNCV542lbYwn42kbcEq2pzzAaCqa5uEPsR9y+uzUiJpv5tDHUdjbFT8tme3vL\n# 9EdCPODkqtMJtCvz0hqd5SxkfeC2L+ypaiHIxbwbWe7GtliROvz9bClIeGY7gFBK\n# jZqpLdbBVjo0NZBTJFUCAwEAAaMmMCQwDgYDVR0PAQH/BAQDAgKkMBIGA1UdEwEB\n# /wQIMAYBAf8CAQAwDQYJKoZIhvcNAQELBQADggEBADIKoE0P+aVJGV9LWGLiOhki\n# HFv/vPPAQ2MPk02rLjWzCaNrXD7aPPgT/1uDMYMHD36u8rYyf4qPtB8S5REWBM/Y\n# g8uhnpa/tGsaqO8LOFj6zsInKrsXSbE6YMY6+A8qvv5lPWpJfrcCVEo2zOj7WGoJ\n# ixi4B3fFNI+wih8/+p4xW+n3fvgqVYHJ3zo8aRLXbXwztp00lXurXUyR8EZxyR+6\n# b+IDLmHPEGsY9KOZ9VLLPcPhx5FR9njFyXvDKmjUMJJgUpRkmsuU1mCFC+OHhj56\n# IkLaSJf6z/p2a3YjTxvHNCqFMLbJ2FvJwYCRzsoT2wm2oulnUAMWPI10vdVM+Nc=\n# -----END + CERTIFICATE-----" + uid: "63bd0373764141c6622c3062" + + - name: "kubernetes" + tag: "1.29.0" + uid: "661cc50b0aa79b77ade281f9" + registryUid: "5eecc89d0b150045ae661cef" + values: + "# spectrocloud.com/enabled-presets: Kube Controller Manager:loopback-ctrlmgr,Kube + Scheduler:loopback-scheduler\npack:\n content:\n images:\n - image: + registry.k8s.io/coredns/coredns:v1.10.1\n - image: registry.k8s.io/etcd:3.5.10-0\n - image: + registry.k8s.io/kube-apiserver:v1.29.0\n - image: + registry.k8s.io/kube-controller-manager:v1.29.0\n - image: + registry.k8s.io/kube-proxy:v1.29.0\n - image: registry.k8s.io/kube-scheduler:v1.29.0\n - image: + registry.k8s.io/pause:3.9\n - image: registry.k8s.io/pause:3.8\n k8sHardening: True\n #CIDR Range + for Pods in cluster\n # Note : This must not overlap with any of the host or service network\n podCIDR: + \"192.168.0.0/16\"\n #CIDR notation IP range from which to assign service cluster IPs\n # Note : This + must not overlap with any IP ranges assigned to nodes for pods.\n serviceClusterIpRange: + \"10.96.0.0/12\"\n # serviceDomain: \"cluster.local\"\n\n# KubeAdm customization for kubernetes + hardening. Below config will be ignored if k8sHardening property above is + disabled\nkubeadmconfig:\n apiServer:\n extraArgs:\n # Note : secure-port flag is used during + kubeadm init. 
Do not change this flag on a running cluster\n secure-port: + \"6443\"\n anonymous-auth: \"true\"\n profiling: \"false\"\n disable-admission-plugins: + \"AlwaysAdmit\"\n default-not-ready-toleration-seconds: + \"60\"\n default-unreachable-toleration-seconds: \"60\"\n enable-admission-plugins: + \"AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity\"\n admission-control-config-file: + \"/etc/kubernetes/pod-security-standard.yaml\"\n audit-log-path: + /var/log/apiserver/audit.log\n audit-policy-file: + /etc/kubernetes/audit-policy.yaml\n audit-log-maxage: \"30\"\n audit-log-maxbackup: + \"10\"\n audit-log-maxsize: \"100\"\n authorization-mode: RBAC,Node\n tls-cipher-suites: + \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\"\n extraVolumes:\n - + name: audit-log\n hostPath: /var/log/apiserver\n mountPath: + /var/log/apiserver\n pathType: DirectoryOrCreate\n - name: audit-policy\n hostPath: + /etc/kubernetes/audit-policy.yaml\n mountPath: /etc/kubernetes/audit-policy.yaml\n readOnly: + true\n pathType: File\n - name: pod-security-standard\n hostPath: + /etc/kubernetes/pod-security-standard.yaml\n mountPath: + /etc/kubernetes/pod-security-standard.yaml\n readOnly: true\n pathType: + File\n controllerManager:\n extraArgs:\n profiling: \"false\"\n terminated-pod-gc-threshold: + \"25\"\n use-service-account-credentials: \"true\"\n feature-gates: + \"RotateKubeletServerCertificate=true\"\n scheduler:\n extraArgs:\n profiling: + \"false\"\n kubeletExtraArgs:\n read-only-port : \"0\"\n event-qps: \"0\"\n feature-gates: + \"RotateKubeletServerCertificate=true\"\n protect-kernel-defaults: + \"true\"\n rotate-server-certificates: \"true\"\n tls-cipher-suites: + \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\"\n files:\n - + path: hardening/audit-policy.yaml\n targetPath: /etc/kubernetes/audit-policy.yaml\n targetOwner: + \"root:root\"\n targetPermissions: \"0600\"\n - path: hardening/90-kubelet.conf\n targetPath: + /etc/sysctl.d/90-kubelet.conf\n targetOwner: \"root:root\"\n targetPermissions: \"0600\"\n - + targetPath: /etc/kubernetes/pod-security-standard.yaml\n targetOwner: + \"root:root\"\n targetPermissions: \"0600\"\n content: |\n apiVersion: + apiserver.config.k8s.io/v1\n kind: AdmissionConfiguration\n plugins:\n - name: + PodSecurity\n configuration:\n apiVersion: + pod-security.admission.config.k8s.io/v1\n kind: + PodSecurityConfiguration\n defaults:\n enforce: + \"baseline\"\n enforce-version: \"v1.29\"\n audit: + \"baseline\"\n audit-version: \"v1.29\"\n warn: + \"restricted\"\n warn-version: \"v1.29\"\n audit: + \"restricted\"\n audit-version: \"v1.29\"\n exemptions:\n # Array of + authenticated usernames to exempt.\n usernames: []\n # Array of runtime class + names to exempt.\n runtimeClasses: []\n # Array of namespaces to + exempt.\n namespaces: [kube-system]\n\n preKubeadmCommands:\n # For enabling + 'protect-kernel-defaults' flag to kubelet, kernel parameters changes are required\n - 'echo \"====> + Applying kernel parameters for Kubelet\"'\n - 'sysctl -p + 
/etc/sysctl.d/90-kubelet.conf'\n #postKubeadmCommands:\n #- 'echo \"List of post kubeadm commands to + be executed\"'\n\n# Client configuration to add OIDC based authentication flags in + kubeconfig\n#clientConfig:\n #oidc-issuer-url: \"{{ + .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}\"\n #oidc-client-id: \"{{ + .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}\"\n #oidc-client-secret: + oidc client secret\n #oidc-extra-scope: profile,email" + + - name: "cni-calico" + tag: "3.27.0" + uid: "661cc4f20aa79b7543637fa9" + registryUid: "5eecc89d0b150045ae661cef" + values: + "pack:\n content:\n images:\n - image: + gcr.io/spectro-images-public/packs/calico/3.27.0/cni:v3.27.0\n - image: + gcr.io/spectro-images-public/packs/calico/3.27.0/node:v3.27.0\n - image: + gcr.io/spectro-images-public/packs/calico/3.27.0/kube-controllers:v3.27.0\n\nmanifests:\n calico:\n images:\n cni: + \"\"\n node: \"\"\n kubecontroller: \"\"\n # IPAM type to use. Supported types are + calico-ipam, host-local\n ipamType: \"calico-ipam\"\n\n calico_ipam:\n assign_ipv4: + true\n assign_ipv6: false\n\n # Should be one of CALICO_IPV4POOL_IPIP or + CALICO_IPV4POOL_VXLAN \n encapsulationType: \"CALICO_IPV4POOL_IPIP\"\n\n # Should be one of Always, + CrossSubnet, Never\n encapsulationMode: \"Always\"\n\n env:\n # Additional env variables for + calico-node\n calicoNode:\n #IPV6: \"autodetect\"\n #FELIX_IPV6SUPPORT: + \"true\"\n #CALICO_IPV6POOL_NAT_OUTGOING: \"true\"\n #CALICO_IPV4POOL_CIDR: + \"192.168.0.0/16\"\n #IP_AUTODETECTION_METHOD: \"first-found\"\n\n # Additional env variables + for calico-kube-controller deployment\n calicoKubeControllers:\n #LOG_LEVEL: + \"info\"\n #SYNC_NODE_LABELS: \"true\"" + + - name: "csi-aws-ebs" + tag: "1.26.1" + uid: "661cc4f60aa79b75afba6d4b" + registryUid: "5eecc89d0b150045ae661cef" + values: + "pack:\n content:\n images:\n - image: + gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/aws-ebs-csi-driver:v1.26.1\n - image: + gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/external-provisioner:v3.6.3-eks-1-29-2\n - + image: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/external-attacher:v4.4.3-eks-1-29-2\n - + image: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/external-resizer:v1.9.3-eks-1-29-2\n - + image: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/livenessprobe:v2.11.0-eks-1-29-2\n - + image: + gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/node-driver-registrar:v2.9.3-eks-1-29-2\n - + image: + gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/external-snapshotter/csi-snapshotter:v6.3.3-eks-1-29-2\n - + image: gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/volume-modifier-for-k8s:v0.1.3\n - + image: + gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/kubekins-e2e:v20231206-f7b83ffbe6-master\n charts:\n - + repo: https://kubernetes-sigs.github.io/aws-ebs-csi-driver \n name: + aws-ebs-csi-driver\n version: 2.26.1\n namespace: + \"kube-system\"\n\ncharts:\n aws-ebs-csi-driver: \n storageClasses: \n # Default Storage + Class\n - name: spectro-storage-class\n # annotation + metadata\n annotations:\n storageclass.kubernetes.io/is-default-class: \"true\"\n # + label metadata\n # labels:\n # my-label-is: supercool\n # defaults to + WaitForFirstConsumer\n volumeBindingMode: WaitForFirstConsumer\n # defaults to + Delete\n reclaimPolicy: Delete\n parameters:\n # File system type: xfs, ext2, ext3, + ext4\n csi.storage.k8s.io/fstype: \"ext4\"\n # EBS volume type: io1, io2, gp2, gp3, sc1, 
+ st1, standard\n type: \"gp2\"\n # I/O operations per second per GiB. Required when io1 or + io2 volume type is specified.\n # iopsPerGB: \"\"\n # Applicable only when io1 or io2 volume + type is specified\n # allowAutoIOPSPerGBIncrease: false\n # I/O operations per second. + Applicable only for gp3 volumes.\n # iops: \"\"\n # Throughput in MiB/s. Applicable only for + gp3 volumes.\n # throughput: \"\"\n # Whether the volume should be encrypted or + not\n # encrypted: \"\"\n # The full ARN of the key to use when encrypting the volume. When + not specified, the default KMS key is used.\n # kmsKeyId: \"\"\n # Additional Storage Class + \n # - name: addon-storage-class\n # + annotations:\n # storageclass.kubernetes.io/is-default-class: \"false\"\n # + labels:\n # my-label-is: supercool\n # volumeBindingMode: WaitForFirstConsumer\n # + reclaimPolicy: Delete\n # parameters:\n # csi.storage.k8s.io/fstype: \"ext4\"\n # type: + \"gp2\"\n # iopsPerGB: \"\"\n # allowAutoIOPSPerGBIncrease: false\n # iops: + \"\"\n # throughput: \"\"\n # encrypted: \"\"\n # kmsKeyId: + \"\"\n\n image:\n repository: + gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/aws-ebs-csi-driver\n # Overrides the image tag + whose default is v{{ .Chart.AppVersion }}\n tag: \"\"\n pullPolicy: IfNotPresent\n\n # -- + Custom labels to add into metadata\n customLabels:\n {}\n # k8s-app: + aws-ebs-csi-driver\n\n sidecars:\n provisioner:\n env: + []\n image:\n pullPolicy: IfNotPresent\n repository: + gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/external-provisioner\n tag: + \"v3.6.3-eks-1-29-2\"\n logLevel: 2\n # Additional parameters provided by + external-provisioner.\n additionalArgs: []\n # Grant additional permissions to + external-provisioner\n additionalClusterRoleRules:\n resources: {}\n # Tune leader + lease election for csi-provisioner.\n # Leader election is on by + default.\n leaderElection:\n enabled: true\n # Optional values to tune lease + behavior.\n # The arguments provided must be in an acceptable time.ParseDuration + format.\n # Ref: https://pkg.go.dev/flag#Duration\n # leaseDuration: + \"15s\"\n # renewDeadline: \"10s\"\n # retryPeriod: + \"5s\"\n securityContext:\n readOnlyRootFilesystem: + true\n allowPrivilegeEscalation: false\n attacher:\n env: + []\n image:\n pullPolicy: IfNotPresent\n repository: + gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/external-attacher\n tag: + \"v4.4.3-eks-1-29-2\"\n # Tune leader lease election for csi-attacher.\n # Leader election + is on by default.\n leaderElection:\n enabled: true\n # Optional values to tune + lease behavior.\n # The arguments provided must be in an acceptable time.ParseDuration + format.\n # Ref: https://pkg.go.dev/flag#Duration\n # leaseDuration: + \"15s\"\n # renewDeadline: \"10s\"\n # retryPeriod: \"5s\"\n logLevel: + 2\n # Additional parameters provided by external-attacher.\n additionalArgs: []\n # + Grant additional permissions to external-attacher\n additionalClusterRoleRules: + []\n resources: {}\n securityContext:\n readOnlyRootFilesystem: + true\n allowPrivilegeEscalation: false\n snapshotter:\n # Enables the snapshotter + sidecar even if the snapshot CRDs are not installed\n forceEnable: false\n env: + []\n image:\n pullPolicy: IfNotPresent\n repository: + gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/external-snapshotter/csi-snapshotter\n tag: + \"v6.3.3-eks-1-29-2\"\n logLevel: 2\n # Additional parameters provided by + csi-snapshotter.\n additionalArgs: []\n # Grant additional permissions to + csi-snapshotter\n 
additionalClusterRoleRules: []\n resources: + {}\n securityContext:\n readOnlyRootFilesystem: true\n allowPrivilegeEscalation: + false\n livenessProbe:\n image:\n pullPolicy: IfNotPresent\n repository: + gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/livenessprobe\n tag: + \"v2.11.0-eks-1-29-2\"\n # Additional parameters provided by + livenessprobe.\n additionalArgs: []\n resources: + {}\n securityContext:\n readOnlyRootFilesystem: true\n allowPrivilegeEscalation: + false\n resizer:\n env: []\n image:\n pullPolicy: + IfNotPresent\n repository: + gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/external-resizer\n tag: + \"v1.9.3-eks-1-29-2\"\n # Tune leader lease election for csi-resizer.\n # Leader election is + on by default.\n leaderElection:\n enabled: true\n # Optional values to tune + lease behavior.\n # The arguments provided must be in an acceptable time.ParseDuration + format.\n # Ref: https://pkg.go.dev/flag#Duration\n # leaseDuration: + \"15s\"\n # renewDeadline: \"10s\"\n # retryPeriod: \"5s\"\n logLevel: + 2\n # Additional parameters provided by external-resizer.\n additionalArgs: []\n # + Grant additional permissions to external-resizer\n additionalClusterRoleRules: + []\n resources: {}\n securityContext:\n readOnlyRootFilesystem: + true\n allowPrivilegeEscalation: false\n nodeDriverRegistrar:\n env: + []\n image:\n pullPolicy: IfNotPresent\n repository: + gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/node-driver-registrar\n tag: + \"v2.9.3-eks-1-29-2\"\n logLevel: 2\n # Additional parameters provided by + node-driver-registrar.\n additionalArgs: []\n resources: + {}\n securityContext:\n readOnlyRootFilesystem: true\n allowPrivilegeEscalation: + false\n livenessProbe:\n exec:\n command:\n - + /csi-node-driver-registrar\n - + --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)\n - + --mode=kubelet-registration-probe\n initialDelaySeconds: 30\n periodSeconds: + 90\n timeoutSeconds: 15\n volumemodifier:\n env: + []\n image:\n pullPolicy: IfNotPresent\n repository: + gcr.io/spectro-images-public/packs/csi-aws-ebs/1.26.1/volume-modifier-for-k8s\n tag: + \"v0.1.3\"\n leaderElection:\n enabled: true\n # Optional values to tune lease + behavior.\n # The arguments provided must be in an acceptable time.ParseDuration + format.\n # Ref: https://pkg.go.dev/flag#Duration\n # leaseDuration: + \"15s\"\n # renewDeadline: \"10s\"\n # retryPeriod: \"5s\"\n logLevel: + 2\n # Additional parameters provided by volume-modifier-for-k8s.\n additionalArgs: + []\n resources: {}\n securityContext:\n readOnlyRootFilesystem: + true\n allowPrivilegeEscalation: + false\n\n proxy:\n http_proxy:\n no_proxy:\n\n imagePullSecrets: + []\n nameOverride:\n fullnameOverride:\n\n awsAccessSecret:\n name: aws-secret\n keyId: + key_id\n accessKey: access_key\n\n controller:\n batching: + true\n volumeModificationFeature:\n enabled: false\n # Additional parameters provided by + aws-ebs-csi-driver controller.\n additionalArgs: []\n sdkDebugLog: false\n loggingFormat: + text\n affinity:\n nodeAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - + weight: 1\n preference:\n matchExpressions:\n - key: + eks.amazonaws.com/compute-type\n operator: + NotIn\n values:\n - + fargate\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - + podAffinityTerm:\n labelSelector:\n matchExpressions:\n - key: + app\n operator: In\n values:\n - + ebs-csi-controller\n topologyKey: kubernetes.io/hostname\n weight: 100\n # + The default filesystem type of the volume to provision when fstype 
is unspecified in the + StorageClass.\n # If the default is not set and fstype is unset in the StorageClass, then no fstype + will be set\n defaultFsType: ext4\n env: []\n # Use envFrom to reference ConfigMaps and + Secrets across all containers in the deployment\n envFrom: []\n # If set, add pv/pvc metadata to + plugin create requests as parameters.\n extraCreateMetadata: true\n # Extra volume tags to + attach to each dynamically provisioned volume.\n # ---\n # extraVolumeTags:\n # key1: + value1\n # key2: value2\n extraVolumeTags: {}\n httpEndpoint:\n # (deprecated) The + TCP network address where the prometheus metrics endpoint\n # will run (example: `:8080` which + corresponds to port 8080 on local host).\n # The default is empty string, which means metrics + endpoint is disabled.\n # ---\n enableMetrics: false\n serviceMonitor:\n # Enables + the ServiceMonitor resource even if the prometheus-operator CRDs are not installed\n forceEnable: + false\n # Additional labels for ServiceMonitor object\n labels:\n release: + prometheus\n # If set to true, AWS API call metrics will be exported to the following\n # TCP + endpoint: \"0.0.0.0:3301\"\n # ---\n # ID of the Kubernetes cluster used for tagging provisioned + EBS volumes (optional).\n k8sTagClusterId:\n logLevel: 2\n userAgentExtra: + \"helm\"\n nodeSelector: {}\n podAnnotations: {}\n podLabels: {}\n priorityClassName: + system-cluster-critical\n # AWS region to use. If not specified then the region will be looked up via + the AWS EC2 metadata\n # service.\n # ---\n # region: + us-east-1\n region:\n replicaCount: 2\n updateStrategy:\n type: + RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n # type: RollingUpdate\n # + rollingUpdate:\n # maxSurge: 0\n # maxUnavailable: + 1\n resources:\n requests:\n cpu: 10m\n memory: + 40Mi\n limits:\n cpu: 100m\n memory: 256Mi\n serviceAccount:\n # A + service account will be created for you if set to true. 
Set to false if you want to use your + own.\n create: true\n name: ebs-csi-controller-sa\n annotations: {}\n ## + Enable if EKS IAM for SA is used\n # eks.amazonaws.com/role-arn: + arn::iam:::role/ebs-csi-role\n automountServiceAccountToken: + true\n tolerations:\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: + NoExecute\n operator: Exists\n tolerationSeconds: 300\n # TSCs without the label + selector stanza\n #\n # Example:\n #\n # topologySpreadConstraints:\n # - + maxSkew: 1\n # topologyKey: topology.kubernetes.io/zone\n # whenUnsatisfiable: + ScheduleAnyway\n # - maxSkew: 1\n # topologyKey: + kubernetes.io/hostname\n # whenUnsatisfiable: ScheduleAnyway\n topologySpreadConstraints: + []\n # securityContext on the controller pod\n securityContext:\n runAsNonRoot: + true\n runAsUser: 1000\n runAsGroup: 1000\n fsGroup: 1000\n # Add additional + volume mounts on the controller with controller.volumes and controller.volumeMounts\n volumes: + []\n # Add additional volumes to be mounted onto the controller:\n # - name: + custom-dir\n # hostPath:\n # path: /path/to/dir\n # type: + Directory\n volumeMounts: []\n # And add mount paths for those additional volumes:\n # - + name: custom-dir\n # mountPath: /mount/path\n # ---\n # securityContext on the controller + container (see sidecars for securityContext on sidecar + containers)\n containerSecurityContext:\n readOnlyRootFilesystem: + true\n allowPrivilegeEscalation: false\n initContainers: []\n # containers to be run + before the controller's container starts.\n #\n # Example:\n #\n # - name: + wait\n # image: busybox\n # command: [ 'sh', '-c', \"sleep 20\" ]\n # Enable + opentelemetry tracing for the plugin running on the daemonset\n otelTracing: + {}\n # otelServiceName: ebs-csi-controller\n # otelExporterEndpoint: + \"http://localhost:4317\"\n\n node:\n env: []\n envFrom: []\n kubeletPath: + /var/lib/kubelet\n loggingFormat: text\n logLevel: + 2\n priorityClassName:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - + matchExpressions:\n - key: eks.amazonaws.com/compute-type\n operator: + NotIn\n values:\n - fargate\n - key: + node.kubernetes.io/instance-type\n operator: + NotIn\n values:\n - a1.medium\n - a1.large\n - + a1.xlarge\n - a1.2xlarge\n - a1.4xlarge\n nodeSelector: + {}\n podAnnotations: {}\n podLabels: {}\n tolerateAllTaints: + true\n tolerations:\n - operator: Exists\n effect: NoExecute\n tolerationSeconds: + 300\n resources:\n requests:\n cpu: 10m\n memory: + 40Mi\n limits:\n cpu: 100m\n memory: + 256Mi\n serviceAccount:\n create: true\n name: ebs-csi-node-sa\n annotations: + {}\n ## Enable if EKS IAM for SA is used\n # eks.amazonaws.com/role-arn: + arn::iam:::role/ebs-csi-role\n automountServiceAccountToken: true\n # + Enable the linux daemonset creation\n enableLinux: true\n enableWindows: false\n # The + \"maximum number of attachable volumes\" per + node\n volumeAttachLimit:\n updateStrategy:\n type: + RollingUpdate\n rollingUpdate:\n maxUnavailable: \"10%\"\n hostNetwork: + false\n # securityContext on the node pod\n securityContext:\n # The node pod must be run + as root to bind to the registration/driver sockets\n runAsNonRoot: false\n runAsUser: + 0\n runAsGroup: 0\n fsGroup: 0\n # Add additional volume mounts on the node pods with + node.volumes and node.volumeMounts\n volumes: []\n # Add additional volumes to be mounted onto + the node pods:\n # - name: custom-dir\n # hostPath:\n # path: + /path/to/dir\n # type: Directory\n volumeMounts: []\n # And add 
mount paths for those additional volumes:\n # - name: custom-dir\n # mountPath: /mount/path\n # ---\n # securityContext on the node container (see sidecars for securityContext on sidecar containers)\n containerSecurityContext:\n readOnlyRootFilesystem: true\n privileged: true\n # Enable opentelemetry tracing for the plugin running on the daemonset\n otelTracing: {}\n # otelServiceName: ebs-csi-node\n # otelExporterEndpoint: \"http://localhost:4317\"\n\n additionalDaemonSets:\n # Additional node DaemonSets, using the node config structure\n # See docs/additional-daemonsets.md for more information\n #\n # example:\n # nodeSelector:\n # node.kubernetes.io/instance-type: c5.large\n # volumeAttachLimit: 15\n\n # Enable compatibility for the A1 instance family via use of an AL2-based image in a separate DaemonSet\n # a1CompatibilityDaemonSet: true\n\n #storageClasses: []\n # Add StorageClass resources like:\n # - name: ebs-sc\n # # annotation metadata\n # annotations:\n # storageclass.kubernetes.io/is-default-class: \"true\"\n # # label metadata\n # labels:\n # my-label-is: supercool\n # # defaults to WaitForFirstConsumer\n # volumeBindingMode: WaitForFirstConsumer\n # # defaults to Delete\n # reclaimPolicy: Retain\n # parameters:\n # encrypted: \"true\"\n\n volumeSnapshotClasses: []\n # Add VolumeSnapshotClass resources like:\n # - name: ebs-vsc\n # # annotation metadata\n # annotations:\n # snapshot.storage.kubernetes.io/is-default-class: \"true\"\n # # label metadata\n # labels:\n # my-label-is: supercool\n # # deletionPolicy must be specified\n # deletionPolicy: Delete\n # parameters:\n\n # Use old CSIDriver without an fsGroupPolicy set\n # Intended for use with older clusters that cannot easily replace the CSIDriver object\n # This parameter should always be false for new installations\n useOldCSIDriver: false"

       providerConfigRef:
         name: default
   ```

    The cluster profile contains the following core infrastructure layers.

    | **Pack Type** | **Registry** | **Pack Name** | **Pack Version** |
    | ------------- | ------------ | ------------- | ---------------- |
    | OS            | Public Repo  | `ubuntu-aws`  | `22.04`          |
    | Kubernetes    | Public Repo  | `kubernetes`  | `1.29.0`         |
    | Network       | Public Repo  | `cni-calico`  | `3.27.0`         |
    | Storage       | Public Repo  | `csi-aws-ebs` | `1.26.1`         |

    :::tip

    If you want to use different packs in your cluster profile, you can access the Palette UI and simulate creating a
    cluster profile to gather the packs' required values. During the cluster profile creation, click the API button to
    display the API payload. You will find the values of each pack's `name`, `tag`, `uid`, `registryUid`, and `values`
    parameters.

    :::

16. Create the cluster profile.

    ```bash
    kubectl apply --filename crossplane-aws/cluster-profile-aws.yaml
    ```

17. Issue the commands below to get the ID of the cluster profile once its creation is complete.

    ```bash
    kubectl wait --for=condition=Ready profile.cluster.palette.crossplane.io/aws-crossplane-cluster-profile

    clusterProfileId=$(kubectl get profile.cluster.palette.crossplane.io aws-crossplane-cluster-profile --output jsonpath='{.status.atProvider.id}')

    echo Cluster Profile ID: $clusterProfileId
    ```

    ```text hideClipboard
    profile.cluster.palette.crossplane.io/aws-crossplane-cluster-profile condition met
    Cluster Profile ID: 6638e0eb8f42b99cb4d1d1bb
    ```
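    Because the cluster profile is a Crossplane managed resource, you can also inspect it with standard Kubernetes
    tooling while you wait. The following optional check is a sketch that assumes the resource name used in this guide.

    ```bash
    # Display the spec, status conditions, and events of the Profile managed resource
    kubectl describe profile.cluster.palette.crossplane.io aws-crossplane-cluster-profile
    ```

    If the resource does not reach the `Ready` condition, the `Status` and `Events` sections usually explain why.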
18. Next, get the ID of your AWS cloud account registered in Palette by invoking the `cloudaccounts` Palette API.
    Replace `<API_KEY>` with your Palette API key and `<ACCOUNT_NAME>` with the name under which you registered
    your AWS account in Palette.

    ```bash
    curl --location --request GET 'https://api.spectrocloud.com/v1/cloudaccounts/aws' \
      -H 'Accept: application/json' \
      -H 'ApiKey: <API_KEY>' | jq '.items[] | select(.metadata.name == "<ACCOUNT_NAME>") | .metadata.uid'
    ```

    ```text hideClipboard
    "645981f0ab3ab8105fabc932"
    ```

    Copy the API response containing your AWS cloud account ID.

19. Create a file to store the AWS IaaS cluster configuration.

    ```bash
    vi crossplane-aws/cluster-aws.yaml
    ```

20. Paste the cluster configuration displayed below into the text editor window that opens:

    - Replace `<SSH_KEY_NAME>` with the name of the SSH key available in the region where you want to deploy the
      cluster.
    - Replace `<CLUSTER_PROFILE_ID>` and `<CLOUD_ACCOUNT_ID>` with the IDs you obtained in steps 17 and 18.

    Optionally, you can edit the region, availability zone, instance type, and number of nodes of your cluster according
    to your workload.

    Once you are done making the alterations, save and exit the file.

    ```yaml
    apiVersion: cluster.palette.crossplane.io/v1alpha1
    kind: Aws
    metadata:
      name: aws-crossplane-cluster
      namespace: crossplane-system
    spec:
      forProvider:
        cloudConfig:
          - sshKeyName: <SSH_KEY_NAME>
            region: us-east-1
        machinePool:
          - azs:
              - us-east-1a
            count: 2
            instanceType: t3.xlarge
            name: machinepool1
          - azs:
              - us-east-1a
            count: 1
            instanceType: m4.2xlarge
            name: controlplanepool
            controlPlane: true
        clusterProfile:
          - id: <CLUSTER_PROFILE_ID>
        cloudAccountId: <CLOUD_ACCOUNT_ID>
      providerConfigRef:
        name: default
    ```

21. Create the AWS IaaS cluster.

    ```bash
    kubectl apply --filename crossplane-aws/cluster-aws.yaml
    ```

22. Wait for the cluster to be created. The cluster provisioning might take up to 20 minutes.

    ```bash
    kubectl wait --for=condition=Ready aws.cluster.palette.crossplane.io/aws-crossplane-cluster --timeout=1h
    ```

    Once ready, you should receive an output similar to the following.

    ```text hideClipboard
    aws.cluster.palette.crossplane.io/aws-crossplane-cluster condition met
    ```

## Validate

1. Log in to [Palette](https://console.spectrocloud.com/).

2. From the left **Main Menu**, click **Clusters**.

3. Verify the deployed cluster named `aws-crossplane-cluster` is displayed and has the **Running** and **Healthy**
   status.
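If you prefer the command line, you can also verify the managed resource from your kind cluster. This optional check
uses only the resource created in this guide; the exact columns displayed depend on the provider version.

```bash
# Confirm that the Aws managed resource reports Ready and Synced
kubectl get aws.cluster.palette.crossplane.io aws-crossplane-cluster
```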
diff --git a/docs/docs-content/automation/crossplane/deploy-cluster-azure-crossplane.md b/docs/docs-content/automation/crossplane/deploy-cluster-azure-crossplane.md new file mode 100644 index 0000000000..1ab21eb5da --- /dev/null +++ b/docs/docs-content/automation/crossplane/deploy-cluster-azure-crossplane.md @@ -0,0 +1,579 @@
---
sidebar_label: "Deploy an Azure IaaS Cluster with Crossplane"
title: "Deploy an Azure IaaS Cluster with Crossplane"
description: "Learn how to deploy an Azure IaaS cluster using the Spectro Cloud Crossplane provider."
hide_table_of_contents: false
sidebar_position: 30
tags: ["crossplane", "azure", "iac", "infrastructure as code"]
---

Palette supports using [Crossplane](https://www.crossplane.io) to create and manage Kubernetes
[host clusters](../../glossary-all.md#host-cluster) across major infrastructure providers. This section guides you
through using Crossplane to deploy a Palette-managed Kubernetes cluster in Azure.

## Prerequisites

- A [Palette](https://www.spectrocloud.com/get-started) account and API key. Refer to the
  [Create API Key](../../user-management/authentication/api-key/create-api-key.md) page for instructions on creating an
  API key.
- A public [Azure](https://learn.microsoft.com/en-us/training/modules/create-an-azure-account) cloud account with the
  required [permissions](../../clusters/public-cloud/azure/required-permissions.md).
- An SSH key pair available. Check out the [Create and Upload an SSH Key](../../clusters/cluster-management/ssh-keys.md)
  page for guidance.
- The Azure account must be registered in Palette. Follow the
  [Register and Manage Azure Cloud Account](../../clusters/public-cloud/azure/azure-cloud.md) guide to register your
  account in Palette.
- A Kubernetes cluster with at least 2 GB of RAM. This guide uses a [kind](https://kind.sigs.k8s.io) cluster as an
  example. Refer to the [kind Quick Start](https://kind.sigs.k8s.io/docs/user/quick-start/) to learn how to install kind
  and create a cluster.
- The following software is required and must be installed:
  - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)
  - [Helm](https://helm.sh/docs/intro/install/) version v3.2.0 or later
  - [curl](https://curl.se/docs/install.html)
  - A text editor such as Vi or [Nano](https://www.nano-editor.org). This guide uses Vi as an example.

## Deploy an Azure IaaS Cluster with Crossplane

1. Open up a terminal session and set the kubectl context to your kind cluster. Replace `<CLUSTER_NAME>` with the
   name of your cluster.

   ```bash
   kubectl cluster-info --context <CLUSTER_NAME>
   ```

   ```text hideClipboard
   Kubernetes control plane is running at https://127.0.0.1:65306
   CoreDNS is running at https://127.0.0.1:65306/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

   To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
   ```

2. Next, add the Crossplane Helm chart repository. This will enable Helm to install all the Crossplane Kubernetes
   components.

   ```bash
   helm repo add \
     crossplane-stable https://charts.crossplane.io/stable
   helm repo update
   ```

   ```text hideClipboard
   "crossplane-stable" has been added to your repositories
   Hang tight while we grab the latest from your chart repositories...
   ...Successfully got an update from the "ngrok" chart repository
   ...Successfully got an update from the "crossplane-stable" chart repository
   ...Successfully got an update from the "stable" chart repository
   Update Complete. ⎈Happy Helming!⎈
   ```

3. Install the Crossplane components using the `helm install` command.

   ```bash
   helm install crossplane \
     crossplane-stable/crossplane \
     --namespace crossplane-system \
     --create-namespace
   ```

   You can verify the installation with the `kubectl get pods` command. The output must contain two Crossplane pods in
   the _Running_ status.

   ```bash
   kubectl get pods --namespace crossplane-system
   ```

   ```text hideClipboard
   NAME                                     READY   STATUS    RESTARTS   AGE
   crossplane-869d89c8f8-7jc6c              1/1     Running   0          20s
   crossplane-rbac-manager-784b496b-8mr6z   1/1     Running   0          20s
   ```

4. Once Crossplane is installed, create a folder to store the Kubernetes configuration files.

   ```bash
   mkdir crossplane-azure
   ```

5. Use a text editor of your choice to create a file for the Palette Crossplane provider configuration.

   ```bash
   vi crossplane-azure/provider-palette.yaml
   ```

6. Paste the following Kubernetes configuration into the text editor window that opens. Press the `Escape` key, type
   `:wq`, and press `Enter` to save the file and exit.
+ + ```yaml + apiVersion: pkg.crossplane.io/v1 + kind: Provider + metadata: + name: provider-palette + spec: + package: crossplane-contrib/provider-palette:v0.12.0 + ``` + +7. Issue the command below to install the Palette Crossplane provider. Crossplane will install the CRDs that allow you + to create Palette resources directly inside Kubernetes. + + ```bash + kubectl apply --filename crossplane-azure/provider-palette.yaml + ``` + + You can check the installation with the `kubectl get providers` command. + + ```bash + kubectl get providers + ``` + + ```text hideClipboard + NAME INSTALLED HEALTHY PACKAGE AGE + provider-palette True True crossplane-contrib/provider-palette:v0.12.0 61s + ``` + +8. Create a file to store a Kubernetes Secret containing your Palette API key. The Palette provider requires + credentials to create and manage resources. + + ```bash + vi crossplane-azure/secret-azure.yaml + ``` + +9. Paste the following Kubernetes configuration into the text editor window that opens. Replace `` with + your Palette API key and change the values of `project_name` and `host` according to your environment. Save the file + and exit. + + ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: palette-creds + namespace: crossplane-system + type: Opaque + stringData: + credentials: | + { + "api_key": "", + "project_name": "Default", + "host": "console.spectrocloud.com" + } + ``` + +10. Create the Kubernetes secret. + + ```shell + kubectl apply --filename crossplane-azure/secret-azure.yaml + ``` + +11. Next, create a file to store the `ProviderConfig` object. This object configures the Palette Crossplane provider + with the secret containing the Palette API key. + + ```bash + vi crossplane-azure/providerconfig-azure.yaml + ``` + +12. Paste the content below into the text editor window, save the file and exit. + + ```yaml + apiVersion: palette.crossplane.io/v1beta1 + kind: ProviderConfig + metadata: + name: default + spec: + credentials: + source: Secret + secretRef: + name: palette-creds + namespace: crossplane-system + key: credentials + ``` + +13. Create the Kubernetes `ProviderConfig` object. + + ```shell + kubectl apply --filename crossplane-azure/providerconfig-azure.yaml + ``` + +14. Once the Palette Crossplane provider is installed and set up, create a file to store the Azure + [cluster profile](../../profiles/cluster-profiles/cluster-profiles.md) configuration. + + ```bash + vi crossplane-azure/cluster-profile-azure.yaml + ``` + +15. Paste the Kubernetes configuration below into the text editor window that opens. Save the file and exit. + + ```yaml + apiVersion: cluster.palette.crossplane.io/v1alpha1 + kind: Profile + metadata: + name: azure-crossplane-cluster-profile + namespace: crossplane-system + spec: + forProvider: + cloud: "azure" + description: "Azure cluster profile" + type: "cluster" + pack: + - name: "ubuntu-azure" + tag: "22.04" + registryUid: "5eecc89d0b150045ae661cef" + values: + "# Spectro Golden images includes most of the hardening as per CIS Ubuntu Linux 22.04 LTS Server L1 v1.0.0 + standards\n# Uncomment below section to\n# 1. Include custom files to be copied over to the nodes + and/or\n# 2. Execute list of commands before or after kubeadm init/join is + executed\n#\n#kubeadmconfig:\n# preKubeadmCommands:\n# - echo \"Executing pre kube admin config + commands\"\n# - update-ca-certificates\n# - 'systemctl restart containerd; sleep 3'\n# - 'while [ ! 
-S + /var/run/containerd/containerd.sock ]; do echo \"Waiting for containerd...\"; sleep 1; + done'\n# postKubeadmCommands:\n# - echo \"Executing post kube admin config commands\"\n# files:\n# - + targetPath: /usr/local/share/ca-certificates/mycom.crt\n# targetOwner: + \"root:root\"\n# targetPermissions: \"0644\"\n# content: |\n# -----BEGIN + CERTIFICATE-----\n# MIICyzCCAbOgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl\n# cm5ldGVzMB4XDTIwMDkyMjIzNDMyM1oXDTMwMDkyMDIzNDgyM1owFTETMBEGA1UE\n# AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMdA\n# nZYs1el/6f9PgV/aO9mzy7MvqaZoFnqO7Qi4LZfYzixLYmMUzi+h8/RLPFIoYLiz\n# qiDn+P8c9I1uxB6UqGrBt7dkXfjrUZPs0JXEOX9U/6GFXL5C+n3AUlAxNCS5jobN\n# fbLt7DH3WoT6tLcQefTta2K+9S7zJKcIgLmBlPNDijwcQsbenSwDSlSLkGz8v6N2\n# 7SEYNCV542lbYwn42kbcEq2pzzAaCqa5uEPsR9y+uzUiJpv5tDHUdjbFT8tme3vL\n# 9EdCPODkqtMJtCvz0hqd5SxkfeC2L+ypaiHIxbwbWe7GtliROvz9bClIeGY7gFBK\n# jZqpLdbBVjo0NZBTJFUCAwEAAaMmMCQwDgYDVR0PAQH/BAQDAgKkMBIGA1UdEwEB\n# /wQIMAYBAf8CAQAwDQYJKoZIhvcNAQELBQADggEBADIKoE0P+aVJGV9LWGLiOhki\n# HFv/vPPAQ2MPk02rLjWzCaNrXD7aPPgT/1uDMYMHD36u8rYyf4qPtB8S5REWBM/Y\n# g8uhnpa/tGsaqO8LOFj6zsInKrsXSbE6YMY6+A8qvv5lPWpJfrcCVEo2zOj7WGoJ\n# ixi4B3fFNI+wih8/+p4xW+n3fvgqVYHJ3zo8aRLXbXwztp00lXurXUyR8EZxyR+6\n# b+IDLmHPEGsY9KOZ9VLLPcPhx5FR9njFyXvDKmjUMJJgUpRkmsuU1mCFC+OHhj56\n# IkLaSJf6z/p2a3YjTxvHNCqFMLbJ2FvJwYCRzsoT2wm2oulnUAMWPI10vdVM+Nc=\n# -----END + CERTIFICATE-----" + uid: "63fdd137199bafb6b290c674" + + - name: "kubernetes" + tag: "1.29.0" + registryUid: "5eecc89d0b150045ae661cef" + values: + "# spectrocloud.com/enabled-presets: Kube Controller Manager:loopback-ctrlmgr,Kube + Scheduler:loopback-scheduler\npack:\n content:\n images:\n - image: + registry.k8s.io/coredns/coredns:v1.10.1\n - image: registry.k8s.io/etcd:3.5.10-0\n - image: + registry.k8s.io/kube-apiserver:v1.29.0\n - image: + registry.k8s.io/kube-controller-manager:v1.29.0\n - image: + registry.k8s.io/kube-proxy:v1.29.0\n - image: registry.k8s.io/kube-scheduler:v1.29.0\n - image: + registry.k8s.io/pause:3.9\n - image: registry.k8s.io/pause:3.8\n k8sHardening: True\n #CIDR Range + for Pods in cluster\n # Note : This must not overlap with any of the host or service network\n podCIDR: + \"192.168.0.0/16\"\n #CIDR notation IP range from which to assign service cluster IPs\n # Note : This + must not overlap with any IP ranges assigned to nodes for pods.\n serviceClusterIpRange: + \"10.96.0.0/12\"\n # serviceDomain: \"cluster.local\"\n\n# KubeAdm customization for kubernetes + hardening. Below config will be ignored if k8sHardening property above is + disabled\nkubeadmconfig:\n apiServer:\n extraArgs:\n # Note : secure-port flag is used during + kubeadm init. 
Do not change this flag on a running cluster\n secure-port: + \"6443\"\n anonymous-auth: \"true\"\n profiling: \"false\"\n disable-admission-plugins: + \"AlwaysAdmit\"\n default-not-ready-toleration-seconds: + \"60\"\n default-unreachable-toleration-seconds: \"60\"\n enable-admission-plugins: + \"AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity\"\n admission-control-config-file: + \"/etc/kubernetes/pod-security-standard.yaml\"\n audit-log-path: + /var/log/apiserver/audit.log\n audit-policy-file: + /etc/kubernetes/audit-policy.yaml\n audit-log-maxage: \"30\"\n audit-log-maxbackup: + \"10\"\n audit-log-maxsize: \"100\"\n authorization-mode: RBAC,Node\n tls-cipher-suites: + \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\"\n extraVolumes:\n - + name: audit-log\n hostPath: /var/log/apiserver\n mountPath: + /var/log/apiserver\n pathType: DirectoryOrCreate\n - name: audit-policy\n hostPath: + /etc/kubernetes/audit-policy.yaml\n mountPath: /etc/kubernetes/audit-policy.yaml\n readOnly: + true\n pathType: File\n - name: pod-security-standard\n hostPath: + /etc/kubernetes/pod-security-standard.yaml\n mountPath: + /etc/kubernetes/pod-security-standard.yaml\n readOnly: true\n pathType: + File\n controllerManager:\n extraArgs:\n profiling: \"false\"\n terminated-pod-gc-threshold: + \"25\"\n use-service-account-credentials: \"true\"\n feature-gates: + \"RotateKubeletServerCertificate=true\"\n scheduler:\n extraArgs:\n profiling: + \"false\"\n kubeletExtraArgs:\n read-only-port : \"0\"\n event-qps: \"0\"\n feature-gates: + \"RotateKubeletServerCertificate=true\"\n protect-kernel-defaults: + \"true\"\n rotate-server-certificates: \"true\"\n tls-cipher-suites: + \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\"\n files:\n - + path: hardening/audit-policy.yaml\n targetPath: /etc/kubernetes/audit-policy.yaml\n targetOwner: + \"root:root\"\n targetPermissions: \"0600\"\n - path: hardening/90-kubelet.conf\n targetPath: + /etc/sysctl.d/90-kubelet.conf\n targetOwner: \"root:root\"\n targetPermissions: \"0600\"\n - + targetPath: /etc/kubernetes/pod-security-standard.yaml\n targetOwner: + \"root:root\"\n targetPermissions: \"0600\"\n content: |\n apiVersion: + apiserver.config.k8s.io/v1\n kind: AdmissionConfiguration\n plugins:\n - name: + PodSecurity\n configuration:\n apiVersion: + pod-security.admission.config.k8s.io/v1\n kind: + PodSecurityConfiguration\n defaults:\n enforce: + \"baseline\"\n enforce-version: \"v1.29\"\n audit: + \"baseline\"\n audit-version: \"v1.29\"\n warn: + \"restricted\"\n warn-version: \"v1.29\"\n audit: + \"restricted\"\n audit-version: \"v1.29\"\n exemptions:\n # Array of + authenticated usernames to exempt.\n usernames: []\n # Array of runtime class + names to exempt.\n runtimeClasses: []\n # Array of namespaces to + exempt.\n namespaces: [kube-system]\n\n preKubeadmCommands:\n # For enabling + 'protect-kernel-defaults' flag to kubelet, kernel parameters changes are required\n - 'echo \"====> + Applying kernel parameters for Kubelet\"'\n - 'sysctl -p + 
/etc/sysctl.d/90-kubelet.conf'\n #postKubeadmCommands:\n #- 'echo \"List of post kubeadm commands to + be executed\"'\n\n# Client configuration to add OIDC based authentication flags in + kubeconfig\n#clientConfig:\n #oidc-issuer-url: \"{{ + .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}\"\n #oidc-client-id: \"{{ + .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}\"\n #oidc-client-secret: + oidc client secret\n #oidc-extra-scope: profile,email" + uid: "661cc50b0aa79b77ade281f9" + + - name: "cni-calico-azure" + tag: "3.27.0" + uid: "661cc4f20aa79b75532974cb" + registryUid: "5eecc89d0b150045ae661cef" + values: + "pack:\n content:\n images:\n - image: + gcr.io/spectro-images-public/packs/calico/3.27.0/cni:v3.27.0\n - image: + gcr.io/spectro-images-public/packs/calico/3.27.0/node:v3.27.0\n - image: + gcr.io/spectro-images-public/packs/calico/3.27.0/kube-controllers:v3.27.0\n\nmanifests:\n calico:\n images:\n cni: + \"\"\n node: \"\"\n kubecontroller: \"\"\n # IPAM type to use. Supported types are + calico-ipam, host-local\n ipamType: \"calico-ipam\"\n\n calico_ipam:\n assign_ipv4: + true\n assign_ipv6: false\n\n # Should be one of CALICO_IPV4POOL_IPIP or + CALICO_IPV4POOL_VXLAN \n encapsulationType: \"CALICO_IPV4POOL_VXLAN\"\n\n # Should be one of + Always, CrossSubnet, Never\n encapsulationMode: \"Always\"\n\n env:\n # Additional env + variables for calico-node\n calicoNode:\n #IPV6: \"autodetect\"\n #FELIX_IPV6SUPPORT: + \"true\"\n #CALICO_IPV6POOL_NAT_OUTGOING: \"true\"\n #CALICO_IPV4POOL_CIDR: + \"192.168.0.0/16\"\n #IP_AUTODETECTION_METHOD: \"first-found\"\n\n # Additional env variables + for calico-kube-controller deployment\n calicoKubeControllers:\n #LOG_LEVEL: + \"info\"\n #SYNC_NODE_LABELS: \"true\"" + + - name: "csi-azure" + tag: "1.29.1" + uid: "659a47a28b7673a6de0ac640" + registryUid: "5eecc89d0b150045ae661cef" + values: + "pack:\n content:\n images:\n - image: + mcr.microsoft.com/oss/kubernetes-csi/azuredisk-csi:v1.29.1\n - image: + mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v3.5.0\n - image: + mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v4.3.0\n - image: + mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.8.0\n - image: + mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.10.0\n - image: + mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.8.0\n charts:\n - repo: + https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts\n name: + azuredisk-csi-driver\n version: 1.29.1\n namespace: + \"kube-system\"\n\ncharts:\n azuredisk-csi-driver:\n storageclass:\n # Azure storage account Sku + tier. 
Default is empty\n storageaccounttype: \"StandardSSD_LRS\"\n\n # Possible values are + shared (default), dedicated, and managed\n kind: \"managed\"\n\n #Allowed reclaim policies are + Delete, Retain\n reclaimPolicy: \"Delete\"\n\n #Toggle for Volume + expansion\n allowVolumeExpansion: \"true\"\n\n #Toggle for Default class\n isDefaultClass: + \"true\"\n\n #Supported binding modes are Immediate, WaitForFirstConsumer\n #Setting binding + mode to WaitForFirstConsumer, so that the volumes gets created in the same AZ as that of the + pods\n volumeBindingMode: \"WaitForFirstConsumer\"\n\n image:\n baseRepo: + mcr.microsoft.com\n azuredisk:\n repository: /oss/kubernetes-csi/azuredisk-csi\n tag: + v1.29.1\n pullPolicy: IfNotPresent\n csiProvisioner:\n repository: + /oss/kubernetes-csi/csi-provisioner\n tag: v3.5.0\n pullPolicy: + IfNotPresent\n csiAttacher:\n repository: /oss/kubernetes-csi/csi-attacher\n tag: + v4.3.0\n pullPolicy: IfNotPresent\n csiResizer:\n repository: + /oss/kubernetes-csi/csi-resizer\n tag: v1.8.0\n pullPolicy: + IfNotPresent\n livenessProbe:\n repository: /oss/kubernetes-csi/livenessprobe\n tag: + v2.10.0\n pullPolicy: IfNotPresent\n nodeDriverRegistrar:\n repository: + /oss/kubernetes-csi/csi-node-driver-registrar\n tag: v2.8.0\n pullPolicy: + IfNotPresent\n\n serviceAccount:\n create: true # When true, service accounts will be created for + you. Set to false if you want to use your own.\n controller: csi-azuredisk-controller-sa # Name of + Service Account to be created or used\n node: csi-azuredisk-node-sa # Name of Service Account to be + created or used\n snapshotController: csi-snapshot-controller-sa # Name of Service Account to be + created or used\n\n rbac:\n create: true\n name: azuredisk\n\n controller:\n name: + csi-azuredisk-controller\n cloudConfigSecretName: + azure-cloud-provider\n cloudConfigSecretNamespace: kube-system\n allowEmptyCloudConfig: + false\n enableTrafficManager: false\n trafficManagerPort: 7788\n replicas: + 2\n metricsPort: 29604\n livenessProbe:\n healthPort: 29602\n runOnMaster: + false\n runOnControlPlane: false\n disableAvailabilitySetNodes: false\n vmType: + \"\"\n provisionerWorkerThreads: 100\n attacherWorkerThreads: 1000\n vmssCacheTTLInSeconds: + -1\n logLevel: 5\n otelTracing:\n enabled: false\n otelServiceName: + csi-azuredisk-controller\n otelExporterEndpoint: + \"http://localhost:4317\"\n tolerations:\n - key: + \"node-role.kubernetes.io/master\"\n operator: \"Exists\"\n effect: + \"NoSchedule\"\n - key: \"node-role.kubernetes.io/controlplane\"\n operator: + \"Exists\"\n effect: \"NoSchedule\"\n - key: + \"node-role.kubernetes.io/control-plane\"\n operator: \"Exists\"\n effect: + \"NoSchedule\"\n hostNetwork: true # this setting could be disabled if controller does not depend on + MSI setting\n labels: {}\n annotations: {}\n podLabels: {}\n podAnnotations: + {}\n nodeSelector: {}\n affinity: + {}\n resources:\n csiProvisioner:\n limits:\n memory: + 500Mi\n requests:\n cpu: 10m\n memory: + 20Mi\n csiAttacher:\n limits:\n memory: + 500Mi\n requests:\n cpu: 10m\n memory: + 20Mi\n csiResizer:\n limits:\n memory: + 500Mi\n requests:\n cpu: 10m\n memory: + 20Mi\n csiSnapshotter:\n limits:\n memory: + 200Mi\n requests:\n cpu: 10m\n memory: + 20Mi\n livenessProbe:\n limits:\n memory: + 100Mi\n requests:\n cpu: 10m\n memory: + 20Mi\n azuredisk:\n limits:\n memory: + 500Mi\n requests:\n cpu: 10m\n memory: + 20Mi\n\n node:\n cloudConfigSecretName: azure-cloud-provider\n cloudConfigSecretNamespace: + kube-system\n supportZone: true\n 
allowEmptyCloudConfig: true\n getNodeIDFromIMDS: + false\n maxUnavailable: 1\n logLevel: 5\n livenessProbe:\n healthPort: + 29603\n\n snapshot:\n enabled: false\n name: + csi-snapshot-controller\n image:\n csiSnapshotter:\n repository: + /oss/kubernetes-csi/csi-snapshotter\n tag: v6.2.2\n pullPolicy: + IfNotPresent\n csiSnapshotController:\n repository: + /oss/kubernetes-csi/snapshot-controller\n tag: v6.2.2\n pullPolicy: + IfNotPresent\n snapshotController:\n name: csi-snapshot-controller\n replicas: + 2\n labels: {}\n annotations: {}\n podLabels: {}\n podAnnotations: + {}\n resources:\n limits:\n memory: + 300Mi\n requests:\n cpu: 10m\n memory: + 20Mi\n VolumeSnapshotClass:\n enabled: false\n name: + csi-azuredisk-vsc\n deletionPolicy: Delete\n parameters:\n incremental: '\"true\"' + # available values: \"true\", \"false\" (\"true\" by default for Azure Public Cloud, and \"false\" by + default for Azure Stack Cloud)\n resourceGroup: \"\" # available values: EXISTING RESOURCE GROUP + (If not specified, snapshot will be stored in the same resource group as source Azure + disk)\n tags: \"\" # tag format: 'key1=val1,key2=val2'\n additionalLabels: + {}\n\n feature:\n enableFSGroupPolicy: true\n\n driver:\n name: + disk.csi.azure.com\n # maximum number of attachable volumes per node,\n # maximum number is + defined according to node instance type by default(-1)\n volumeAttachLimit: + -1\n customUserAgent: \"\"\n userAgentSuffix: \"OSS-helm\"\n azureGoSDKLogLevel: \"\" # + available values: \"\"(no logs), DEBUG, INFO, WARNING, ERROR\n httpsProxy: \"\"\n httpProxy: + \"\"\n noProxy: \"\"\n\n linux:\n enabled: true\n dsName: csi-azuredisk-node # daemonset + name\n kubelet: /var/lib/kubelet\n distro: debian # available values: debian, + fedora\n enablePerfOptimization: true\n enableRegistrationProbe: + true\n otelTracing:\n enabled: false\n otelServiceName: + csi-azuredisk-node\n otelExporterEndpoint: \"http://localhost:4317\"\n tolerations:\n - + operator: \"Exists\"\n hostNetwork: true # this setting could be disabled if perfProfile is + `none`\n getNodeInfoFromLabels: false # get node info from node labels instead of IMDS\n labels: + {}\n annotations: {}\n podLabels: {}\n podAnnotations: {}\n nodeSelector: + {}\n affinity: + {}\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - + matchExpressions:\n - key: type\n operator: + NotIn\n values:\n - + virtual-kubelet\n resources:\n livenessProbe:\n limits:\n memory: + 100Mi\n requests:\n cpu: 10m\n memory: + 20Mi\n nodeDriverRegistrar:\n limits:\n memory: + 100Mi\n requests:\n cpu: 10m\n memory: + 20Mi\n azuredisk:\n limits:\n memory: + 200Mi\n requests:\n cpu: 10m\n memory: 20Mi\n\n windows:\n enabled: + true\n useHostProcessContainers: false\n dsName: csi-azuredisk-node-win # daemonset + name\n kubelet: 'C:\\var\\lib\\kubelet'\n getNodeInfoFromLabels: false # get node info from node + labels instead of IMDS\n enableRegistrationProbe: true\n otelTracing:\n enabled: + false\n otelServiceName: csi-azuredisk-node-win\n otelExporterEndpoint: + \"http://localhost:4317\"\n tolerations:\n - key: + \"node.kubernetes.io/os\"\n operator: \"Exists\"\n effect: \"NoSchedule\"\n labels: + {}\n annotations: {}\n podLabels: {}\n podAnnotations: {}\n nodeSelector: + {}\n affinity: + {}\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - + matchExpressions:\n - key: type\n operator: + NotIn\n values:\n - + virtual-kubelet\n resources:\n livenessProbe:\n limits:\n memory: + 150Mi\n 
requests:\n cpu: 10m\n memory:
+ 40Mi\n nodeDriverRegistrar:\n limits:\n memory:
+ 150Mi\n requests:\n cpu: 30m\n memory:
+ 40Mi\n azuredisk:\n limits:\n memory:
+ 200Mi\n requests:\n cpu: 10m\n memory: 40Mi\n\n cloud:
+ AzurePublicCloud\n\n ## Reference to one or more secrets to be used when pulling images\n ## ref:
+ https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n ##\n imagePullSecrets:
+ []\n # - name: \"image-pull-secret\"\n\n workloadIdentity:\n clientID: \"\"\n # [optional]
+ If the AAD application or user-assigned managed identity is not in the same tenant as the cluster\n #
+ then set tenantID with the application or user-assigned managed identity tenant ID\n tenantID:
+ \"\"\n\n azureCredentialFileConfigMap: azure-cred-file"
+
+ providerConfigRef:
+ name: default
+ ```
+
+ The cluster profile contains the following core infrastructure layers.
+
+ | **Pack Type** | **Registry** | **Pack Name** | **Pack Version** |
+ | ------------- | ------------ | ------------------ | ---------------- |
+ | OS | Public Repo | `ubuntu-azure` | `22.04` |
+ | Kubernetes | Public Repo | `kubernetes` | `1.29.0` |
+ | Network | Public Repo | `cni-calico-azure` | `3.27.0` |
+ | Storage | Public Repo | `csi-azure` | `1.29.1` |
+
+ :::tip
+
+ If you want to use different packs in your cluster profile, you can access the Palette UI and simulate creating a
+ cluster profile to gather the packs' required values. During the cluster profile creation, click the API button to
+ display the API payload. You will find the values of each pack's `name`, `tag`, `uid`, `registryUid`, and `values`
+ parameters.
+
+ :::
+
+16. Create the cluster profile.
+
+ ```bash
+ kubectl apply --filename crossplane-azure/cluster-profile-azure.yaml
+ ```
+
+17. Issue the commands below to get the ID of the cluster profile once its creation is complete.
+
+ ```bash
+ kubectl wait --for=condition=Ready profile.cluster.palette.crossplane.io/azure-crossplane-cluster-profile
+ clusterProfileId=$(kubectl get profile.cluster.palette.crossplane.io azure-crossplane-cluster-profile --output jsonpath='{.status.atProvider.id}')
+ echo Cluster Profile ID: $clusterProfileId
+ ```
+
+ ```text hideClipboard
+ profile.cluster.palette.crossplane.io/azure-crossplane-cluster-profile condition met
+ Cluster Profile ID: 6638e0eb8f42b22cb4d1d1bb
+ ```
+
+18. Next, get the ID of your Azure cloud account registered in Palette by invoking the `cloudaccounts` Palette API.
+ Replace `<your-palette-api-key>` with your Palette API key and `<your-azure-account-name>` with the name under
+ which you registered your Azure account in Palette.
+
+ ```bash
+ curl --location --request GET 'https://api.spectrocloud.com/v1/cloudaccounts/azure' \
+ -H 'Accept: application/json' \
+ -H 'ApiKey: <your-palette-api-key>' | jq '.items[] | select(.metadata.name == "<your-azure-account-name>") | .metadata.uid'
+ ```
+
+ ```text hideClipboard
+ "645981f0ab3ab8105fabc940"
+ ```
+
+ Copy the API response containing your Azure cloud account ID.
+
+19. Create a file to store the Azure IaaS cluster configuration.
+
+ ```bash
+ vi crossplane-azure/cluster-azure.yaml
+ ```
+
+20. Paste the cluster configuration displayed below into the text editor window that opens:
+
+ - Replace `<your-ssh-public-key>` with the content of your public SSH key.
+ - Replace `<your-resource-group-name>` with your resource group name, and `<your-subscription-id>` with your Azure
+ subscription ID.
+ - Replace `<your-cluster-profile-id>` and `<your-cloud-account-id>` with the IDs obtained in steps **17** and
+ **18** of this guide.
+
+ Optionally, you can edit the region, availability zone, instance type, and number of nodes of your cluster according
+ to your workload.
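+
+ If you are unsure which VM sizes you can deploy in your target region, you can list them with the Azure CLI. This
+ optional helper step assumes you have the Azure CLI installed and logged in to your subscription.
+
+ ```bash
+ # List the VM sizes available in the eastus region. Assumes an authenticated Azure CLI session.
+ az vm list-sizes --location eastus --output table
+ ```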
+
+ Once you are done making the alterations, save and exit the file.
+
+ ```yaml
+ apiVersion: cluster.palette.crossplane.io/v1alpha1
+ kind: Azure
+ metadata:
+ name: azure-crossplane-cluster
+ namespace: crossplane-system
+ spec:
+ forProvider:
+ cloudConfig:
+ - sshKey: "<your-ssh-public-key>"
+ region: "eastus"
+ resourceGroup: "<your-resource-group-name>"
+ subscriptionId: "<your-subscription-id>"
+ machinePool:
+ - azs:
+ - "1"
+ count: 1
+ instanceType: Standard_A8_v2
+ name: machinepool1
+ - azs:
+ - "1"
+ count: 1
+ instanceType: Standard_A8_v2
+ name: controlplanepool
+ controlPlane: true
+ clusterProfile:
+ - id: <your-cluster-profile-id>
+ cloudAccountId: <your-cloud-account-id>
+ providerConfigRef:
+ name: default
+ ```
+
+21. Create the Azure IaaS cluster.
+
+ ```bash
+ kubectl apply --filename crossplane-azure/cluster-azure.yaml
+ ```
+
+22. Wait for the cluster to be created. The cluster provisioning might take up to 20 minutes.
+
+ ```bash
+ kubectl wait --for=condition=Ready azure.cluster.palette.crossplane.io/azure-crossplane-cluster --timeout=1h
+ ```
+
+ Once ready, you should receive an output similar to the following.
+
+ ```text hideClipboard
+ azure.cluster.palette.crossplane.io/azure-crossplane-cluster condition met
+ ```
+
+## Validate
+
+1. Log in to [Palette](https://console.spectrocloud.com/).
+
+2. From the left **Main Menu**, click **Clusters**.
+
+3. Verify the deployed cluster named `azure-crossplane-cluster` is displayed and has the **Running** and **Healthy**
+ status.
diff --git a/docs/docs-content/automation/crossplane/deploy-cluster-gcp-crossplane.md b/docs/docs-content/automation/crossplane/deploy-cluster-gcp-crossplane.md
new file mode 100644
index 0000000000..deaebe6a27
--- /dev/null
+++ b/docs/docs-content/automation/crossplane/deploy-cluster-gcp-crossplane.md
@@ -0,0 +1,479 @@
+---
+sidebar_label: "Deploy a GCP IaaS Cluster with Crossplane"
+title: "Deploy a GCP IaaS Cluster with Crossplane"
+description: "Learn how to deploy a GCP IaaS cluster using the Spectro Cloud Crossplane provider."
+hide_table_of_contents: false
+sidebar_position: 30
+tags: ["crossplane", "gcp", "iac", "infrastructure as code"]
+---
+
+Palette supports using [Crossplane](https://www.crossplane.io) to create and manage Kubernetes
+[host clusters](../../glossary-all.md#host-cluster) across major infrastructure providers. This section guides you on
+how to use Crossplane to deploy a Palette-managed Kubernetes cluster in GCP.
+
+## Prerequisites
+
+- A [Palette](https://www.spectrocloud.com/get-started) account and API key. Refer to the
+ [Create API Key](../../user-management/authentication/api-key/create-api-key.md) page for instructions on creating an
+ API key.
+- A public [GCP](https://cloud.google.com/iam/docs/creating-managing-service-accounts) service account with the required
+ [roles](../../clusters/public-cloud/gcp/add-gcp-accounts.md#prerequisites).
+- An SSH key pair available. Check out the [Create and Upload an SSH Key](../../clusters/cluster-management/ssh-keys.md)
+ page for guidance.
+- The GCP account must be registered in Palette. Follow the
+ [Register and Manage GCP Accounts](../../clusters/public-cloud/gcp/add-gcp-accounts.md) guide to register your account
+ in Palette.
+- A Kubernetes cluster with at least 2 GB of RAM. This guide uses a [kind](https://kind.sigs.k8s.io) cluster as an
+ example. Refer to the [kind Quick Start](https://kind.sigs.k8s.io/docs/user/quick-start/) to learn how to install kind
+ and create a cluster.
+- The following software is required and must be installed:
+ - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)
+ - [Helm](https://helm.sh/docs/intro/install/) version v3.2.0 or later
+ - [curl](https://curl.se/docs/install.html)
+ - A text editor such as Vi or [Nano](https://www.nano-editor.org). This guide uses Vi as an example.
+
+## Deploy a GCP IaaS Cluster with Crossplane
+
+1. Open up a terminal session and set the kubectl context to your kind cluster. Replace `<your-cluster-name>` with the
+ name of your cluster.
+
+ ```bash
+ kubectl cluster-info --context <your-cluster-name>
+ ```
+
+ ```text hideClipboard
+ Kubernetes control plane is running at https://127.0.0.1:65306
+ CoreDNS is running at https://127.0.0.1:65306/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
+ To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
+ ```
+
+2. Next, add the Crossplane Helm chart repository. This will enable Helm to install all the Crossplane Kubernetes
+ components.
+
+ ```bash
+ helm repo add \
+ crossplane-stable https://charts.crossplane.io/stable
+ helm repo update
+ ```
+
+ ```text hideClipboard
+ "crossplane-stable" has been added to your repositories
+ Hang tight while we grab the latest from your chart repositories...
+ ...Successfully got an update from the "ngrok" chart repository
+ ...Successfully got an update from the "crossplane-stable" chart repository
+ ...Successfully got an update from the "stable" chart repository
+ Update Complete. ⎈Happy Helming!⎈
+ ```
+
+3. Install the Crossplane components using the `helm install` command.
+
+ ```bash
+ helm install crossplane \
+ crossplane-stable/crossplane \
+ --namespace crossplane-system \
+ --create-namespace
+ ```
+
+ You can verify the installation with the `kubectl get pods` command. The output must contain two Crossplane pods in
+ the _Running_ status.
+
+ ```bash
+ kubectl get pods --namespace crossplane-system
+ ```
+
+ ```text hideClipboard
+ NAME READY STATUS RESTARTS AGE
+ crossplane-869d89c8f8-7jc6c 1/1 Running 0 20s
+ crossplane-rbac-manager-784b496b-8mr6z 1/1 Running 0 20s
+ ```
+
+4. Once Crossplane is installed, create a folder to store the Kubernetes configuration files.
+
+ ```bash
+ mkdir crossplane-gcp
+ ```
+
+5. Use a text editor of your choice to create a file for the Palette Crossplane provider configuration.
+
+ ```bash
+ vi crossplane-gcp/provider-palette.yaml
+ ```
+
+6. Paste the following Kubernetes configuration into the text editor window that opens. Press the `Escape` key, type
+ `:wq`, and press `Enter` to save the file and exit.
+
+ ```yaml
+ apiVersion: pkg.crossplane.io/v1
+ kind: Provider
+ metadata:
+ name: provider-palette
+ spec:
+ package: crossplane-contrib/provider-palette:v0.12.0
+ ```
+
+7. Issue the command below to install the Palette Crossplane provider. Crossplane will install the CRDs that allow you
+ to create Palette resources directly inside Kubernetes.
+
+ ```bash
+ kubectl apply --filename crossplane-gcp/provider-palette.yaml
+ ```
+
+ You can check the installation with the `kubectl get providers` command.
+
+ ```bash
+ kubectl get providers
+ ```
+
+ ```text hideClipboard
+ NAME INSTALLED HEALTHY PACKAGE AGE
+ provider-palette True True crossplane-contrib/provider-palette:v0.12.0 61s
+ ```
+
+8. Create a file to store a Kubernetes Secret containing your Palette API key. The Palette provider requires
+ credentials to create and manage resources.
+
+ ```bash
+ vi crossplane-gcp/secret-gcp.yaml
+ ```
+
+9.
Paste the following Kubernetes configuration into the text editor window that opens. Replace `<your-palette-api-key>`
+ with your Palette API key and change the values of `project_name` and `host` according to your environment. Save the
+ file and exit.
+
+ ```yaml
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: palette-creds
+ namespace: crossplane-system
+ type: Opaque
+ stringData:
+ credentials: |
+ {
+ "api_key": "<your-palette-api-key>",
+ "project_name": "Default",
+ "host": "console.spectrocloud.com"
+ }
+ ```
+
+10. Create the Kubernetes secret.
+
+ ```shell
+ kubectl apply --filename crossplane-gcp/secret-gcp.yaml
+ ```
+
+11. Next, create a file to store the `ProviderConfig` object. This object configures the Palette Crossplane provider
+ with the secret containing the Palette API key.
+
+ ```bash
+ vi crossplane-gcp/providerconfig-gcp.yaml
+ ```
+
+12. Paste the content below into the text editor window, then save the file and exit.
+
+ ```yaml
+ apiVersion: palette.crossplane.io/v1beta1
+ kind: ProviderConfig
+ metadata:
+ name: default
+ spec:
+ credentials:
+ source: Secret
+ secretRef:
+ name: palette-creds
+ namespace: crossplane-system
+ key: credentials
+ ```
+
+13. Create the Kubernetes `ProviderConfig` object.
+
+ ```shell
+ kubectl apply --filename crossplane-gcp/providerconfig-gcp.yaml
+ ```
+
+14. Once the Palette Crossplane provider is installed and set up, create a file to store the GCP
+ [cluster profile](../../profiles/cluster-profiles/cluster-profiles.md) configuration.
+
+ ```bash
+ vi crossplane-gcp/cluster-profile-gcp.yaml
+ ```
+
+15. Paste the Kubernetes configuration below into the text editor window that opens. Save the file and exit.
+
+ ```yaml
+ apiVersion: cluster.palette.crossplane.io/v1alpha1
+ kind: Profile
+ metadata:
+ name: gcp-crossplane-cluster-profile
+ namespace: crossplane-system
+ spec:
+ forProvider:
+ cloud: "gcp"
+ description: "GCP cluster profile"
+ type: "cluster"
+ pack:
+ - name: "ubuntu-gcp"
+ tag: "22.04"
+ registryUid: "5eecc89d0b150045ae661cef"
+ values:
+ "# Spectro Golden images includes most of the hardening as per CIS Ubuntu Linux 22.04 LTS Server L1 v1.0.0
+ standards\n\n# Uncomment below section to\n# 1. Include custom files to be copied over to the nodes
+ and/or\n# 2. Execute list of commands before or after kubeadm init/join is
+ executed\n#\n#kubeadmconfig:\n# preKubeadmCommands:\n# - echo \"Executing pre kube admin config
+ commands\"\n# - update-ca-certificates\n# - 'systemctl restart containerd; sleep 3'\n# - 'while [ !
-S + /var/run/containerd/containerd.sock ]; do echo \"Waiting for containerd...\"; sleep 1; + done'\n# postKubeadmCommands:\n# - echo \"Executing post kube admin config commands\"\n# files:\n# - + targetPath: /usr/local/share/ca-certificates/mycom.crt\n# targetOwner: + \"root:root\"\n# targetPermissions: \"0644\"\n# content: |\n# -----BEGIN + CERTIFICATE-----\n# MIICyzCCAbOgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl\n# cm5ldGVzMB4XDTIwMDkyMjIzNDMyM1oXDTMwMDkyMDIzNDgyM1owFTETMBEGA1UE\n# AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMdA\n# nZYs1el/6f9PgV/aO9mzy7MvqaZoFnqO7Qi4LZfYzixLYmMUzi+h8/RLPFIoYLiz\n# qiDn+P8c9I1uxB6UqGrBt7dkXfjrUZPs0JXEOX9U/6GFXL5C+n3AUlAxNCS5jobN\n# fbLt7DH3WoT6tLcQefTta2K+9S7zJKcIgLmBlPNDijwcQsbenSwDSlSLkGz8v6N2\n# 7SEYNCV542lbYwn42kbcEq2pzzAaCqa5uEPsR9y+uzUiJpv5tDHUdjbFT8tme3vL\n# 9EdCPODkqtMJtCvz0hqd5SxkfeC2L+ypaiHIxbwbWe7GtliROvz9bClIeGY7gFBK\n# jZqpLdbBVjo0NZBTJFUCAwEAAaMmMCQwDgYDVR0PAQH/BAQDAgKkMBIGA1UdEwEB\n# /wQIMAYBAf8CAQAwDQYJKoZIhvcNAQELBQADggEBADIKoE0P+aVJGV9LWGLiOhki\n# HFv/vPPAQ2MPk02rLjWzCaNrXD7aPPgT/1uDMYMHD36u8rYyf4qPtB8S5REWBM/Y\n# g8uhnpa/tGsaqO8LOFj6zsInKrsXSbE6YMY6+A8qvv5lPWpJfrcCVEo2zOj7WGoJ\n# ixi4B3fFNI+wih8/+p4xW+n3fvgqVYHJ3zo8aRLXbXwztp00lXurXUyR8EZxyR+6\n# b+IDLmHPEGsY9KOZ9VLLPcPhx5FR9njFyXvDKmjUMJJgUpRkmsuU1mCFC+OHhj56\n# IkLaSJf6z/p2a3YjTxvHNCqFMLbJ2FvJwYCRzsoT2wm2oulnUAMWPI10vdVM+Nc=\n# -----END + CERTIFICATE-----" + uid: "63fdd138199bafb6b657b7e6" + + - name: "kubernetes" + tag: "1.28.3" + registryUid: "5eecc89d0b150045ae661cef" + values: + "# spectrocloud.com/enabled-presets: Kube Controller Manager:loopback-ctrlmgr,Kube + Scheduler:loopback-scheduler\npack:\n k8sHardening: True\n #CIDR Range for Pods in cluster\n # Note : + This must not overlap with any of the host or service network\n podCIDR: \"192.168.0.0/16\"\n #CIDR + notation IP range from which to assign service cluster IPs\n # Note : This must not overlap with any IP + ranges assigned to nodes for pods.\n serviceClusterIpRange: \"10.96.0.0/12\"\n # serviceDomain: + \"cluster.local\"\n\n# KubeAdm customization for kubernetes hardening. Below config will be ignored if + k8sHardening property above is disabled\nkubeadmconfig:\n apiServer:\n extraArgs:\n # Note : + secure-port flag is used during kubeadm init. 
Do not change this flag on a running + cluster\n secure-port: \"6443\"\n anonymous-auth: \"true\"\n profiling: + \"false\"\n disable-admission-plugins: \"AlwaysAdmit\"\n default-not-ready-toleration-seconds: + \"60\"\n default-unreachable-toleration-seconds: \"60\"\n enable-admission-plugins: + \"AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity\"\n admission-control-config-file: + \"/etc/kubernetes/pod-security-standard.yaml\"\n audit-log-path: + /var/log/apiserver/audit.log\n audit-policy-file: + /etc/kubernetes/audit-policy.yaml\n audit-log-maxage: \"30\"\n audit-log-maxbackup: + \"10\"\n audit-log-maxsize: \"100\"\n authorization-mode: RBAC,Node\n tls-cipher-suites: + \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\"\n extraVolumes:\n - + name: audit-log\n hostPath: /var/log/apiserver\n mountPath: + /var/log/apiserver\n pathType: DirectoryOrCreate\n - name: audit-policy\n hostPath: + /etc/kubernetes/audit-policy.yaml\n mountPath: /etc/kubernetes/audit-policy.yaml\n readOnly: + true\n pathType: File\n - name: pod-security-standard\n hostPath: + /etc/kubernetes/pod-security-standard.yaml\n mountPath: + /etc/kubernetes/pod-security-standard.yaml\n readOnly: true\n pathType: + File\n controllerManager:\n extraArgs:\n profiling: \"false\"\n terminated-pod-gc-threshold: + \"25\"\n use-service-account-credentials: \"true\"\n feature-gates: + \"RotateKubeletServerCertificate=true\"\n scheduler:\n extraArgs:\n profiling: + \"false\"\n kubeletExtraArgs:\n read-only-port : \"0\"\n event-qps: \"0\"\n feature-gates: + \"RotateKubeletServerCertificate=true\"\n protect-kernel-defaults: + \"true\"\n rotate-server-certificates: \"true\"\n tls-cipher-suites: + \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\"\n files:\n - + path: hardening/audit-policy.yaml\n targetPath: /etc/kubernetes/audit-policy.yaml\n targetOwner: + \"root:root\"\n targetPermissions: \"0600\"\n - path: hardening/90-kubelet.conf\n targetPath: + /etc/sysctl.d/90-kubelet.conf\n targetOwner: \"root:root\"\n targetPermissions: \"0600\"\n - + targetPath: /etc/kubernetes/pod-security-standard.yaml\n targetOwner: + \"root:root\"\n targetPermissions: \"0600\"\n content: |\n apiVersion: + apiserver.config.k8s.io/v1\n kind: AdmissionConfiguration\n plugins:\n - name: + PodSecurity\n configuration:\n apiVersion: + pod-security.admission.config.k8s.io/v1\n kind: + PodSecurityConfiguration\n defaults:\n enforce: + \"baseline\"\n enforce-version: \"v1.28\"\n audit: + \"baseline\"\n audit-version: \"v1.28\"\n warn: + \"restricted\"\n warn-version: \"v1.28\"\n audit: + \"restricted\"\n audit-version: \"v1.28\"\n exemptions:\n # Array of + authenticated usernames to exempt.\n usernames: []\n # Array of runtime class + names to exempt.\n runtimeClasses: []\n # Array of namespaces to + exempt.\n namespaces: [kube-system]\n\n preKubeadmCommands:\n # For enabling + 'protect-kernel-defaults' flag to kubelet, kernel parameters changes are required\n - 'echo \"====> + Applying kernel parameters for Kubelet\"'\n - 'sysctl -p + 
/etc/sysctl.d/90-kubelet.conf'\n #postKubeadmCommands:\n #- 'echo \"List of post kubeadm commands to + be executed\"'\n\n# Client configuration to add OIDC based authentication flags in + kubeconfig\n#clientConfig:\n #oidc-issuer-url: \"{{ + .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}\"\n #oidc-client-id: \"{{ + .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}\"\n #oidc-client-secret: + 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv\n #oidc-extra-scope: profile,email" + uid: "659a47b28b7673a7dff73658" + + - name: "cni-calico" + tag: "3.27.0" + uid: "661cc4f20aa79b7543637fa9" + registryUid: "5eecc89d0b150045ae661cef" + values: + "pack:\n content:\n images:\n - image: + gcr.io/spectro-images-public/packs/calico/3.27.0/cni:v3.27.0\n - image: + gcr.io/spectro-images-public/packs/calico/3.27.0/node:v3.27.0\n - image: + gcr.io/spectro-images-public/packs/calico/3.27.0/kube-controllers:v3.27.0\n\nmanifests:\n calico:\n images:\n cni: + \"\"\n node: \"\"\n kubecontroller: \"\"\n # IPAM type to use. Supported types are + calico-ipam, host-local\n ipamType: \"calico-ipam\"\n\n calico_ipam:\n assign_ipv4: + true\n assign_ipv6: false\n\n # Should be one of CALICO_IPV4POOL_IPIP or + CALICO_IPV4POOL_VXLAN \n encapsulationType: \"CALICO_IPV4POOL_IPIP\"\n\n # Should be one of Always, + CrossSubnet, Never\n encapsulationMode: \"Always\"\n\n env:\n # Additional env variables for + calico-node\n calicoNode:\n #IPV6: \"autodetect\"\n #FELIX_IPV6SUPPORT: + \"true\"\n #CALICO_IPV6POOL_NAT_OUTGOING: \"true\"\n #CALICO_IPV4POOL_CIDR: + \"192.168.0.0/16\"\n #IP_AUTODETECTION_METHOD: \"first-found\"\n\n # Additional env variables + for calico-kube-controller deployment\n calicoKubeControllers:\n #LOG_LEVEL: + \"info\"\n #SYNC_NODE_LABELS: \"true\"" + + - name: "csi-gcp-driver" + tag: "1.12.4" + uid: "661cc4f80aa79b75d4440676" + registryUid: "5eecc89d0b150045ae661cef" + values: + "pack:\n content:\n images:\n - image: + gcr.io/spectro-images-public/packs/csi-gcp-driver/1.12.4/csi-provisioner:v3.6.2\n - image: + gcr.io/spectro-images-public/packs/csi-gcp-driver/1.12.4/csi-attacher:v4.4.2\n - image: + gcr.io/spectro-images-public/packs/csi-gcp-driver/1.12.4/csi-resizer:v1.9.2\n - image: + gcr.io/spectro-images-public/packs/csi-gcp-driver/1.12.4/csi-snapshotter:v6.3.2\n - image: + gcr.io/spectro-images-public/packs/csi-gcp-driver/1.12.4/gcp-compute-persistent-disk-csi-driver:v1.12.4\n - + image: + gcr.io/spectro-images-public/packs/csi-gcp-driver/1.12.4/csi-node-driver-registrar:v2.9.2\n \nmanifests:\n storageclass:\n #Flag + to denote if this should be the default storage class for dynamic provisioning\n isDefaultClass: + \"true\"\n\n parameters:\n #Possible values : pd-standard or pd-ssd\n type: + \"pd-standard\"\n \n #Possible values: none or regional-pd\n replication-type: + \"none\"\n \n #Supported binding modes are Immediate, + WaitForFirstConsumer\n volumeBindingMode: \"WaitForFirstConsumer\"\n\n #Set this flag to true to + enable volume expansion\n allowVolumeExpansion: true\n\n #Allowed reclaim policies are Delete, + Retain\n reclaimPolicy: \"Delete\"\n\n #allowedTopologies\n zones:\n #- + us-central1-a\n #- us-central1-b\n\n k8sVersion: \"{{ .spectro.system.kubernetes.version + }}\"\n\n controller:\n args:\n csiProvisioner:\n - \"--v=5\"\n - + \"--csi-address=/csi/csi.sock\"\n - \"--feature-gates=Topology=true\"\n - + \"--http-endpoint=:22011\"\n - \"--leader-election-namespace=$(PDCSI_NAMESPACE)\"\n - + \"--timeout=250s\"\n - 
\"--extra-create-metadata\"\n #-
+ \"--run-controller-service=false\" # disable the controller service of the CSI driver\n #-
+ \"--run-node-service=false\" # disable the node service of the CSI driver\n -
+ \"--leader-election\"\n - \"--default-fstype=ext4\"\n -
+ \"--controller-publish-readonly=true\"\n \n csiAttacher:\n - \"--v=5\"\n -
+ \"--csi-address=/csi/csi.sock\"\n - \"--http-endpoint=:22012\"\n -
+ \"--leader-election\"\n - \"--leader-election-namespace=$(PDCSI_NAMESPACE)\"\n -
+ \"--timeout=250s\"\n\n csiResizer:\n - \"--v=5\"\n -
+ \"--csi-address=/csi/csi.sock\"\n - \"--http-endpoint=:22013\"\n -
+ \"--leader-election\"\n - \"--leader-election-namespace=$(PDCSI_NAMESPACE)\"\n -
+ \"--handle-volume-inuse-error=false\"\n\n csiSnapshotter:\n - \"--v=5\"\n -
+ \"--csi-address=/csi/csi.sock\"\n - \"--metrics-address=:22014\"\n -
+ \"--leader-election\"\n - \"--leader-election-namespace=$(PDCSI_NAMESPACE)\"\n -
+ \"--timeout=300s\"\n\n csiDriver:\n - \"--v=5\"\n - \"--endpoint=unix:/csi/csi.sock\""
+
+ providerConfigRef:
+ name: default
+ ```
+
+ The cluster profile contains the following core infrastructure layers.
+
+ | **Pack Type** | **Registry** | **Pack Name** | **Pack Version** |
+ | ------------- | ------------ | ---------------- | ---------------- |
+ | OS | Public Repo | `ubuntu-gcp` | `22.04` |
+ | Kubernetes | Public Repo | `kubernetes` | `1.28.3` |
+ | Network | Public Repo | `cni-calico` | `3.27.0` |
+ | Storage | Public Repo | `csi-gcp-driver` | `1.12.4` |
+
+ :::tip
+
+ If you want to use different packs in your cluster profile, you can access the Palette UI and simulate creating a
+ cluster profile to gather the packs' required values. During the cluster profile creation, click the API button to
+ display the API payload. You will find the values of each pack's `name`, `tag`, `uid`, `registryUid`, and `values`
+ parameters.
+
+ :::
+
+16. Create the cluster profile.
+
+ ```bash
+ kubectl apply --filename crossplane-gcp/cluster-profile-gcp.yaml
+ ```
+
+17. Issue the commands below to get the ID of the cluster profile once its creation is complete.
+
+ ```bash
+ kubectl wait --for=condition=Ready profile.cluster.palette.crossplane.io/gcp-crossplane-cluster-profile
+ clusterProfileId=$(kubectl get profile.cluster.palette.crossplane.io gcp-crossplane-cluster-profile --output jsonpath='{.status.atProvider.id}')
+ echo Cluster Profile ID: $clusterProfileId
+ ```
+
+ ```text hideClipboard
+ profile.cluster.palette.crossplane.io/gcp-crossplane-cluster-profile condition met
+ Cluster Profile ID: 6638e0eb8f42b00cb4d1d22bb
+ ```
+
+18. Next, get the ID of your GCP cloud account registered in Palette by invoking the `cloudaccounts` Palette API.
+ Replace `<your-palette-api-key>` with your Palette API key and `<your-gcp-account-name>` with the name under which
+ you registered your GCP account in Palette.
+
+ ```bash
+ curl --location --request GET 'https://api.spectrocloud.com/v1/cloudaccounts/gcp' \
+ -H 'Accept: application/json' \
+ -H 'ApiKey: <your-palette-api-key>' | jq '.items[] | select(.metadata.name == "<your-gcp-account-name>") | .metadata.uid'
+ ```
+
+ ```text hideClipboard
+ "645981f0ab3ab8105fabc982"
+ ```
+
+ Copy the API response containing your GCP cloud account ID.
+
+19. Create a file to store the GCP IaaS cluster configuration.
+
+ ```bash
+ vi crossplane-gcp/cluster-gcp.yaml
+ ```
+
+20. Paste the cluster configuration displayed below into the text editor window that opens:
+
+ - Replace `<your-gcp-project-name>` with the name of your GCP project.
+ - Replace `<your-cluster-profile-id>` and `<your-cloud-account-id>` with the IDs obtained in steps **17** and
+ **18** of this guide.
+
+ Optionally, you can edit the region, availability zone, instance type, and number of nodes of your cluster according
+ to your workload.
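+
+ If you are unsure which machine types are available in your target zone, you can list them with the gcloud CLI.
+ This optional helper step assumes you have the gcloud CLI installed and authenticated against your project.
+
+ ```bash
+ # List the machine types available in the us-east1-b zone. Assumes an authenticated gcloud CLI session.
+ gcloud compute machine-types list --zones us-east1-b
+ ```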
+
+ Once you are done making the alterations, save and exit the file.
+
+ ```yaml
+ apiVersion: cluster.palette.crossplane.io/v1alpha1
+ kind: GCP
+ metadata:
+ name: gcp-crossplane-cluster
+ namespace: crossplane-system
+ spec:
+ forProvider:
+ cloudConfig:
+ - project: "<your-gcp-project-name>"
+ region: "us-east1"
+ machinePool:
+ - azs:
+ - us-east1-b
+ count: 2
+ instanceType: n1-standard-4
+ name: machinepool1
+ - azs:
+ - us-east1-b
+ count: 1
+ instanceType: n1-standard-4
+ name: controlplanepool
+ controlPlane: true
+ clusterProfile:
+ - id: <your-cluster-profile-id>
+ cloudAccountId: <your-cloud-account-id>
+ providerConfigRef:
+ name: default
+ ```
+
+21. Create the GCP IaaS cluster.
+
+ ```bash
+ kubectl apply --filename crossplane-gcp/cluster-gcp.yaml
+ ```
+
+22. Wait for the cluster to be created. The cluster provisioning might take up to 20 minutes.
+
+ ```bash
+ kubectl wait --for=condition=Ready gcp.cluster.palette.crossplane.io/gcp-crossplane-cluster --timeout=1h
+ ```
+
+ Once ready, you should receive an output similar to the following.
+
+ ```text hideClipboard
+ gcp.cluster.palette.crossplane.io/gcp-crossplane-cluster condition met
+ ```
+
+## Validate
+
+1. Log in to [Palette](https://console.spectrocloud.com/).
+
+2. From the left **Main Menu**, click **Clusters**.
+
+3. Verify the deployed cluster named `gcp-crossplane-cluster` is displayed and has the **Running** and **Healthy**
+ status.
diff --git a/docs/docs-content/palette-cli/commands/_category_.json b/docs/docs-content/automation/palette-cli/_category_.json
similarity index 100%
rename from docs/docs-content/palette-cli/commands/_category_.json
rename to docs/docs-content/automation/palette-cli/_category_.json
diff --git a/docs/docs-content/automation/palette-cli/commands/_category_.json b/docs/docs-content/automation/palette-cli/commands/_category_.json
new file mode 100644
index 0000000000..094470741d
--- /dev/null
+++ b/docs/docs-content/automation/palette-cli/commands/_category_.json
@@ -0,0 +1,3 @@
+{
+ "position": 10
+}
diff --git a/docs/docs-content/palette-cli/commands/commands.md b/docs/docs-content/automation/palette-cli/commands/commands.md
similarity index 100%
rename from docs/docs-content/palette-cli/commands/commands.md
rename to docs/docs-content/automation/palette-cli/commands/commands.md
diff --git a/docs/docs-content/palette-cli/commands/docs.md b/docs/docs-content/automation/palette-cli/commands/docs.md
similarity index 97%
rename from docs/docs-content/palette-cli/commands/docs.md
rename to docs/docs-content/automation/palette-cli/commands/docs.md
index e535c4e0f6..a461161f5b 100644
--- a/docs/docs-content/palette-cli/commands/docs.md
+++ b/docs/docs-content/automation/palette-cli/commands/docs.md
@@ -74,7 +74,7 @@ Started ghcr.io/spectrocloud/librarium:nightly at 127.0.0.1:8080
```
Start the documentation site in the background and change container image to a self-hosted registry. Check out the
-[Offline Documentation](../../vertex/install-palette-vertex/airgap/offline-docs.md) to learn how to extract the
+[Offline Documentation](../../../vertex/install-palette-vertex/airgap/offline-docs.md) to learn how to extract the
container image into a tarball which you can then load into another device and upload to a self-hosted registry.
```bash diff --git a/docs/docs-content/palette-cli/commands/ec.md b/docs/docs-content/automation/palette-cli/commands/ec.md similarity index 94% rename from docs/docs-content/palette-cli/commands/ec.md rename to docs/docs-content/automation/palette-cli/commands/ec.md index cad1325808..701ec83beb 100644 --- a/docs/docs-content/palette-cli/commands/ec.md +++ b/docs/docs-content/automation/palette-cli/commands/ec.md @@ -11,8 +11,9 @@ The `ec` command installs a self-hosted Palette Enterprise Cluster (EC) in your conducted through an interactive wizard that guides you through the various install configurations available. A local kind cluster is created to facilitate creating the Enterprise cluster in the target environment. You do not need to install kind or any other dependencies. The CLI includes all the required dependencies to set up the kind cluster. You -can use the `ec` command to install a [self-hosted Palette](../../enterprise-version/install-palette/install-palette.md) -instance or a self-hosted [VerteX](../../vertex/install-palette-vertex/install-palette-vertex.md) instance. +can use the `ec` command to install a +[self-hosted Palette](../../../enterprise-version/install-palette/install-palette.md) instance or a self-hosted +[VerteX](../../../vertex/install-palette-vertex/install-palette-vertex.md) instance. The `ec` command exposes the following subcommand. @@ -80,8 +81,8 @@ palette ec install --config-file ~/.palette/ec/ec-20230807143205/ec.yaml --updat ## Custom Value File -You can customize the [Cluster Profile](../../glossary-all.md#cluster-profile) that makes up the Enterprise Cluster by -providing a custom **values.yaml** file that contains values for the various Cluster Profile layers that make up the +You can customize the [Cluster Profile](../../../glossary-all.md#cluster-profile) that makes up the Enterprise Cluster +by providing a custom **values.yaml** file that contains values for the various Cluster Profile layers that make up the Enterprise Cluster. The custom **values.yaml** file is used to customize the Enterprise Cluster to your specific needs. This is an advanced feature and should only be used by advanced users or when explicitly instructed by our support team. diff --git a/docs/docs-content/palette-cli/commands/login.md b/docs/docs-content/automation/palette-cli/commands/login.md similarity index 100% rename from docs/docs-content/palette-cli/commands/login.md rename to docs/docs-content/automation/palette-cli/commands/login.md diff --git a/docs/docs-content/palette-cli/commands/pcg.md b/docs/docs-content/automation/palette-cli/commands/pcg.md similarity index 93% rename from docs/docs-content/palette-cli/commands/pcg.md rename to docs/docs-content/automation/palette-cli/commands/pcg.md index 82801f111f..2accee9641 100644 --- a/docs/docs-content/palette-cli/commands/pcg.md +++ b/docs/docs-content/automation/palette-cli/commands/pcg.md @@ -37,7 +37,7 @@ by the `install` subcommand. :::warning Use the latest version of the Palette CLI that matches the version of your Palette or Palette VerteX instance. You can -find the newest version of the Palette CLI on the [Downloads](../../spectro-downloads.md#palette-cli) page. +find the newest version of the Palette CLI on the [Downloads](../../../spectro-downloads.md#palette-cli) page. ::: @@ -83,8 +83,8 @@ palette pcg install --config-file --update-passwords You can use the `install` subcommand to install a PCG cluster in the following environments. 
-| **Platform** | **Install Guide** | -| ------------ | -------------------------------------------------- | -| MAAS | [Link](../../clusters/pcg/deploy-pcg/maas.md) | -| OpenStack | [Link](../../clusters/pcg/deploy-pcg/openstack.md) | -| VMware | [Link](../../clusters/pcg/deploy-pcg/vmware.md) | +| **Platform** | **Install Guide** | +| ------------ | ----------------------------------------------------- | +| MAAS | [Link](../../../clusters/pcg/deploy-pcg/maas.md) | +| OpenStack | [Link](../../../clusters/pcg/deploy-pcg/openstack.md) | +| VMware | [Link](../../../clusters/pcg/deploy-pcg/vmware.md) | diff --git a/docs/docs-content/palette-cli/commands/pde.md b/docs/docs-content/automation/palette-cli/commands/pde.md similarity index 100% rename from docs/docs-content/palette-cli/commands/pde.md rename to docs/docs-content/automation/palette-cli/commands/pde.md diff --git a/docs/docs-content/palette-cli/commands/project.md b/docs/docs-content/automation/palette-cli/commands/project.md similarity index 100% rename from docs/docs-content/palette-cli/commands/project.md rename to docs/docs-content/automation/palette-cli/commands/project.md diff --git a/docs/docs-content/palette-cli/install-palette-cli.md b/docs/docs-content/automation/palette-cli/install-palette-cli.md similarity index 88% rename from docs/docs-content/palette-cli/install-palette-cli.md rename to docs/docs-content/automation/palette-cli/install-palette-cli.md index fbaf9f9853..5a18bb87b6 100644 --- a/docs/docs-content/palette-cli/install-palette-cli.md +++ b/docs/docs-content/automation/palette-cli/install-palette-cli.md @@ -15,12 +15,12 @@ Use the following steps to install and set up the Palette CLI. - A Palette account. Click [here](https://console.spectrocloud.com/) to create a Palette account. -- A Palette API key. Refer to the [Create API Key](../user-management/authentication/api-key/create-api-key.md) +- A Palette API key. Refer to the [Create API Key](../../user-management/authentication/api-key/create-api-key.md) reference page to learn how to create an API key. ## Download and Setup -1. Visit the [Downloads](../spectro-downloads.md#palette-cli) page and download the Palette CLI by using the URL +1. Visit the [Downloads](../../spectro-downloads.md#palette-cli) page and download the Palette CLI by using the URL provided. 2. Open up a terminal session on your local system. diff --git a/docs/docs-content/palette-cli/palette-cli.md b/docs/docs-content/automation/palette-cli/palette-cli.md similarity index 94% rename from docs/docs-content/palette-cli/palette-cli.md rename to docs/docs-content/automation/palette-cli/palette-cli.md index a08cd8c124..c740920d89 100644 --- a/docs/docs-content/palette-cli/palette-cli.md +++ b/docs/docs-content/automation/palette-cli/palette-cli.md @@ -3,8 +3,6 @@ sidebar_label: "Palette CLI" title: "Palette CLI" description: "Learn how to use the Palette CLI." 
hide_table_of_contents: false
-sidebar_custom_props:
- icon: "terminal"
tags: ["palette-cli"]
---
diff --git a/docs/docs-content/automation/terraform/_category_.json b/docs/docs-content/automation/terraform/_category_.json
new file mode 100644
index 0000000000..455b8e4969
--- /dev/null
+++ b/docs/docs-content/automation/terraform/_category_.json
@@ -0,0 +1,3 @@
+{
+ "position": 20
+}
diff --git a/docs/docs-content/automation/terraform/terraform.md b/docs/docs-content/automation/terraform/terraform.md
new file mode 100644
index 0000000000..abc085cbd5
--- /dev/null
+++ b/docs/docs-content/automation/terraform/terraform.md
@@ -0,0 +1,53 @@
+---
+sidebar_label: "Terraform"
+title: "Terraform"
+description: "Learn how to use Terraform with Palette and Palette VerteX."
+hide_table_of_contents: false
+sidebar_position: 0
+tags: ["terraform"]
+---
+
+Palette supports the Infrastructure as Code (IaC) software tool, [Terraform](https://www.terraform.io/), to provide
+consistent CLI workflow support to multiple cloud providers.
+
+Terraform organizes cloud APIs into declarative configuration files, allowing users to define their infrastructure as
+code. Terraform validates configuration files and checks whether the execution plan for a configuration matches
+expectations before deployment, and applies the changes to managed resources accordingly.
+
+## Spectro Cloud Provider
+
+The Palette and Palette VerteX APIs can be used with the Spectro Cloud Terraform provider. The provider is available in
+the HashiCorp Terraform registry as the
+[Spectro Cloud Provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs).
+
+### Release Notes
+
+Information about the latest changes in the Spectro Cloud provider can be found in the
+[release notes](https://github.com/spectrocloud/terraform-provider-spectrocloud/releases).
+
+### Provider Documentation
+
+Detailed documentation on supported data sources and resources is available on the Terraform Spectro Cloud Provider
+[documentation](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) page.
+
+### Modules
+
+Two modules are available to help you provision Spectro Cloud infrastructure resources.
+
+- [Palette Edge Native Terraform Module](https://registry.terraform.io/modules/spectrocloud/edge/spectrocloud/latest)
+- [Spectro Cloud Terraform Modules](https://registry.terraform.io/modules/spectrocloud/modules/spectrocloud/latest)
+
+Refer to the
+[Spectro Cloud Modules](https://github.com/spectrocloud/terraform-spectrocloud-modules#module-resources--requirements)
+README file to learn more about supported provider versions and other essential requirements.
+
+## Get Started
+
+For an end-to-end cluster provisioning example, refer to the
+[Deploy a Cluster](../../clusters/public-cloud/deploy-k8s-cluster.md) tutorial and the
+[end-to-end examples](https://github.com/spectrocloud/terraform-provider-spectrocloud/tree/main/examples/e2e) available
+in the Spectro Cloud Terraform Provider GitHub repository.
+
+## Resources
+
+- [Terraform Documentation](https://developer.hashicorp.com/terraform/docs)
diff --git a/docs/docs-content/clusters/pcg/deploy-app-pcg.md b/docs/docs-content/clusters/pcg/deploy-app-pcg.md
index db2caed565..b9ac44ace1 100644
--- a/docs/docs-content/clusters/pcg/deploy-app-pcg.md
+++ b/docs/docs-content/clusters/pcg/deploy-app-pcg.md
@@ -69,7 +69,7 @@ To complete this tutorial, you will need the following prerequisites in place.
::: - Ensure the following software is installed and available on your Linux machine. - - [Palette CLI](../../palette-cli/install-palette-cli.md). + - [Palette CLI](../../automation/palette-cli/install-palette-cli.md). - [Docker](https://docs.docker.com/desktop). - [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation). - [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git). @@ -78,7 +78,7 @@ To complete this tutorial, you will need the following prerequisites in place. ## Authenticate with Palette The initial step to deploy a PCG using Palette CLI involves authenticating with your Palette environment using the -[`palette login`](../../palette-cli/commands/login.md) command. +[`palette login`](../../automation/palette-cli/commands/login.md) command. In your terminal, execute the following command. @@ -240,7 +240,7 @@ VMware cloud account with the same name as the PCG. The following recording demonstrates the `pcg install` command with the `--config-only` flag. When using this flag, a reusable configuration file named **pcg.yaml** is created under the path **.palette/pcg**. You can then utilize this file to install a PCG with predefined values using the command `pcg install` with the `--config-file` flag. Refer to the -[Palette CLI PCG Command](../../palette-cli/commands/pcg.md) page for further information about the command. +[Palette CLI PCG Command](../../automation/palette-cli/commands/pcg.md) page for further information about the command. diff --git a/docs/docs-content/clusters/pcg/deploy-pcg/maas.md b/docs/docs-content/clusters/pcg/deploy-pcg/maas.md index 7c6ef739f4..73d04f8307 100644 --- a/docs/docs-content/clusters/pcg/deploy-pcg/maas.md +++ b/docs/docs-content/clusters/pcg/deploy-pcg/maas.md @@ -26,7 +26,7 @@ carefully review the [Prerequisites](#prerequisites) section. ::: - Download and install the Palette CLI from the [Downloads](../../../spectro-downloads.md#palette-cli) page. Refer to - the [Palette CLI Install](../../../palette-cli/install-palette-cli.md) guide to learn more. + the [Palette CLI Install](../../../automation/palette-cli/install-palette-cli.md) guide to learn more. The following system requirements must be met to install a PCG in MAAS: @@ -121,7 +121,7 @@ is not required for the DNS records to be accessible from the internet. The `CloudAccount.apiKey` and `Mgmt.apiKey` values in the **pcg.yaml** file are encrypted and cannot be manually updated. To change these values, use the `palette pcg install --update-passwords` command. Refer to the - [PCG command](../../../palette-cli/commands/pcg.md#update-passwords) reference page for more information. + [PCG command](../../../automation/palette-cli/commands/pcg.md#update-passwords) reference page for more information. ::: @@ -215,8 +215,8 @@ is not required for the DNS records to be accessible from the internet. You cannot modify a deployed PCG cluster. If you need to make changes to the PCG cluster, you must first delete the cluster and redeploy it. We recommend you save your PCG configuration file for future use. Use the `--config-only` flag to save the configuration file without deploying the PCG cluster. Refer to the - [Generate a Configuration File](../../../palette-cli/commands/pcg.md#generate-a-configuration-file) section to learn - more. For additional assistance, visit our + [Generate a Configuration File](../../../automation/palette-cli/commands/pcg.md#generate-a-configuration-file) + section to learn more. 
For additional assistance, visit our [Customer Support](https://spectrocloud.atlassian.net/servicedesk/customer/portals) portal. ::: diff --git a/docs/docs-content/clusters/pcg/deploy-pcg/openstack.md b/docs/docs-content/clusters/pcg/deploy-pcg/openstack.md index 9876ea8712..c2bbe2b99a 100644 --- a/docs/docs-content/clusters/pcg/deploy-pcg/openstack.md +++ b/docs/docs-content/clusters/pcg/deploy-pcg/openstack.md @@ -26,7 +26,7 @@ installation, carefully review the [Prerequisites](#prerequisites) section. ::: - Download and install the Palette CLI from the [Downloads](../../../spectro-downloads.md#palette-cli) page. Refer to - the [Palette CLI Install](../../../palette-cli/install-palette-cli.md) guide to learn more. + the [Palette CLI Install](../../../automation/palette-cli/install-palette-cli.md) guide to learn more. The following system requirements must be met to install a PCG in OpenStack: @@ -340,7 +340,7 @@ The following permissions are required to deploy a PCG to OpenStack and for Pale The `CloudAccount.apiKey` and `Mgmt.apiKey` values in the **pcg.yaml** file are encrypted and cannot be manually updated. To change these values, use the `palette pcg install --update-passwords` command. Refer to the - [PCG command](../../../palette-cli/commands/pcg.md#update-passwords) reference page for more information. + [PCG command](../../../automation/palette-cli/commands/pcg.md#update-passwords) reference page for more information. ::: @@ -448,8 +448,8 @@ The following permissions are required to deploy a PCG to OpenStack and for Pale You cannot modify a deployed PCG cluster. If you need to make changes to the PCG cluster, you must first delete the cluster and redeploy it. We recommend you save your PCG configuration file for future use. Use the `--config-only` flag to save the configuration file without deploying the PCG cluster. Refer to the - [Generate a Configuration File](../../../palette-cli/commands/pcg.md#generate-a-configuration-file) section to learn - more. For additional assistance, visit our + [Generate a Configuration File](../../../automation/palette-cli/commands/pcg.md#generate-a-configuration-file) + section to learn more. For additional assistance, visit our [Customer Support](https://spectrocloud.atlassian.net/servicedesk/customer/portals) portal. ::: diff --git a/docs/docs-content/clusters/pcg/deploy-pcg/vmware.md b/docs/docs-content/clusters/pcg/deploy-pcg/vmware.md index ec7e1257a7..57881b4746 100644 --- a/docs/docs-content/clusters/pcg/deploy-pcg/vmware.md +++ b/docs/docs-content/clusters/pcg/deploy-pcg/vmware.md @@ -34,7 +34,7 @@ environment, then you already have all the required permissions and roles. Proce ::: - Download and install the Palette CLI from the [Downloads](../../../spectro-downloads.md#palette-cli) page. Refer to - the [Palette CLI Install](../../../palette-cli/install-palette-cli.md) guide to learn more. + the [Palette CLI Install](../../../automation/palette-cli/install-palette-cli.md) guide to learn more. The following system requirements must be met to install a PCG in VMware vSphere: @@ -374,7 +374,7 @@ The following requirements apply to tags: The `CloudAccount.apiKey` and `Mgmt.apiKey` values in the **pcg.yaml** file are encrypted and cannot be manually updated. To change these values, use the `palette pcg install --update-passwords` command. Refer to the - [PCG command](../../../palette-cli/commands/pcg.md#update-passwords) reference page for more information. 
+ [PCG command](../../../automation/palette-cli/commands/pcg.md#update-passwords) reference page for more information. ::: @@ -530,8 +530,8 @@ The following requirements apply to tags: You cannot modify a deployed PCG cluster. If you need to make changes to the PCG cluster, you must first delete the cluster and redeploy it. We recommend you save your PCG configuration file for future use. Use the `--config-only` flag to save the configuration file without deploying the PCG cluster. Refer to the - [Generate a Configuration File](../../../palette-cli/commands/pcg.md#generate-a-configuration-file) section to learn - more. For additional assistance, visit our + [Generate a Configuration File](../../../automation/palette-cli/commands/pcg.md#generate-a-configuration-file) + section to learn more. For additional assistance, visit our [Customer Support](https://spectrocloud.atlassian.net/servicedesk/customer/portals) portal. ::: diff --git a/docs/docs-content/clusters/pcg/pcg.md b/docs/docs-content/clusters/pcg/pcg.md index d4da350293..91aad4ff9c 100644 --- a/docs/docs-content/clusters/pcg/pcg.md +++ b/docs/docs-content/clusters/pcg/pcg.md @@ -10,7 +10,7 @@ tags: ["pcg"] A Private Cloud Gateway (PCG) is a Palette infrastructure support component that enables the communication between Palette and a private cloud or private data center environment. The PCG is typically deployed into the private cloud -environments through the [Palette CLI](../../palette-cli/install-palette-cli.md). +environments through the [Palette CLI](../../automation/palette-cli/install-palette-cli.md). A PCG is necessary in private cloud environments where Palette does not have direct network access to the environment where workload clusters are deployed to. When the PCG is installed, it registers itself with a Palette instance and diff --git a/docs/docs-content/devx/devx.md b/docs/docs-content/devx/devx.md index ffdde874c4..d6b0e96551 100644 --- a/docs/docs-content/devx/devx.md +++ b/docs/docs-content/devx/devx.md @@ -73,8 +73,8 @@ programmatically using PDE. :::tip -Check out the Palette CLI [install guide](../palette-cli/install-palette-cli.md) for more information on how to install -and configure the CLI. +Check out the Palette CLI [install guide](../automation/palette-cli/install-palette-cli.md) for more information on how +to install and configure the CLI. ::: diff --git a/docs/docs-content/enterprise-version/install-palette/install-on-vmware/airgap-install/install.md b/docs/docs-content/enterprise-version/install-palette/install-on-vmware/airgap-install/install.md index db92a3a87f..d15850608a 100644 --- a/docs/docs-content/enterprise-version/install-palette/install-on-vmware/airgap-install/install.md +++ b/docs/docs-content/enterprise-version/install-palette/install-on-vmware/airgap-install/install.md @@ -28,7 +28,7 @@ Complete the [Environment Setup](./vmware-vsphere-airgap-instructions.md) steps host. - Palette CLI installed and available. Refer to the Palette CLI - [Install](../../../../palette-cli/install-palette-cli.md#download-and-setup) page for guidance. + [Install](../../../../automation/palette-cli/install-palette-cli.md#download-and-setup) page for guidance. - Review the required VMware vSphere [permissions](../vmware-system-requirements.md). Ensure you have created the proper custom roles and zone tags. @@ -117,7 +117,7 @@ Use the following steps to install Palette. 5. Open a terminal window and invoke the Palette CLI by using the `ec` command to install the enterprise cluster. 
 The interactive CLI prompts you for configuration details and then initiates the installation. For more information
- about the `ec` subcommand, refer to [Palette Commands](../../../../palette-cli/commands/commands.md).
+ about the `ec` subcommand, refer to [Palette Commands](../../../../automation/palette-cli/commands/commands.md).

 ```bash
 palette ec install
 ```

@@ -386,7 +386,7 @@ teams.

 ## Resources

-- [Palette CLI](../../../../palette-cli/install-palette-cli.md#download-and-setup)
+- [Palette CLI](../../../../automation/palette-cli/install-palette-cli.md#download-and-setup)

 - [VMware System Requirements](../vmware-system-requirements.md)

diff --git a/docs/docs-content/enterprise-version/install-palette/install-on-vmware/install.md b/docs/docs-content/enterprise-version/install-palette/install-on-vmware/install.md
index 73f7570700..abe5b67072 100644
--- a/docs/docs-content/enterprise-version/install-palette/install-on-vmware/install.md
+++ b/docs/docs-content/enterprise-version/install-palette/install-on-vmware/install.md
@@ -30,7 +30,7 @@ for more information.
 host.

 - Palette CLI installed and available. Refer to the Palette CLI
- [Install](../../../palette-cli/install-palette-cli.md#download-and-setup) page for guidance.
+ [Install](../../../automation/palette-cli/install-palette-cli.md#download-and-setup) page for guidance.

 - Review the required VMware vSphere [permissions](vmware-system-requirements.md). Ensure you have created the
   proper custom roles and zone tags.

@@ -119,7 +119,7 @@ Use the following steps to install Palette.

 5. Open a terminal window and invoke the Palette CLI by using the `ec` command to install the enterprise cluster.
 The interactive CLI prompts you for configuration details and then initiates the installation. For more information
- about the `ec` subcommand, refer to [Palette Commands](../../../palette-cli/commands/commands.md).
+ about the `ec` subcommand, refer to [Palette Commands](../../../automation/palette-cli/commands/commands.md).

 ```bash
 palette ec install
 ```

@@ -415,7 +415,7 @@ teams.

 ## Resources

-- [Palette CLI](../../../palette-cli/install-palette-cli.md#download-and-setup)
+- [Palette CLI](../../../automation/palette-cli/install-palette-cli.md#download-and-setup)

 - [VMware System Requirements](vmware-system-requirements.md)

diff --git a/docs/docs-content/palette-cli/_category_.json b/docs/docs-content/palette-cli/_category_.json
deleted file mode 100644
index fa4c219dfb..0000000000
--- a/docs/docs-content/palette-cli/_category_.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-  "position": 180
-}

diff --git a/docs/docs-content/spectro-downloads.md b/docs/docs-content/spectro-downloads.md
index 481d34cfd9..cf53d79a7c 100644
--- a/docs/docs-content/spectro-downloads.md
+++ b/docs/docs-content/spectro-downloads.md
@@ -21,7 +21,7 @@ on how to install Palette. Palette VerteX installation guide can be found in the

 ## Palette CLI

 The Palette Command Line Interface (CLI) is a tool that you can use to interact with Palette programmatically. Check out
-the [Palette CLI](./palette-cli/palette-cli.md) document for installation guidance.
+the [Palette CLI](./automation/palette-cli/palette-cli.md) document for installation guidance.
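The table that follows this hunk pairs each CLI release with a SHA256 digest. As a reader aid, not part of the patch itself, here is a minimal verification sketch; the binary name `palette` and the `<published-sha256>` placeholder are assumptions for illustration only.

```bash
# Compare a downloaded CLI binary against the digest published in the table
# below. Replace <published-sha256> with the value for your version and
# operating system; on a match, sha256sum reports "palette: OK".
echo "<published-sha256>  palette" | sha256sum --check
```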
 | Version | Operating System | Checksum (SHA256) |
 | ------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------ |

diff --git a/docs/docs-content/terraform.md b/docs/docs-content/terraform.md
deleted file mode 100644
index d045322d56..0000000000
--- a/docs/docs-content/terraform.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-sidebar_label: "Palette Terraform Support"
-title: "Palette Terraform Support"
-description: "Understanding, installing and operating Spectro Cloud's Terraform Provider."
-hide_table_of_contents: false
-sidebar_position: 200
-sidebar_custom_props:
-  icon: "terraform"
-tags: ["terraform"]
----
-
-Palette supports the open-source Infrastructure as Code (IaC) software tool, [Terraform](https://www.terraform.io/), to
-provide consistent CLI workflow support to multiple cloud services.
-
-Terraform organizes cloud APIs into declarative, configuration files. Terraform supports the ability to write
-configuration files, checks whether the execution plan for a configuration matches your expectations (before
-deployment), and applies the changes to all the managed resources.
-
-## Spectro Cloud Provider
-
-Spectro Cloud Palette's SaaS and On-Premise management API can be used with the Spectro Cloud Terraform provider. The
-provider is available in the HashiCorp Terraform registry as
-[Spectro Cloud Provider](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs).
-
-<br />
-
-### Release Notes
-
-Information about the latest changes in the Spectro Cloud provider can be found in the
-[release notes](https://github.com/spectrocloud/terraform-provider-spectrocloud/releases).
-
-<br />
-
-### Provider Documentation
-
-Detailed documentation on supported data sources and resources are available in the Terraform Spectro Cloud Provider
-[documentation](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs) page.
-
-<br />
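An aside on the page being removed above (its content now lives under the new `automation/terraform` section): the workflow it describes, writing configuration files, checking the execution plan before deployment, and applying the changes, maps onto the standard Terraform CLI commands. A minimal sketch, assuming a working directory that already contains Spectro Cloud provider configuration:

```bash
# The write/plan/apply cycle described in the relocated page.
terraform init    # downloads required providers, such as spectrocloud/spectrocloud
terraform plan    # checks the execution plan against your expectations before deployment
terraform apply   # applies the changes to the managed resources
```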
-
-## Usage
-
-For an end-to-end cluster provisioning example, check out the
-[end-to-end examples](https://github.com/spectrocloud/terraform-provider-spectrocloud/tree/main/examples/e2e).
-
-You can find resource examples in the
-[resource directory](https://registry.terraform.io/providers/spectrocloud/spectrocloud).
-
-## Modules
-
-There are two modules available to help you provision Spectro Cloud infrastructure resources.
-
-- [Palette Edge Native Terraform Module](https://registry.terraform.io/modules/spectrocloud/edge/spectrocloud/latest)
-- [Spectro Cloud Terraform Modules](https://registry.terraform.io/modules/spectrocloud/modules/spectrocloud/latest)
-
-Review the
-[Spectro Cloud modules readme](https://github.com/spectrocloud/terraform-spectrocloud-modules#module-resources--requirements)
-document to learn more about supported provider versions and other requirements.

diff --git a/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/airgap-install/install.md b/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/airgap-install/install.md
index 28326d48c0..be5641097d 100644
--- a/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/airgap-install/install.md
+++ b/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/airgap-install/install.md
@@ -30,7 +30,7 @@ before proceeding with the installation. Refer to the
 host.

 - Palette CLI installed and available. Refer to the Palette CLI
- [Install](../../../../palette-cli/install-palette-cli.md#download-and-setup) page for guidance.
+ [Install](../../../../automation/palette-cli/install-palette-cli.md#download-and-setup) page for guidance.

 - An Ubuntu Pro Subscription and token. Ubuntu Pro provides access to FIPS 140-2 certified cryptographic packages.

@@ -132,7 +132,7 @@ Use the following steps to install Palette VerteX.

 5. Open a terminal window and invoke the Palette CLI by using the `ec` command to install the enterprise cluster.
 The interactive CLI prompts you for configuration details and then initiates the installation. For more information
- about the `ec` subcommand, refer to [Palette Commands](../../../../palette-cli/commands/ec.md).
+ about the `ec` subcommand, refer to [Palette Commands](../../../../automation/palette-cli/commands/ec.md).

 ```bash
 palette ec install
 ```

@@ -405,7 +405,7 @@ teams.

 - [Enterprise Install Troubleshooting](../../../../troubleshooting/enterprise-install.md)

-- [Palette CLI](../../../../palette-cli/install-palette-cli.md#download-and-setup)
+- [Palette CLI](../../../../automation/palette-cli/install-palette-cli.md#download-and-setup)

 - [System Management](../../../system-management/system-management.md)

diff --git a/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/install.md b/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/install.md
index cc52daba89..f7ef33b1b4 100644
--- a/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/install.md
+++ b/docs/docs-content/vertex/install-palette-vertex/install-on-vmware/install.md
@@ -22,7 +22,7 @@ Palette VerteX will be deployed.
 host.

 - Palette CLI installed and available. Refer to the Palette CLI
- [Install](../../../palette-cli/install-palette-cli.md#download-and-setup) page for guidance.
+ [Install](../../../automation/palette-cli/install-palette-cli.md#download-and-setup) page for guidance.

 - An Ubuntu Pro Subscription and token. Ubuntu Pro provides access to FIPS 140-2 certified cryptographic packages.
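An aside on the PCG hunks earlier in this patch: both flags they reference belong to `palette pcg install`. `--config-only` saves the configuration file without deploying the cluster, and `--update-passwords` changes the encrypted `CloudAccount.apiKey` and `Mgmt.apiKey` values in **pcg.yaml**. A minimal sketch of both, assuming a CLI session that is already authenticated:

```bash
# Save a PCG configuration file for future use without deploying the cluster.
palette pcg install --config-only

# Update the encrypted API key values in an existing pcg.yaml.
palette pcg install --update-passwords
```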
@@ -124,7 +124,7 @@ Use the following steps to install Palette VerteX.

 5. Open a terminal window and invoke the Palette CLI by using the `ec` command to install the enterprise cluster.
 The interactive CLI prompts you for configuration details and then initiates the installation. For more information
- about the `ec` subcommand, refer to [Palette Commands](../../../palette-cli/commands/ec.md).
+ about the `ec` subcommand, refer to [Palette Commands](../../../automation/palette-cli/commands/ec.md).

 ```bash
 palette ec install
 ```

@@ -345,7 +345,7 @@ teams.

 - [Enterprise Install Troubleshooting](../../../troubleshooting/enterprise-install.md)

-- [Palette CLI](../../../palette-cli/install-palette-cli.md#download-and-setup)
+- [Palette CLI](../../../automation/palette-cli/install-palette-cli.md#download-and-setup)

 - [System Management](../../system-management/system-management.md)

diff --git a/redirects.js b/redirects.js
index bf35a4561b..b1d72d9bad 100644
--- a/redirects.js
+++ b/redirects.js
@@ -412,6 +412,66 @@ const redirects = [
   {
     from: "/vertex/install-palette-vertex/airgap/checklist/",
     to: "/vertex/install-palette-vertex/airgap/",
   },
+
+  {
+    from: "/terraform/",
+    to: "/automation/terraform/",
+  },
+
+  {
+    from: "/palette-cli/",
+    to: "/automation/palette-cli/",
+  },
+
+  {
+    from: "/palette-cli/install-palette-cli/",
+    to: "/automation/palette-cli/install-palette-cli/",
+  },
+
+  {
+    from: "/palette-cli/commands/",
+    to: "/automation/palette-cli/commands/",
+  },
+
+  {
+    from: "/palette-cli/commands/docs/",
+    to: "/automation/palette-cli/commands/docs/",
+  },
+
+  {
+    from: "/palette-cli/commands/ec/",
+    to: "/automation/palette-cli/commands/ec/",
+  },
+
+  {
+    from: "/palette-cli/commands/fips-validate/",
+    to: "/automation/palette-cli/commands/fips-validate/",
+  },
+
+  {
+    from: "/palette-cli/commands/login/",
+    to: "/automation/palette-cli/commands/login/",
+  },
+
+  {
+    from: "/palette-cli/commands/pcg/",
+    to: "/automation/palette-cli/commands/pcg/",
+  },
+
+  {
+    from: "/palette-cli/commands/pde/",
+    to: "/automation/palette-cli/commands/pde/",
+  },
+
+  {
+    from: "/palette-cli/commands/project/",
+    to: "/automation/palette-cli/commands/project/",
+  },
+
+  {
+    from: "/palette-cli/commands/validator/",
+    to: "/automation/palette-cli/commands/validator/",
+  },
 ];

 module.exports = redirects;

diff --git a/src/components/IconMapper/dynamicFontAwesomeImports.js b/src/components/IconMapper/dynamicFontAwesomeImports.js
index 73b89a07ec..f168fbb419 100644
--- a/src/components/IconMapper/dynamicFontAwesomeImports.js
+++ b/src/components/IconMapper/dynamicFontAwesomeImports.js
@@ -1,4 +1,5 @@
 import { faCubes } from '@fortawesome/free-solid-svg-icons';
+import { faTerminal } from '@fortawesome/free-solid-svg-icons';
 import { faPersonWalkingLuggage } from '@fortawesome/free-solid-svg-icons';
 import { faObjectGroup } from '@fortawesome/free-solid-svg-icons';
 import { faEnvelopeOpenText } from '@fortawesome/free-solid-svg-icons';
@@ -13,7 +14,6 @@ import { faPalette } from '@fortawesome/free-solid-svg-icons';
 import { faBook } from '@fortawesome/free-solid-svg-icons';
 import { faBookmark } from '@fortawesome/free-solid-svg-icons';
 import { faGavel } from '@fortawesome/free-solid-svg-icons';
-import { faTerminal } from '@fortawesome/free-solid-svg-icons';
 import { faTowerObservation } from '@fortawesome/free-solid-svg-icons';
 import { faLock } from '@fortawesome/free-solid-svg-icons';
 import { faGears } from '@fortawesome/free-solid-svg-icons';
@@ -23,6 +23,7 @@ import { faShield } from '@fortawesome/free-solid-svg-icons';
 export const fontAwesomeIcons = {
   "cubes": faCubes,
+  "terminal": faTerminal,
   "person-walking-luggage": faPersonWalkingLuggage,
   "object-group": faObjectGroup,
   "envelope-open-text": faEnvelopeOpenText,
@@ -37,7 +38,6 @@ export const fontAwesomeIcons = {
   "book": faBook,
   "bookmark": faBookmark,
   "gavel": faGavel,
-  "terminal": faTerminal,
   "tower-observation": faTowerObservation,
   "lock": faLock,
   "gears": faGears,

diff --git a/vale/styles/config/vocabularies/Internal/accept.txt b/vale/styles/config/vocabularies/Internal/accept.txt
index 00541d78a3..b4c79ebcff 100644
--- a/vale/styles/config/vocabularies/Internal/accept.txt
+++ b/vale/styles/config/vocabularies/Internal/accept.txt
@@ -198,4 +198,5 @@ rhel
 Ubuntu
 RHEL
 repave
-airgap
\ No newline at end of file
+airgap
+Crossplane
\ No newline at end of file
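A closing aside: because this patch both moves pages under `docs/docs-content/automation` and adds matching `redirects.js` entries, a repository-wide search is a quick way to confirm that no Markdown link still targets the old locations. A hypothetical check, assuming it is run from the repository root:

```bash
# List Markdown files that still reference the old palette-cli location;
# an empty result means every link was updated to the automation/ path.
grep --recursive --line-number --include="*.md" "palette-cli/" docs/docs-content \
  | grep --invert-match "automation/palette-cli"
```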