From a19b6c4c96bec5f6e79f00caa1356bb58f636f53 Mon Sep 17 00:00:00 2001
From: Lenny Chen <55669665+lennessyy@users.noreply.github.com>
Date: Fri, 2 Feb 2024 13:43:11 -0800
Subject: [PATCH] chore: upgrade to docusaurus 3.1.1 (#2135)

* upgrade to docusaurus 3.1.1
* fix most broken anchors
* fix more broken anchors
* fix new anchors

---------

Co-authored-by: Lenny Chen
---
 docs/api-content/api-docs/1-introduction.md | 2 +-
 .../cluster-groups/create-cluster-group.md | 2 +-
 .../clusters/data-center/maas/architecture.md | 2 +-
 .../clusters/data-center/openstack.md | 2 +-
 .../clusters/data-center/vmware.md | 2 +-
 .../clusters/public-cloud/aws/eks.md | 2 +-
 .../clusters/public-cloud/azure/aks.md | 14 +-
 .../azure/create-azure-cluster.md | 21 +-
 .../devx/services/service-listings/vault.md | 18 +-
 .../vmware-system-requirements.md | 4 +-
 docs/docs-content/glossary-all.md | 6 +-
 docs/docs-content/integrations/k3s.md | 64 +-
 docs/docs-content/integrations/kubernetes.md | 356 ++++++++++-
 .../update-cluster-profile.md | 6 +-
 .../adding-a-custom-registry.md | 4 +-
 package-lock.json | 568 ++++++++++--------
 package.json | 8 +-
 17 files changed, 739 insertions(+), 342 deletions(-)

diff --git a/docs/api-content/api-docs/1-introduction.md b/docs/api-content/api-docs/1-introduction.md
index 1773592229..253f71d4c8 100644
--- a/docs/api-content/api-docs/1-introduction.md
+++ b/docs/api-content/api-docs/1-introduction.md
@@ -172,7 +172,7 @@ The API rate limits are as follows:
- There is a limit of ten API requests per second for each source IP address. The API supports additional bursts through the usage of a burst queue. The default burst queue size is set to five. You could make 50 (10 \* 5) requests in seconds before the API returns a `429 - TooManyRequests` error. Refer to the
- [Endpoint Prefix Rate](#endpointprefixrate) for additional information.
+ [Endpoint Prefix Rate](#endpoint-prefix-rate) for additional information.
- API request limits are categorized by the parent resources, such as `/v1/cloudconfig/:uid` and `/v1/roles`. You can find a list of all resource types in the [API documentation](/api/category/palette-api-v1). The requests are counted

diff --git a/docs/docs-content/clusters/cluster-groups/create-cluster-group.md b/docs/docs-content/clusters/cluster-groups/create-cluster-group.md
index 4ee9a9c232..41ab77fd82 100644
--- a/docs/docs-content/clusters/cluster-groups/create-cluster-group.md
+++ b/docs/docs-content/clusters/cluster-groups/create-cluster-group.md
@@ -108,7 +108,7 @@ Use the instructions below to create a cluster group.
::: To enable virtual clusters for OpenShift, review the OpenShift
- [instructions below](#enable-virtual-cluster-for-openshift).
+ [instructions below](#enable-virtual-clusters-for-openshift).
8. Click **Next** to complete the cluster group creation process.

diff --git a/docs/docs-content/clusters/data-center/maas/architecture.md b/docs/docs-content/clusters/data-center/maas/architecture.md
index affe19823d..765e15d106 100644
--- a/docs/docs-content/clusters/data-center/maas/architecture.md
+++ b/docs/docs-content/clusters/data-center/maas/architecture.md
@@ -81,7 +81,7 @@ installation. A System Private Gateway is a PCG service that is enabled inside t
:::warning Only self-hosted Palette instances support the option of using the System Private Gateway. Use the default
-[PCG deployment](#private-cloud-gatewayy) option if you have NAT gateways or network firewalls between Palette and MAAS.
+[PCG deployment](#private-cloud-gateway) option if you have NAT gateways or network firewalls between Palette and MAAS.

:::

diff --git a/docs/docs-content/clusters/data-center/openstack.md b/docs/docs-content/clusters/data-center/openstack.md
index 929003a4aa..37b2ec3a0e 100644
--- a/docs/docs-content/clusters/data-center/openstack.md
+++ b/docs/docs-content/clusters/data-center/openstack.md
@@ -579,7 +579,7 @@ The following steps need to be performed to provision a new OpenStack cluster:
| **Enable Autoscaler** | You can enable the autoscaler by toggling the **Enable Autoscaler** button. Autoscaler scales up and down resources between the defined minimum and the maximum number of nodes to optimize resource utilization. |
| | Set the scaling limit by setting the **Minimum Size** and **Maximum Size**. Based on the workload, the number of nodes scales up from the minimum set value to the maximum set value and scales down from the maximum set value to the minimum set value. |
| **Size** | Number of VMs to be provisioned for the node pool. |
-| **Rolling Update** | Rolling update has two available options. Review the [Update Parameter](#update-parameter-table) table below for more details. |
+| **Rolling Update** | Rolling update has two available options. The expand option launches a new node first, then shuts down the old one. The contract option shuts down the old node first, then launches a new one. |
| **[Labels](../cluster-management/taints.md#labels)** | Add a label to apply placement constraints on a pod, such as a node eligible for receiving the workload. |
| **[Taints](../cluster-management/taints.md#taints)** | To set toleration to pods and allow (but do not require) the pods to schedule onto nodes with matching taints. |
| **Instance type** | Select the compute instance type to be used for all nodes in the node pool. |

diff --git a/docs/docs-content/clusters/data-center/vmware.md b/docs/docs-content/clusters/data-center/vmware.md
index e448bd103d..87abd51743 100644
--- a/docs/docs-content/clusters/data-center/vmware.md
+++ b/docs/docs-content/clusters/data-center/vmware.md
@@ -928,7 +928,7 @@ The following steps need to be performed to provision a new VMware cluster:
| **Enable Autoscaler** | You can enable the autoscaler by toggling the **Enable Autoscaler** button. Autoscaler scales up and down resources between the defined minimum and the maximum number of nodes to optimize resource utilization. |
| | Set the scaling limit by setting the **Minimum Size** and **Maximum Size**. Based on the workload, the number of nodes scales up from the minimum set value to the maximum set value and scales down from the maximum set value to the minimum set value. |
| **Size** | Number of VMs to be provisioned for the node pool. |
-| **Rolling Update** | Rolling update has two available options. Review the [Update Parameter](#update-parameter-table) table below for more details. |
+| **Rolling Update** | Rolling update has two available options. The expand option launches a new node first, then shuts down the old one. The contract option shuts down the old node first, then launches a new one. |
| **[Labels](../cluster-management/taints.md#labels)** | Add a label to apply placement constraints on a pod, such as a node eligible for receiving the workload. |
| **[Taints](../cluster-management/taints.md#taints)** | To set toleration to pods and allow (but do not require) the pods to schedule onto nodes with matching taints. |
| **Instance type** | Select the compute instance type to be used for all nodes in the node pool.
| diff --git a/docs/docs-content/clusters/public-cloud/aws/eks.md b/docs/docs-content/clusters/public-cloud/aws/eks.md index e6cf47f96e..074ecc8f64 100644 --- a/docs/docs-content/clusters/public-cloud/aws/eks.md +++ b/docs/docs-content/clusters/public-cloud/aws/eks.md @@ -273,7 +273,7 @@ For guidance in setting up kubectl, review the [Kubectl](../../cluster-managemen - [Create an Infrastructure Profile](../../../profiles/cluster-profiles/create-cluster-profiles/create-infrastructure-profile.md) -- [EKS Cluster Encryption](#eks-cluster-secrets-encryption) +- [Enable Secrets Encryption for EKS Cluster](enable-secrets-encryption-kms-key.md) - [Configure Custom OIDC](../../../integrations/kubernetes.md#configure-custom-oidc) diff --git a/docs/docs-content/clusters/public-cloud/azure/aks.md b/docs/docs-content/clusters/public-cloud/azure/aks.md index 53f051bb13..bb4376e633 100644 --- a/docs/docs-content/clusters/public-cloud/azure/aks.md +++ b/docs/docs-content/clusters/public-cloud/azure/aks.md @@ -109,13 +109,13 @@ The following steps need to be performed to provision a new cluster: 6. Complete the **Cluster config** section with the information for each parameter listed below. - | **Parameter** | **Description** | - | -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | - | **Subscription** | Select the subscription which is to be used to access Azure Services. | - | **Region** | Select a region in Azure in where the cluster should be deployed. | - | **Resource Group** | Select the resource group in which the cluster should be deployed. | - | **SSH Key** | The public SSH key for connecting to the nodes. Review Microsoft's [supported SSH](https://learn.microsoft.com/en-us/azure/virtual-machines/linux/mac-create-ssh-keys#supported-ssh-key-formats) formats. | - | **Static Placement** | By default, Palette uses dynamic placement. This creates a new VNet for the cluster that contains two subnets in different Availability Zones (AZs). Palette places resources in these clusters, manages the resources, and deletes them when the corresponding cluster is deleted.

If you want to place resources into a pre-existing VNet, enable the **Static Placement** option, and fill out the input values listed in the [Static Placement](#static-placement-table) table below. | + | **Parameter** | **Description** | + | -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | + | **Subscription** | Select the subscription which is to be used to access Azure Services. | + | **Region** | Select a region in Azure in where the cluster should be deployed. | + | **Resource Group** | Select the resource group in which the cluster should be deployed. | + | **SSH Key** | The public SSH key for connecting to the nodes. Review Microsoft's [supported SSH](https://learn.microsoft.com/en-us/azure/virtual-machines/linux/mac-create-ssh-keys#supported-ssh-key-formats) formats. | + | **Static Placement** | By default, Palette uses dynamic placement. This creates a new VNet for the cluster that contains two subnets in different Availability Zones (AZs). Palette places resources in these clusters, manages the resources, and deletes them when the corresponding cluster is deleted.

If you want to place resources into a pre-existing VNet, enable the **Static Placement** option, and fill out the input values listed in the [Static Placement](#static-placement-settings) table below. | #### Static Placement Settings diff --git a/docs/docs-content/clusters/public-cloud/azure/create-azure-cluster.md b/docs/docs-content/clusters/public-cloud/azure/create-azure-cluster.md index ec827d4427..1f02d93570 100644 --- a/docs/docs-content/clusters/public-cloud/azure/create-azure-cluster.md +++ b/docs/docs-content/clusters/public-cloud/azure/create-azure-cluster.md @@ -106,15 +106,15 @@ Use the following steps to deploy an Azure cluster. ::: -| **Parameter** | **Description** | -| --------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **Subscription** | Use the **drop-down Menu** to select the subscription that will be used to access Azure services. | -| **Region** | Use the **drop-down Menu** to choose the Azure region where you would like to provision the cluster. | -| **Resource Group** | Select the name of the resource group that contains the Azure resources you will be accessing. | -| **Storage Account** | Optionally, if you have a custom storage account available, you can use the **drop-down Menu** to select the storage account name. For information about use cases for custom storage, review [Azure Storage](../azure/architecture.md#azure-storage). | -| **Storage Container** | Optionally, if you will be using a custom storage container, use the **drop-down Menu** to select it. For information about use cases for custom storage, review [Azure Storage](../azure/architecture.md#azure-storage). | -| **SSH Key** | The public SSH key for connecting to the nodes. SSH key pairs must be pre-configured in your Azure environment. The key you select is inserted into the provisioned VMs. For more information, review Microsoft's [Supported SSH key formats](https://learn.microsoft.com/en-us/azure/virtual-machines/linux/mac-create-ssh-keys#supported-ssh-key-formats). | -| **Static Placement** | By default, Palette uses dynamic placement. This creates a new VNet for clusters with two subnets in different Availability Zones (AZs). Palette places resources in these clusters, manages the resources, and deletes them when the corresponding cluster is deleted.

If you want to place resources into a pre-existing VNet, enable the **Static Placement** option, and fill out the input values listed in the [Static Placement](#static-placement-table) table below. | +| **Parameter** | **Description** | +| --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Subscription** | Use the **drop-down Menu** to select the subscription that will be used to access Azure services. | +| **Region** | Use the **drop-down Menu** to choose the Azure region where you would like to provision the cluster. | +| **Resource Group** | Select the name of the resource group that contains the Azure resources you will be accessing. | +| **Storage Account** | Optionally, if you have a custom storage account available, you can use the **drop-down Menu** to select the storage account name. For information about use cases for custom storage, review [Azure Storage](../azure/architecture.md#azure-storage). | +| **Storage Container** | Optionally, if you will be using a custom storage container, use the **drop-down Menu** to select it. For information about use cases for custom storage, review [Azure Storage](../azure/architecture.md#azure-storage). | +| **SSH Key** | The public SSH key for connecting to the nodes. SSH key pairs must be pre-configured in your Azure environment. The key you select is inserted into the provisioned VMs. For more information, review Microsoft's [Supported SSH key formats](https://learn.microsoft.com/en-us/azure/virtual-machines/linux/mac-create-ssh-keys#supported-ssh-key-formats). | +| **Static Placement** | By default, Palette uses dynamic placement. This creates a new VNet for clusters with two subnets in different Availability Zones (AZs). Palette places resources in these clusters, manages the resources, and deletes them when the corresponding cluster is deleted.

If you want to place resources into a pre-existing VNet, enable the **Static Placement** option, and fill out the input values listed in the [Static Placement](#static-placement-settings) table below. | #### Static Placement Settings @@ -143,9 +143,6 @@ Standard_NC12s_v3 can be configured for Graphics Processing Unit (GPU) workloads ::: -You can apply autoscale capability to dynamically increase resources during high loads and reduce them during low loads. -To learn more, refer to [Enable Autoscale for Azure IaaS Cluster](#enable-autoscale-for-azure-iaas-cluster). - #### Control Plane Pool Configuration Settings | **Parameter** | **Description** | diff --git a/docs/docs-content/devx/services/service-listings/vault.md b/docs/docs-content/devx/services/service-listings/vault.md index 4ea5b4798f..e56922c6e4 100644 --- a/docs/docs-content/devx/services/service-listings/vault.md +++ b/docs/docs-content/devx/services/service-listings/vault.md @@ -9,7 +9,7 @@ sidebar_position: 40 logoUrl: "https://icon-library.com/images/padlock-icon-png/padlock-icon-png-29.jpg" --- -# Vault +## Vault Palette Dev Engine (PDE) users can deploy Vault onto their virtual cluster using the out-of-the-box Vault offering. Vault deployed through PDE is using Banzai Cloud Bank-Vaults. Bank-Vaults is a wrapper for the official @@ -32,11 +32,11 @@ Vault is deployed as a single container in the virtual cluster, and the containe ::: -# Deploy Vault +## Deploy Vault Use the following steps to learn how to deploy Vault to your virtual cluster. -## Prerequisites +### Prerequisites - A Spectro Cloud [account](https://www.spectrocloud.com/get-started/). @@ -50,7 +50,7 @@ Use the following steps to learn how to deploy Vault to your virtual cluster.
-## Enablement +### Enablement 1. Log in to [Palette](https://console.spectrocloud.com). @@ -81,7 +81,7 @@ Use the following steps to learn how to deploy Vault to your virtual cluster. [Deploy a Virtual Cluster](../../../clusters/palette-virtual-clusters/deploy-virtual-cluster.md) guide for additional guidance or check out the [Deploy an Application using Palette Dev Engine](../../apps/deploy-app.md) tutorial. -## Validate +### Validate You can validate the Vault instance deployed successfully by using the following steps. @@ -135,7 +135,7 @@ You can validate the Vault instance deployed successfully by using the following To acquire the Vault root token, review the [Vault Credentials](#vault-credentials) section. -# Output Variables +## Output Variables The exposed output variables. Use these variables when connecting higher-level services with Vault: @@ -146,7 +146,7 @@ The exposed output variables. Use these variables when connecting higher-level s | Service Port | `{{.spectro.app.$appDeploymentName..VAULTMSTR_SVC_PORT}}` | The exposed port for the Vault service. | | Namespace | `{{.spectro.app.$appDeploymentName..VAULTMSTR_SVC_NAMESPACE}}` | The Kubernetes namespace the Vault instance is deployed to. | -# Vault Credentials +## Vault Credentials The Vault root token and the unseal keys are stored as a Kubernetes secret inside the virtual cluster. You can retrieve the Vault root token by following these steps.
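The individual retrieval steps fall between this hunk and the next, so the diff does not show them. As a rough sketch of what that retrieval typically looks like — the secret name `vault-unseal-keys`, the `vault-root` key, and the `vault-system` namespace below are assumptions based on Bank-Vaults defaults rather than values confirmed by this patch — the root token can be read with:

```shell
# Sketch only: assumes the virtual cluster kubeconfig is active and that
# Bank-Vaults stored the keys in a secret named "vault-unseal-keys" with a
# "vault-root" entry in the "vault-system" namespace.
kubectl get secret vault-unseal-keys \
  --namespace vault-system \
  --output jsonpath='{.data.vault-root}' | base64 --decode
```

The unseal keys live in the same secret and decode the same way; the `jq` pipeline in the next hunk shows the equivalent filter for keys prefixed with `vault-unseal-`.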

@@ -188,13 +188,13 @@ the Vault root token by following these steps.

| jq -r '.data | to_entries | .[] | select(.key | startswith("vault-unseal-")) | .value | @base64d + "\n"' ``` -# Next Steps +## Next Steps You can add Vault to your application profile and start integrating Vault with your applications. To learn more about integrating Vault with your applications, check out the [Vault App Integrations](https://developer.hashicorp.com/vault/tutorials/app-integration) tutorials from HashiCorp. -# Resources +## Resources - [Vault Documentation](https://developer.hashicorp.com/vault/docs) diff --git a/docs/docs-content/enterprise-version/install-palette/install-on-vmware/vmware-system-requirements.md b/docs/docs-content/enterprise-version/install-palette/install-on-vmware/vmware-system-requirements.md index 29f049bfbf..9a198c52c5 100644 --- a/docs/docs-content/enterprise-version/install-palette/install-on-vmware/vmware-system-requirements.md +++ b/docs/docs-content/enterprise-version/install-palette/install-on-vmware/vmware-system-requirements.md @@ -36,8 +36,8 @@ Palette requires two custom roles to be created in vSphere before the installati guide if you need help creating a custom role in vSphere. The required custom roles are: - A root-level role with access to higher-level vSphere objects. This role is referred to as the _spectro root role_. - Check out the [Root-Level Role Privileges](#root-level-role-privileges) table for the list of privileges required for - the root-level role. + Check out the [Root-Level Role Privileges](#spectro-root-role-privileges) table for the list of privileges required + for the root-level role. - A role with the required privileges for deploying VMs. This role is referred to as the _Spectro role_. Review the [Spectro Role Privileges](#spectro-role-privileges) table for the list of privileges required for the Spectro role. diff --git a/docs/docs-content/glossary-all.md b/docs/docs-content/glossary-all.md index a3463d58c8..8b3a3e468d 100644 --- a/docs/docs-content/glossary-all.md +++ b/docs/docs-content/glossary-all.md @@ -282,7 +282,7 @@ which users can bring up in their environment using Palette's pack registry soft Projects provide a way for grouping clusters together for logical separation. Role-based access controls within Palette are applied at the project level. [Users](#user) and [teams](#team) can be assigned one or more [roles](#role) within a -project for granular control over [permissions](#permission) within the project scope. +project for granular control over [permissions](#permissions) within the project scope. ## Public Pack Registry @@ -299,7 +299,7 @@ pool. The process is fully automated and does not require manual intervention. ## Role -A Role is a collection of [permissions](#permission). There are two kinds of roles in Palette: _tenant roles_ and +A Role is a collection of [permissions](#permissions). There are two kinds of roles in Palette: _tenant roles_ and _project roles_. _Tenant roles_ are a collection of tenant-level permissions such as create a new user, add a new project, etc. _Project roles_ consist of permissions for various actions within the scope of a project such as create a cluster profile, create a cluster, etc. @@ -328,7 +328,7 @@ it.Initial login:admin/admin. A Team is a group of [users](#user). Users can be part of one or more teams. Teams provide a convenient way to control platform access for a group of users. [Roles](#role) assigned to a team grant associated tenant or [project](#project) -[permissions](#permission) to all users that are part of the team. 
+[permissions](#permissions) to all users that are part of the team. ## Tenant diff --git a/docs/docs-content/integrations/k3s.md b/docs/docs-content/integrations/k3s.md index c2928f11cb..95aefcb87c 100644 --- a/docs/docs-content/integrations/k3s.md +++ b/docs/docs-content/integrations/k3s.md @@ -40,14 +40,14 @@ depending on the cluster type. -| **Parameter** | **Description** | -| ------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `cluster.config.cluster-cidr` | Specifies the CIDR range that can be used by pods in the cluster. | -| `cluster.config.service-cidr` | Specifies the CIDR range that can be used by services in the cluster. | -| `kube-apiserver-arg` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port. | -| `kube-controller-manager-arg` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | -| `kubelet-arg` | This parameter contains extra arguments for Kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | -| `pack.palette.config.oidc.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the K3s pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](#configure-custom-oidc). | +| **Parameter** | **Description** | +| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `cluster.config.cluster-cidr` | Specifies the CIDR range that can be used by pods in the cluster. | +| `cluster.config.service-cidr` | Specifies the CIDR range that can be used by services in the cluster. | +| `kube-apiserver-arg` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port. | +| `kube-controller-manager-arg` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | +| `kubelet-arg` | This parameter contains extra arguments for Kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | +| `pack.palette.config.oidc.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the K3s pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](#configure-oidc-identity-provider-for-edge). | You can add cloud-init stages, which allow you to customize your instances declaratively. 
The cloud-init stages are exposed by [Kairos](https://kairos.io/docs/architecture/cloud-init/), an open source project. For more information, @@ -177,14 +177,14 @@ depending on the cluster type. -| **Parameter** | **Description** | -| ------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `cluster.config.cluster-cidr` | Specifies the CIDR range that can be used by pods in the cluster. | -| `cluster.config.service-cidr` | Specifies the CIDR range that can be used by services in the cluster. | -| `kube-apiserver-arg` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port. | -| `kube-controller-manager-arg` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | -| `kubelet-arg` | This parameter contains extra arguments for Kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | -| `pack.palette.config.oidc.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the K3s pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](#configure-custom-oidc). | +| **Parameter** | **Description** | +| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `cluster.config.cluster-cidr` | Specifies the CIDR range that can be used by pods in the cluster. | +| `cluster.config.service-cidr` | Specifies the CIDR range that can be used by services in the cluster. | +| `kube-apiserver-arg` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port. | +| `kube-controller-manager-arg` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | +| `kubelet-arg` | This parameter contains extra arguments for Kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | +| `pack.palette.config.oidc.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the K3s pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](#configure-oidc-identity-provider-for-edge). | You can add cloud-init stages, which allow you to customize your instances declaratively. The cloud-init stages are exposed by [Kairos](https://kairos.io/docs/architecture/cloud-init/), an open source project. For more information, @@ -316,14 +316,14 @@ depending on the cluster type. 
-| **Parameter** | **Description** | -| ------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `cluster.config.cluster-cidr` | Specifies the CIDR range that can be used by pods in the cluster. | -| `cluster.config.service-cidr` | Specifies the CIDR range that can be used by services in the cluster. | -| `kube-apiserver-arg` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port. | -| `kube-controller-manager-arg` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | -| `kubelet-arg` | This parameter contains extra arguments for Kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | -| `pack.palette.config.oidc.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the K3s pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](#configure-custom-oidc). | +| **Parameter** | **Description** | +| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `cluster.config.cluster-cidr` | Specifies the CIDR range that can be used by pods in the cluster. | +| `cluster.config.service-cidr` | Specifies the CIDR range that can be used by services in the cluster. | +| `kube-apiserver-arg` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port. | +| `kube-controller-manager-arg` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | +| `kubelet-arg` | This parameter contains extra arguments for Kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | +| `pack.palette.config.oidc.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the K3s pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](#configure-oidc-identity-provider-for-edge). | You can add cloud-init stages, which allow you to customize your instances declaratively. The cloud-init stages are exposed by [Kairos](https://kairos.io/docs/architecture/cloud-init/), an open source project. For more information, @@ -454,14 +454,14 @@ depending on the cluster type. 
-| **Parameter** | **Description** | -| ------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `cluster.config.cluster-cidr` | Specifies the CIDR range that can be used by pods in the cluster. | -| `cluster.config.service-cidr` | Specifies the CIDR range that can be used by services in the cluster. | -| `kube-apiserver-arg` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port. | -| `kube-controller-manager-arg` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | -| `kubelet-arg` | This parameter contains extra arguments for Kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | -| `pack.palette.config.oidc.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the K3s pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](#configure-custom-oidc). | +| **Parameter** | **Description** | +| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `cluster.config.cluster-cidr` | Specifies the CIDR range that can be used by pods in the cluster. | +| `cluster.config.service-cidr` | Specifies the CIDR range that can be used by services in the cluster. | +| `kube-apiserver-arg` | This parameter contains extra arguments for the Kubernetes API server, such as enabling audit logging, enabling certain authorization modes, and setting profiling and secure-port. | +| `kube-controller-manager-arg` | This parameter describes extra arguments for the Kubernetes Controller Manager, such as enabling certain feature gates and setting profiling. | +| `kubelet-arg` | This parameter contains extra arguments for Kubelet during node registration, such as setting feature gates, protecting kernel defaults, and disabling the read-only port. | +| `pack.palette.config.oidc.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the K3s pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](#configure-oidc-identity-provider-for-edge). | You can add cloud-init stages, which allow you to customize your instances declaratively. The cloud-init stages are exposed by [Kairos](https://kairos.io/docs/architecture/cloud-init/), an open source project. For more information, diff --git a/docs/docs-content/integrations/kubernetes.md b/docs/docs-content/integrations/kubernetes.md index 3bbaf13baf..99b3189d35 100644 --- a/docs/docs-content/integrations/kubernetes.md +++ b/docs/docs-content/integrations/kubernetes.md @@ -60,6 +60,346 @@ four months. 
Once we stop supporting the minor version, we initiate the deprecat + + +## Prerequisites + +- A minimum of 4 CPU and 4GB Memory. + +- Operating System (OS) dependencies as listed in the table. + +| OS Distribution | OS Version | Supports Kubernetes 1.27.x | +| --------------- | ---------- | -------------------------- | +| CentOS | 7.7 | ✅ | +| Ubuntu | 22.04 | ✅ | +| Ubuntu | 20.04 | ❌ | +| Ubuntu | 18.04 | ❌ | + +## Parameters + +| Parameter | Description | +| ------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `pack.podCIDR` | The CIDR range for Pods in the cluster. This should match the networking layer property. Default: `192.168.0.0/16` | +| `pack.serviceClusterIpRange` | The CIDR range for services in the cluster. This should not overlap with any IP ranges assigned to nodes or pods. Default: `10.96.0.0/12` | +| `pack.serviceDomain` | The cluster DNS service domain. Default: `cluster.local`. To change the default, you must add this parameter to the Kubernetes YAML file at cluster creation and specify the cluster DNS service domain to use. This value cannot be changed after cluster creation is complete. Refer to the [Change Cluster DNS Service Domain](kubernetes.md?versions=k8s_v1.27#change-cluster-dns-service-domain) section. | +| `pack.palette.config.dashboard.identityProvider` | Dynamically enabled OpenID Connect (OIDC) Identity Provider (IDP) setting based on your UI selection when you add the PXK pack to your profile. This parameter appears in the YAML file after you make a selection. Refer to [Configure OIDC Identity Provider](kubernetes.md#configure-oidc-identity-provider). | +| `kubeadmconfig.apiServer.extraArgs` | A list of additional apiServer flags you can set. | +| `kubeadmconfig.apiServer.extraVolumes` | A list of additional volumes to mount on the apiServer. | +| `kubeadmconfig.controllerManager.extraArgs` | A list of additional ControllerManager flags to set. | +| `kubeadmconfig.scheduler.extraArgs` | A list of additional Kube scheduler flags to set. | +| `kubeadmconfig.kubeletExtraArgs` | A list of kubelet arguments to set and copy to the nodes. | +| `kubeadmconfig.files` | A list of additional files to copy to the nodes. | +| `kubeadmconfig.preKubeadmCommands` | A list of additional commands to invoke **before** running kubeadm commands. | +| `kubeadmconfig.postKubeadmCommands` | A list of additional commands to invoke **after** running kubeadm commands. | +| `kubeadmconfig.clientConfig` | Settings to manually configure OIDC-based authentication when you choose a third-party (Custom) IDP. Refer to [Configure Custom OIDC](#configure-custom-oidc). | + +## Usage + +The Kubeadm configuration file is where you can do the following: + +- Change the default `podCIDR` and `serviceClusterIpRange` values. CIDR IPs specified in the configuration file take + precedence over other defined CIDR IPs in your environment. + + As you build your cluster, check that the `podCIDR` value does not overlap with any hosts or with the service network + and the `serviceClusterIpRange` value does not overlap with any IP ranges assigned to nodes or pods. 
For more + information, refer to the [Clusters](../clusters/clusters.md) guide and + [Cluster Deployment Errors](../troubleshooting/cluster-deployment.md). + +- Change the default cluster DNS service domain from `cluster.local` to a DNS domain that you specify. You can only + change the DNS domain during cluster creation. For more information, refer to + [Change Cluster DNS Service Domain](kubernetes.md?versions=k8s_v1.27#change-cluster-dns-service-domain). + +- Manually configure a third-party OpenID Connect (OIDC) Identity Provider (IDP). For more information, check out + [Configure Custom OIDC](#configure-custom-oidc). + +- Add a certificate for the Spectro Proxy pack if you want to use a reverse proxy with a Kubernetes cluster. For more + information, refer to the [Spectro Proxy](frp.md) guide. + +### Change Cluster DNS Service Domain + +The `pack.serviceDomain` parameter with default value `cluster.local` is not visible in the Kubernetes YAML file, and +its value can only be changed at cluster creation. To change the value, you must add `serviceDomain: "cluster.local"` to +the Kubernetes YAML file when you create a cluster, and specify the service domain you want to use. + +```yaml hideClipboard +pack: + k8sHardening: True + podCIDR: "172.16.0.0/16" + serviceClusterIPRange: "10.96.0.0/12" + serviceDomain: "" +``` + +:::warning + +You can only specify the service domain at cluster creation. After cluster creation completes, you cannot update the +value. Attempting to update it results in the error `serviceDomain update is forbidden for existing cluster`. + +::: + +For more information about networking configuration with DNS domains, refer to the Kubernetes +[Networking](https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/#kubeadm-k8s-io-v1beta3-Networking) +API documentation. + +### Configuration Changes + +The PXK Kubeadm configuration is updated to dynamically enable OIDC based on your IDP selection by adding the +`identityProvider` parameter. 
+ +```yaml +palette: + config: + dashboard: + identityProvider: +``` + +### Example Kubeadm Configuration File + +```yaml hideClipboard +pack: + k8sHardening: True + podCIDR: "192.168.0.0/16" + serviceClusterIpRange: "10.96.0.0/12" + palette: + config: + dashboard: + identityProvider: palette +kubeadmconfig: + apiServer: + extraArgs: + secure-port: "6443" + anonymous-auth: "true" + profiling: "false" + disable-admission-plugins: "AlwaysAdmit" + default-not-ready-toleration-seconds: "60" + default-unreachable-toleration-seconds: "60" + enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity" + admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml" + audit-log-path: /var/log/apiserver/audit.log + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "100" + authorization-mode: RBAC,Node + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extraVolumes: + - name: audit-log + hostPath: /var/log/apiserver + mountPath: /var/log/apiserver + pathType: DirectoryOrCreate + - name: audit-policy + hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + readOnly: true + pathType: File + - name: pod-security-standard + hostPath: /etc/kubernetes/pod-security-standard.yaml + mountPath: /etc/kubernetes/pod-security-standard.yaml + readOnly: true + pathType: File + controllerManager: + extraArgs: + profiling: "false" + terminated-pod-gc-threshold: "25" + pod-eviction-timeout: "1m0s" + use-service-account-credentials: "true" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extraArgs: + profiling: "false" + kubeletExtraArgs: + read-only-port: "0" + event-qps: "0" + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + files: + - path: hardening/audit-policy.yaml + targetPath: /etc/kubernetes/audit-policy.yaml + targetOwner: "root:root" + targetPermissions: "0600" + - path: hardening/90-kubelet.conf + targetPath: /etc/sysctl.d/90-kubelet.conf + targetOwner: "root:root" + targetPermissions: "0600" + - targetPath: /etc/kubernetes/pod-security-standard.yaml + targetOwner: "root:root" + targetPermissions: "0600" + content: | + apiVersion: apiserver.config.k8s.io/v1 + kind: AdmissionConfiguration + plugins: + - name: PodSecurity + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1 + kind: PodSecurityConfiguration + defaults: + enforce: "baseline" + enforce-version: "v1.26" + audit: "baseline" + audit-version: "v1.26" + warn: "restricted" + warn-version: "v1.26" + audit: "restricted" + audit-version: "v1.26" + exemptions: + # Array of authenticated usernames to exempt. + usernames: [] + # Array of runtime class names to exempt. + runtimeClasses: [] + # Array of namespaces to exempt. 
+ namespaces: [kube-system] + + preKubeadmCommands: + - 'echo "====> Applying kernel parameters for Kubelet"' + - 'sysctl -p /etc/sysctl.d/90-kubelet.conf' + postKubeadmCommands: + - 'echo "List of post kubeadm commands to be executed"' + + # Client configuration to add OIDC based authentication flags in kubeconfig + #clientConfig: + #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}" + #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}" + #oidc-client-secret: yourSecretClientSecretGoesHere + #oidc-extra-scope: profile,email +``` + +### Configure OIDC Identity Provider + +Platforms that use PXK can use the OIDC IDP feature, which offers the convenience of managing OIDC at the Kubernetes +layer. The OIDC IDP feature is particularly useful for environments that do not have their own IDP configured. In this +scenario, you can leverage Palette as an IDP without having to configure a third-party IDP. We also support the ability +to take advantage of other OIDC providers by making it possible for you to configure OIDC at the tenant level. For +additional flexibility, if you wish to use a different IDP than the one configured at the tenant level, you can select a +different IDP by adding the OIDC configuration to your cluster profile. + +When you add the PXK pack to a cluster profile, Palette displays the OIDC IDP options listed below. + +All the options require you to map a set of users or groups to a Kubernetes RBAC role. To learn how to map a Kubernetes +role to users and groups, refer to +[Create Role Bindings](/clusters/cluster-management/cluster-rbac#create-role-bindings). You can also configure OIDC for +virtual clusters. For guidance, refer to [Configure OIDC for a Virtual Cluster](./oidc-eks.md). + +- **None**: This setting does not require OIDC configuration for the cluster. It displays in the YAML file as `noauth`. + + :::warning + + We do not recommend choosing **None** in a production environment, as it may disable authentication for add-ons that + rely on OIDC. + + ::: + +- **Custom**: This is the default setting and does not require OIDC configuration. However, if desired, it allows you to + specify a third-party OIDC provider by configuring OIDC statements in the YAML file as described in + [Configure Custom OIDC](#configure-custom-oidc). This setting displays in the YAML file as `none`. + +- **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper + permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting + displays in the YAML file as `palette`. + +- **Inherit from Tenant**: This setting allows you to apply RBAC to multiple clusters and requires you to configure + OpenID Connect (OIDC) in **Tenant Settings**. In Tenant Admin scope, navigate to **Tenant Settings** > **SSO**, choose + **OIDC**, and provide your third-party IDP details. This setting displays in the YAML file as `tenant`. For more + information, check out the [SSO Setup](../user-management/saml-sso/saml-sso.md) guide. + + :::info + + If your IDP uses Security Assertion Markup Language (SAML) authentication, then the **Inherit from Tenant** option + will not work, and you will need to use the **Custom** option instead. This is because Kubernetes supports only OIDC + authentication and not SAML authentication. 
+ + ::: + +### Configure Custom OIDC + +The custom method to configure OIDC and apply RBAC for an OIDC provider can be used for all cloud services except Amazon +Elastic Kubernetes Service (EKS) and +[Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory). + + + + + +Follow these steps to configure a third-party OIDC IDP. You can apply these steps to all the public cloud providers +except Azure AKS and Amazon EKS clusters. Azure AKS and Amazon EKS require different configurations. AKS requires you to +use Azure Active Directory (AAD) to enable OIDC integration. Refer to +[Azure-AKS](../clusters/public-cloud/azure/aks.md#configure-an-azure-active-directory) to learn more. Click the **Amazon +EKS** tab for steps to configure OIDC for EKS clusters. + +1. Add the following parameters to your Kubernetes YAML file when creating a cluster profile. + +```yaml +kubeadmconfig: + apiServer: + extraArgs: + oidc-issuer-url: "provider URL" + oidc-client-id: "client-id" + oidc-groups-claim: "groups" + oidc-username-claim: "email" +``` + +2. Under the `clientConfig` parameter section of Kubernetes YAML file, uncomment the `oidc-` configuration lines. + +```yaml +kubeadmconfig: + clientConfig: + oidc-issuer-url: "" + oidc-client-id: "" + oidc-client-secret: "" + oidc-extra-scope: profile,email,openid +``` + + + + + +Follow these steps to configure OIDC for managed EKS clusters. + +1. In the Kubernetes pack, uncomment the lines in the `oidcIdentityProvider` parameter section of the Kubernetes pack, + and enter your third-party provider details. + +```yaml +oidcIdentityProvider: + identityProviderConfigName: "Spectro-docs" + issuerUrl: "issuer-url" + clientId: "user-client-id-from-Palette" + usernameClaim: "email" + usernamePrefix: "-" + groupsClaim: "groups" + groupsPrefix: "" + requiredClaims: +``` + +2. Under the `clientConfig` parameter section of Kubernetes pack, uncomment the `oidc-` configuration lines. + +```yaml +clientConfig: + oidc-issuer-url: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.issuerUrl }}" + oidc-client-id: "{{ .spectro.pack.kubernetes-eks.managedControlPlane.oidcIdentityProvider.clientId }}" + oidc-client-secret: 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv + oidc-extra-scope: profile,email +``` + +3. Provide third-party OIDC IDP details. + + + + + +### Use RBAC with OIDC + +You can create a role binding that uses individual users as the subject or specify a group name as the subject to map +many users to a role. The group name is the group assigned in the OIDC provider's configuration. Below is an example. To +learn more, review [Create Role Bindings](../clusters/cluster-management/cluster-rbac.md#create-role-bindings). + +Assume you created a group named `dev-east-2` within an OIDC provider. If you configure the host cluster's Kubernetes +pack with all the correct OIDC settings, you could then create a role binding for the `dev-east-2` group. + +In this example, Palette is used as the IDP, and all users in the `dev-east-2` would inherit the `cluster-admin` role. + +![A subject of the type group is assigned as the subject in a RoleBinding](/clusters_cluster-management_cluster-rbac_cluster-subject-group.png) + + + ## Prerequisites @@ -295,7 +635,7 @@ role to users and groups, refer to - **Custom**: This is the default setting and does not require OIDC configuration. 
However, if desired, it allows you to specify a third-party OIDC provider by configuring OIDC statements in the YAML file as described in - [Configure Custom OIDC](#configurecustomoidc). This setting displays in the YAML file as `none`. + [Configure Custom OIDC](#configure-custom-oidc). This setting displays in the YAML file as `none`. - **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting @@ -440,7 +780,7 @@ In this example, Palette is used as the IDP, and all users in the `dev-east-2` w | `kubeadmconfig.files` | A list of additional files to copy to the nodes. | | `kubeadmconfig.preKubeadmCommands` | A list of additional commands to invoke **before** running kubeadm commands. | | `kubeadmconfig.postKubeadmCommands` | A list of additional commands to invoke **after** running kubeadm commands. | -| `kubeadmconfig.clientConfig` | Settings to manually configure OIDC-based authentication when you choose a third-party (Custom) IDP. Refer to [Configure Custom OIDC](#configurecustomoidc). | +| `kubeadmconfig.clientConfig` | Settings to manually configure OIDC-based authentication when you choose a third-party (Custom) IDP. Refer to [Configure Custom OIDC](#configure-custom-oidc). | ## Usage @@ -455,7 +795,7 @@ The Kubeadm configuration file is where you can do the following: [Cluster Deployment Errors](../troubleshooting/cluster-deployment.md). - Manually configure a third-party OpenID Connect (OIDC) Identity Provider (IDP). For more information, check out - [Configure Custom OIDC](#configurecustomoidc). + [Configure Custom OIDC](#configure-custom-oidc). - Change the default cluster DNS service domain from `cluster.local` to a DNS domain that you specify. You can only change the DNS domain during cluster creation. For more information, refer to @@ -625,7 +965,7 @@ role to users and groups, refer to - **Custom**: This is the default setting and does not require OIDC configuration. However, if desired, it allows you to specify a third-party OIDC provider by configuring OIDC statements in the YAML file as described in - [Configure Custom OIDC](#configurecustomoidc). This setting displays in the YAML file as `none`. + [Configure Custom OIDC](#configure-custom-oidc). This setting displays in the YAML file as `none`. - **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting @@ -770,7 +1110,7 @@ In this example, Palette is used as the IDP, and all users in the `dev-east-2` w | `kubeadmconfig.files` | A list of additional files to copy to the nodes. | | `kubeadmconfig.preKubeadmCommands` | A list of additional commands to invoke **before** running kubeadm commands. | | `kubeadmconfig.postKubeadmCommands` | A list of additional commands to invoke **after** running kubeadm commands. | -| `kubeadmconfig.clientConfig` | Settings to manually configure OIDC-based authentication when you choose a third-party (Custom) IDP. Refer to [Configure Custom OIDC](#configurecustomoidc). | +| `kubeadmconfig.clientConfig` | Settings to manually configure OIDC-based authentication when you choose a third-party (Custom) IDP. Refer to [Configure Custom OIDC](#configure-custom-oidc). 
| ## Usage @@ -785,7 +1125,7 @@ The Kubeadm configuration file is where you can do the following: [Cluster Deployment Errors](../troubleshooting/cluster-deployment.md). - Manually configure a third-party OpenID Connect (OIDC) Identity Provider (IDP). For more information, check out - [Configure Custom OIDC](#configurecustomoidc). + [Configure Custom OIDC](#configure-custom-oidc). - Change the default cluster DNS service domain from `cluster.local` to a DNS domain that you specify. You can only change the DNS domain during cluster creation. For more information, refer to @@ -974,8 +1314,8 @@ role to users and groups, refer to
- **Custom**: This setting allows you to specify a third-party OIDC provider by configuring OIDC statements in the - Kubeadm configuration file as described in [Configure Custom OIDC](#configurecustomoidc). This setting displays in the - YAML file as `none`. + Kubeadm configuration file as described in [Configure Custom OIDC](#configure-custom-oidc). This setting displays in + the YAML file as `none`. - **Palette**: This setting makes Palette the IDP. Any user with a Palette account in the tenant and the proper permissions to view and access the project's resources is able to log into the Kubernetes dashboard. This setting diff --git a/docs/docs-content/profiles/cluster-profiles/modify-cluster-profiles/update-cluster-profile.md b/docs/docs-content/profiles/cluster-profiles/modify-cluster-profiles/update-cluster-profile.md index 3601f93899..a13840724e 100644 --- a/docs/docs-content/profiles/cluster-profiles/modify-cluster-profiles/update-cluster-profile.md +++ b/docs/docs-content/profiles/cluster-profiles/modify-cluster-profiles/update-cluster-profile.md @@ -58,7 +58,7 @@ profiles, check out [Version a Cluster Profile](version-cluster-profile.md). 3. Check that profile details display your changes. -To learn how to apply the changes, review [Apply Profile Updates to Clusters](#apply-updates-to-clusters). +To learn how to apply the changes, review [Apply Profile Updates to Clusters](#apply-profile-updates-to-clusters). ## Update a Profile Layer @@ -110,7 +110,7 @@ To learn how to apply the changes, review [Apply Profile Updates to Clusters](#a 5. If you added, removed, or modified a manifest, click the layer in the stack that you updated and verify the manifest changes. -To learn how to apply the changes, review [Apply Profile Updates to Clusters](#apply-updates-to-clusters). +To learn how to apply the changes, review [Apply Profile Updates to Clusters](#apply-profile-updates-to-clusters). ## Update the Pack Version @@ -183,7 +183,7 @@ Ensure you follow these practices when updating to a new pack version. 10. Confirm your updates. -To learn how to apply the changes, review [Apply Profile Updates to Clusters](#apply-updates-to-clusters). +To learn how to apply the changes, review [Apply Profile Updates to Clusters](#apply-profile-updates-to-clusters). ### Validate diff --git a/docs/docs-content/registries-and-packs/adding-a-custom-registry.md b/docs/docs-content/registries-and-packs/adding-a-custom-registry.md index cc2d701a7e..0eb446581d 100644 --- a/docs/docs-content/registries-and-packs/adding-a-custom-registry.md +++ b/docs/docs-content/registries-and-packs/adding-a-custom-registry.md @@ -7,8 +7,6 @@ hide_table_of_contents: false sidebar_position: 0 --- -# Add Custom Registries - Setting up a custom pack registry is a two-step process. The first step is to deploy a pack registry server using a Docker image provided by us. While deploying a pack registry server, you can employ a TLS certificate from a Certificate Authority (CA) or a self-signed certificate. The current guide will provide instructions for both methods - using TLS @@ -289,7 +287,7 @@ to affirm the certificate's authenticity before establishing a communication cha 7. Click the **Confirm** button to finish configuring the pack registry server. After you finish the configuration, Palette will periodically synchronize with the pack registry server to download pack updates, if any. -# Self-Signed Certificates +## Self-Signed Certificates For self-signed certificates, use the following command to generate certificates. 
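The command itself sits below this hunk's context, so it does not appear in the diff. As an illustrative sketch only — the host name and output file names are placeholders rather than the values the guide uses — a self-signed certificate and key pair can be generated with OpenSSL along these lines:

```shell
# Sketch only: creates a self-signed certificate and private key for a
# placeholder registry host. Replace the common name, SAN, and file names
# with values that match your pack registry server.
openssl req -x509 -newkey rsa:4096 -sha256 -days 365 -nodes \
  -keyout tls.key -out tls.crt \
  -subj "/CN=registry.example.com" \
  -addext "subjectAltName=DNS:registry.example.com"
```

Both the certificate and key then need to be supplied to the registry server, and the certificate shared with Palette so clients can affirm its authenticity before establishing a communication channel.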
diff --git a/package-lock.json b/package-lock.json index 1381c268e4..02644dca00 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,10 +10,10 @@ "dependencies": { "@commitlint/cli": "^17.6.7", "@commitlint/config-conventional": "^17.6.7", - "@docusaurus/core": "^3.1.0", - "@docusaurus/plugin-client-redirects": "^3.1.0", - "@docusaurus/plugin-ideal-image": "^3.1.0", - "@docusaurus/preset-classic": "^3.1.0", + "@docusaurus/core": "^3.1.1", + "@docusaurus/plugin-client-redirects": "^3.1.1", + "@docusaurus/plugin-ideal-image": "^3.1.1", + "@docusaurus/preset-classic": "^3.1.1", "@docusaurus/types": "^3.0.1", "@fortawesome/fontawesome-svg-core": "^6.4.0", "@fortawesome/free-solid-svg-icons": "^6.4.0", @@ -88,7 +88,8 @@ }, "node_modules/@algolia/autocomplete-core": { "version": "1.9.3", - "license": "MIT", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-core/-/autocomplete-core-1.9.3.tgz", + "integrity": "sha512-009HdfugtGCdC4JdXUbVJClA0q0zh24yyePn+KUGk3rP7j8FEe/m5Yo/z65gn6nP/cM39PxpzqKrL7A6fP6PPw==", "dependencies": { "@algolia/autocomplete-plugin-algolia-insights": "1.9.3", "@algolia/autocomplete-shared": "1.9.3" @@ -96,7 +97,8 @@ }, "node_modules/@algolia/autocomplete-plugin-algolia-insights": { "version": "1.9.3", - "license": "MIT", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-plugin-algolia-insights/-/autocomplete-plugin-algolia-insights-1.9.3.tgz", + "integrity": "sha512-a/yTUkcO/Vyy+JffmAnTWbr4/90cLzw+CC3bRbhnULr/EM0fGNvM13oQQ14f2moLMcVDyAx/leczLlAOovhSZg==", "dependencies": { "@algolia/autocomplete-shared": "1.9.3" }, @@ -106,7 +108,8 @@ }, "node_modules/@algolia/autocomplete-preset-algolia": { "version": "1.9.3", - "license": "MIT", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-preset-algolia/-/autocomplete-preset-algolia-1.9.3.tgz", + "integrity": "sha512-d4qlt6YmrLMYy95n5TB52wtNDr6EgAIPH81dvvvW8UmuWRgxEtY0NJiPwl/h95JtG2vmRM804M0DSwMCNZlzRA==", "dependencies": { "@algolia/autocomplete-shared": "1.9.3" }, @@ -117,115 +120,131 @@ }, "node_modules/@algolia/autocomplete-shared": { "version": "1.9.3", - "license": "MIT", + "resolved": "https://registry.npmjs.org/@algolia/autocomplete-shared/-/autocomplete-shared-1.9.3.tgz", + "integrity": "sha512-Wnm9E4Ye6Rl6sTTqjoymD+l8DjSTHsHboVRYrKgEt8Q7UHm9nYbqhN/i0fhUYA3OAEH7WA8x3jfpnmJm3rKvaQ==", "peerDependencies": { "@algolia/client-search": ">= 4.9.1 < 6", "algoliasearch": ">= 4.9.1 < 6" } }, "node_modules/@algolia/cache-browser-local-storage": { - "version": "4.22.0", - "license": "MIT", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.22.1.tgz", + "integrity": "sha512-Sw6IAmOCvvP6QNgY9j+Hv09mvkvEIDKjYW8ow0UDDAxSXy664RBNQk3i/0nt7gvceOJ6jGmOTimaZoY1THmU7g==", "dependencies": { - "@algolia/cache-common": "4.22.0" + "@algolia/cache-common": "4.22.1" } }, "node_modules/@algolia/cache-common": { - "version": "4.22.0", - "license": "MIT" + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.22.1.tgz", + "integrity": "sha512-TJMBKqZNKYB9TptRRjSUtevJeQVXRmg6rk9qgFKWvOy8jhCPdyNZV1nB3SKGufzvTVbomAukFR8guu/8NRKBTA==" }, "node_modules/@algolia/cache-in-memory": { - "version": "4.22.0", - "license": "MIT", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.22.1.tgz", + "integrity": "sha512-ve+6Ac2LhwpufuWavM/aHjLoNz/Z/sYSgNIXsinGofWOysPilQZPUetqLj8vbvi+DHZZaYSEP9H5SRVXnpsNNw==", "dependencies": { - 
"@algolia/cache-common": "4.22.0" + "@algolia/cache-common": "4.22.1" } }, "node_modules/@algolia/client-account": { - "version": "4.22.0", - "license": "MIT", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.22.1.tgz", + "integrity": "sha512-k8m+oegM2zlns/TwZyi4YgCtyToackkOpE+xCaKCYfBfDtdGOaVZCM5YvGPtK+HGaJMIN/DoTL8asbM3NzHonw==", "dependencies": { - "@algolia/client-common": "4.22.0", - "@algolia/client-search": "4.22.0", - "@algolia/transporter": "4.22.0" + "@algolia/client-common": "4.22.1", + "@algolia/client-search": "4.22.1", + "@algolia/transporter": "4.22.1" } }, "node_modules/@algolia/client-analytics": { - "version": "4.22.0", - "license": "MIT", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.22.1.tgz", + "integrity": "sha512-1ssi9pyxyQNN4a7Ji9R50nSdISIumMFDwKNuwZipB6TkauJ8J7ha/uO60sPJFqQyqvvI+px7RSNRQT3Zrvzieg==", "dependencies": { - "@algolia/client-common": "4.22.0", - "@algolia/client-search": "4.22.0", - "@algolia/requester-common": "4.22.0", - "@algolia/transporter": "4.22.0" + "@algolia/client-common": "4.22.1", + "@algolia/client-search": "4.22.1", + "@algolia/requester-common": "4.22.1", + "@algolia/transporter": "4.22.1" } }, "node_modules/@algolia/client-common": { - "version": "4.22.0", - "license": "MIT", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.22.1.tgz", + "integrity": "sha512-IvaL5v9mZtm4k4QHbBGDmU3wa/mKokmqNBqPj0K7lcR8ZDKzUorhcGp/u8PkPC/e0zoHSTvRh7TRkGX3Lm7iOQ==", "dependencies": { - "@algolia/requester-common": "4.22.0", - "@algolia/transporter": "4.22.0" + "@algolia/requester-common": "4.22.1", + "@algolia/transporter": "4.22.1" } }, "node_modules/@algolia/client-personalization": { - "version": "4.22.0", - "license": "MIT", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.22.1.tgz", + "integrity": "sha512-sl+/klQJ93+4yaqZ7ezOttMQ/nczly/3GmgZXJ1xmoewP5jmdP/X/nV5U7EHHH3hCUEHeN7X1nsIhGPVt9E1cQ==", "dependencies": { - "@algolia/client-common": "4.22.0", - "@algolia/requester-common": "4.22.0", - "@algolia/transporter": "4.22.0" + "@algolia/client-common": "4.22.1", + "@algolia/requester-common": "4.22.1", + "@algolia/transporter": "4.22.1" } }, "node_modules/@algolia/client-search": { - "version": "4.22.0", - "license": "MIT", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.22.1.tgz", + "integrity": "sha512-yb05NA4tNaOgx3+rOxAmFztgMTtGBi97X7PC3jyNeGiwkAjOZc2QrdZBYyIdcDLoI09N0gjtpClcackoTN0gPA==", "dependencies": { - "@algolia/client-common": "4.22.0", - "@algolia/requester-common": "4.22.0", - "@algolia/transporter": "4.22.0" + "@algolia/client-common": "4.22.1", + "@algolia/requester-common": "4.22.1", + "@algolia/transporter": "4.22.1" } }, "node_modules/@algolia/events": { "version": "4.0.1", - "license": "MIT" + "resolved": "https://registry.npmjs.org/@algolia/events/-/events-4.0.1.tgz", + "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==" }, "node_modules/@algolia/logger-common": { - "version": "4.22.0", - "license": "MIT" + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.22.1.tgz", + "integrity": "sha512-OnTFymd2odHSO39r4DSWRFETkBufnY2iGUZNrMXpIhF5cmFE8pGoINNPzwg02QLBlGSaLqdKy0bM8S0GyqPLBg==" }, 
"node_modules/@algolia/logger-console": { - "version": "4.22.0", - "license": "MIT", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.22.1.tgz", + "integrity": "sha512-O99rcqpVPKN1RlpgD6H3khUWylU24OXlzkavUAMy6QZd1776QAcauE3oP8CmD43nbaTjBexZj2nGsBH9Tc0FVA==", "dependencies": { - "@algolia/logger-common": "4.22.0" + "@algolia/logger-common": "4.22.1" } }, "node_modules/@algolia/requester-browser-xhr": { - "version": "4.22.0", - "license": "MIT", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.22.1.tgz", + "integrity": "sha512-dtQGYIg6MteqT1Uay3J/0NDqD+UciHy3QgRbk7bNddOJu+p3hzjTRYESqEnoX/DpEkaNYdRHUKNylsqMpgwaEw==", "dependencies": { - "@algolia/requester-common": "4.22.0" + "@algolia/requester-common": "4.22.1" } }, "node_modules/@algolia/requester-common": { - "version": "4.22.0", - "license": "MIT" + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.22.1.tgz", + "integrity": "sha512-dgvhSAtg2MJnR+BxrIFqlLtkLlVVhas9HgYKMk2Uxiy5m6/8HZBL40JVAMb2LovoPFs9I/EWIoFVjOrFwzn5Qg==" }, "node_modules/@algolia/requester-node-http": { - "version": "4.22.0", - "license": "MIT", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.22.1.tgz", + "integrity": "sha512-JfmZ3MVFQkAU+zug8H3s8rZ6h0ahHZL/SpMaSasTCGYR5EEJsCc8SI5UZ6raPN2tjxa5bxS13BRpGSBUens7EA==", "dependencies": { - "@algolia/requester-common": "4.22.0" + "@algolia/requester-common": "4.22.1" } }, "node_modules/@algolia/transporter": { - "version": "4.22.0", - "license": "MIT", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.22.1.tgz", + "integrity": "sha512-kzWgc2c9IdxMa3YqA6TN0NW5VrKYYW/BELIn7vnLyn+U/RFdZ4lxxt9/8yq3DKV5snvoDzzO4ClyejZRdV3lMQ==", "dependencies": { - "@algolia/cache-common": "4.22.0", - "@algolia/logger-common": "4.22.0", - "@algolia/requester-common": "4.22.0" + "@algolia/cache-common": "4.22.1", + "@algolia/logger-common": "4.22.1", + "@algolia/requester-common": "4.22.1" } }, "node_modules/@ampproject/remapping": { @@ -2408,11 +2427,13 @@ }, "node_modules/@docsearch/css": { "version": "3.5.2", - "license": "MIT" + "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.5.2.tgz", + "integrity": "sha512-SPiDHaWKQZpwR2siD0KQUwlStvIAnEyK6tAE2h2Wuoq8ue9skzhlyVQ1ddzOxX6khULnAALDiR/isSF3bnuciA==" }, "node_modules/@docsearch/react": { "version": "3.5.2", - "license": "MIT", + "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.5.2.tgz", + "integrity": "sha512-9Ahcrs5z2jq/DcAvYtvlqEBHImbm4YJI8M9y0x6Tqg598P40HTEkX7hsMcIuThI+hTFxRGZ9hll0Wygm2yEjng==", "dependencies": { "@algolia/autocomplete-core": "1.9.3", "@algolia/autocomplete-preset-algolia": "1.9.3", @@ -2441,8 +2462,9 @@ } }, "node_modules/@docusaurus/core": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.1.1.tgz", + "integrity": "sha512-2nQfKFcf+MLEM7JXsXwQxPOmQAR6ytKMZVSx7tVi9HEm9WtfwBH1fp6bn8Gj4zLUhjWKCLoysQ9/Wm+EZCQ4yQ==", "dependencies": { "@babel/core": "^7.23.3", "@babel/generator": "^7.23.3", @@ -2454,13 +2476,13 @@ "@babel/runtime": "^7.22.6", "@babel/runtime-corejs3": "^7.22.6", "@babel/traverse": "^7.22.8", - "@docusaurus/cssnano-preset": "3.1.0", - "@docusaurus/logger": "3.1.0", - "@docusaurus/mdx-loader": "3.1.0", + "@docusaurus/cssnano-preset": "3.1.1", + 
"@docusaurus/logger": "3.1.1", + "@docusaurus/mdx-loader": "3.1.1", "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/utils": "3.1.0", - "@docusaurus/utils-common": "3.1.0", - "@docusaurus/utils-validation": "3.1.0", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-common": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "@slorber/static-site-generator-webpack-plugin": "^4.0.7", "@svgr/webpack": "^6.5.1", "autoprefixer": "^10.4.14", @@ -2611,8 +2633,9 @@ "license": "ISC" }, "node_modules/@docusaurus/cssnano-preset": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.1.1.tgz", + "integrity": "sha512-LnoIDjJWbirdbVZDMq+4hwmrTl2yHDnBf9MLG9qyExeAE3ac35s4yUhJI8yyTCdixzNfKit4cbXblzzqMu4+8g==", "dependencies": { "cssnano-preset-advanced": "^5.3.10", "postcss": "^8.4.26", @@ -2624,8 +2647,9 @@ } }, "node_modules/@docusaurus/logger": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.1.1.tgz", + "integrity": "sha512-BjkNDpQzewcTnST8trx4idSoAla6zZ3w22NqM/UMcFtvYJgmoE4layuTzlfql3VFPNuivvj7BOExa/+21y4X2Q==", "dependencies": { "chalk": "^4.1.2", "tslib": "^2.6.0" @@ -2636,7 +2660,8 @@ }, "node_modules/@docusaurus/logger/node_modules/ansi-styles": { "version": "4.3.0", - "license": "MIT", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dependencies": { "color-convert": "^2.0.1" }, @@ -2649,7 +2674,8 @@ }, "node_modules/@docusaurus/logger/node_modules/chalk": { "version": "4.1.2", - "license": "MIT", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -2663,7 +2689,8 @@ }, "node_modules/@docusaurus/logger/node_modules/color-convert": { "version": "2.0.1", - "license": "MIT", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "dependencies": { "color-name": "~1.1.4" }, @@ -2673,18 +2700,21 @@ }, "node_modules/@docusaurus/logger/node_modules/color-name": { "version": "1.1.4", - "license": "MIT" + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" }, "node_modules/@docusaurus/logger/node_modules/has-flag": { "version": "4.0.0", - "license": "MIT", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "engines": { "node": ">=8" } }, "node_modules/@docusaurus/logger/node_modules/supports-color": { "version": "7.2.0", - "license": "MIT", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dependencies": { "has-flag": "^4.0.0" }, @@ -2693,10 +2723,11 @@ } }, "node_modules/@docusaurus/lqip-loader": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": 
"https://registry.npmjs.org/@docusaurus/lqip-loader/-/lqip-loader-3.1.1.tgz", + "integrity": "sha512-s06lySAX5ghCiQe0+/GaMWcVvgkBQ6U8p182fW+JbdjxABS8ecx2in2AQJbvrwKNgiMjOhsXiaE6BmbQAmT6nw==", "dependencies": { - "@docusaurus/logger": "3.1.0", + "@docusaurus/logger": "3.1.1", "file-loader": "^6.2.0", "lodash": "^4.17.21", "sharp": "^0.32.3", @@ -2707,14 +2738,15 @@ } }, "node_modules/@docusaurus/mdx-loader": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.1.1.tgz", + "integrity": "sha512-xN2IccH9+sv7TmxwsDJNS97BHdmlqWwho+kIVY4tcCXkp+k4QuzvWBeunIMzeayY4Fu13A6sAjHGv5qm72KyGA==", "dependencies": { "@babel/parser": "^7.22.7", "@babel/traverse": "^7.22.8", - "@docusaurus/logger": "3.1.0", - "@docusaurus/utils": "3.1.0", - "@docusaurus/utils-validation": "3.1.0", + "@docusaurus/logger": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "@mdx-js/mdx": "^3.0.0", "@slorber/remark-comment": "^1.0.0", "escape-html": "^1.0.3", @@ -2746,11 +2778,12 @@ } }, "node_modules/@docusaurus/module-type-aliases": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.1.1.tgz", + "integrity": "sha512-xBJyx0TMfAfVZ9ZeIOb1awdXgR4YJMocIEzTps91rq+hJDFJgJaylDtmoRhUxkwuYmNK1GJpW95b7DLztSBJ3A==", "dependencies": { "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/types": "3.1.0", + "@docusaurus/types": "3.1.1", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2764,14 +2797,15 @@ } }, "node_modules/@docusaurus/plugin-client-redirects": { - "version": "3.1.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.1.0", - "@docusaurus/logger": "3.1.0", - "@docusaurus/utils": "3.1.0", - "@docusaurus/utils-common": "3.1.0", - "@docusaurus/utils-validation": "3.1.0", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-client-redirects/-/plugin-client-redirects-3.1.1.tgz", + "integrity": "sha512-J/1Z75XkO+BmUXHW17FrCIYZQ3b0IKaJECH6yCxW5RQ8NMMJ+SZCtPtx5oYoAd0VHersNiUu+ZAxfOqbsn1jKQ==", + "dependencies": { + "@docusaurus/core": "3.1.1", + "@docusaurus/logger": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-common": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "eta": "^2.2.0", "fs-extra": "^11.1.1", "lodash": "^4.17.21", @@ -2786,16 +2820,17 @@ } }, "node_modules/@docusaurus/plugin-content-blog": { - "version": "3.1.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.1.0", - "@docusaurus/logger": "3.1.0", - "@docusaurus/mdx-loader": "3.1.0", - "@docusaurus/types": "3.1.0", - "@docusaurus/utils": "3.1.0", - "@docusaurus/utils-common": "3.1.0", - "@docusaurus/utils-validation": "3.1.0", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.1.1.tgz", + "integrity": "sha512-ew/3VtVoG3emoAKmoZl7oKe1zdFOsI0NbcHS26kIxt2Z8vcXKCUgK9jJJrz0TbOipyETPhqwq4nbitrY3baibg==", + "dependencies": { + "@docusaurus/core": "3.1.1", + "@docusaurus/logger": "3.1.1", + "@docusaurus/mdx-loader": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-common": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "cheerio": "^1.0.0-rc.12", "feed": "^4.2.2", "fs-extra": "^11.1.1", @@ -2816,16 +2851,17 @@ } }, "node_modules/@docusaurus/plugin-content-docs": { - "version": "3.1.0", - "license": "MIT", 
- "dependencies": { - "@docusaurus/core": "3.1.0", - "@docusaurus/logger": "3.1.0", - "@docusaurus/mdx-loader": "3.1.0", - "@docusaurus/module-type-aliases": "3.1.0", - "@docusaurus/types": "3.1.0", - "@docusaurus/utils": "3.1.0", - "@docusaurus/utils-validation": "3.1.0", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.1.1.tgz", + "integrity": "sha512-lhFq4E874zw0UOH7ujzxnCayOyAt0f9YPVYSb9ohxrdCM8B4szxitUw9rIX4V9JLLHVoqIJb6k+lJJ1jrcGJ0A==", + "dependencies": { + "@docusaurus/core": "3.1.1", + "@docusaurus/logger": "3.1.1", + "@docusaurus/mdx-loader": "3.1.1", + "@docusaurus/module-type-aliases": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "@types/react-router-config": "^5.0.7", "combine-promises": "^1.1.0", "fs-extra": "^11.1.1", @@ -2844,14 +2880,15 @@ } }, "node_modules/@docusaurus/plugin-content-pages": { - "version": "3.1.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.1.0", - "@docusaurus/mdx-loader": "3.1.0", - "@docusaurus/types": "3.1.0", - "@docusaurus/utils": "3.1.0", - "@docusaurus/utils-validation": "3.1.0", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.1.1.tgz", + "integrity": "sha512-NQHncNRAJbyLtgTim9GlEnNYsFhuCxaCNkMwikuxLTiGIPH7r/jpb7O3f3jUMYMebZZZrDq5S7om9a6rvB/YCA==", + "dependencies": { + "@docusaurus/core": "3.1.1", + "@docusaurus/mdx-loader": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "fs-extra": "^11.1.1", "tslib": "^2.6.0", "webpack": "^5.88.1" @@ -2865,12 +2902,13 @@ } }, "node_modules/@docusaurus/plugin-debug": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.1.1.tgz", + "integrity": "sha512-xWeMkueM9wE/8LVvl4+Qf1WqwXmreMjI5Kgr7GYCDoJ8zu4kD+KaMhrh7py7MNM38IFvU1RfrGKacCEe2DRRfQ==", "dependencies": { - "@docusaurus/core": "3.1.0", - "@docusaurus/types": "3.1.0", - "@docusaurus/utils": "3.1.0", + "@docusaurus/core": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils": "3.1.1", "fs-extra": "^11.1.1", "react-json-view-lite": "^1.2.0", "tslib": "^2.6.0" @@ -2884,12 +2922,13 @@ } }, "node_modules/@docusaurus/plugin-google-analytics": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.1.1.tgz", + "integrity": "sha512-+q2UpWTqVi8GdlLoSlD5bS/YpxW+QMoBwrPrUH/NpvpuOi0Of7MTotsQf9JWd3hymZxl2uu1o3PIrbpxfeDFDQ==", "dependencies": { - "@docusaurus/core": "3.1.0", - "@docusaurus/types": "3.1.0", - "@docusaurus/utils-validation": "3.1.0", + "@docusaurus/core": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "tslib": "^2.6.0" }, "engines": { @@ -2901,12 +2940,13 @@ } }, "node_modules/@docusaurus/plugin-google-gtag": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.1.1.tgz", + "integrity": "sha512-0mMPiBBlQ5LFHTtjxuvt/6yzh8v7OxLi3CbeEsxXZpUzcKO/GC7UA1VOWUoBeQzQL508J12HTAlR3IBU9OofSw==", "dependencies": { - "@docusaurus/core": "3.1.0", - "@docusaurus/types": "3.1.0", - "@docusaurus/utils-validation": "3.1.0", + "@docusaurus/core": "3.1.1", + "@docusaurus/types": "3.1.1", + 
"@docusaurus/utils-validation": "3.1.1", "@types/gtag.js": "^0.0.12", "tslib": "^2.6.0" }, @@ -2919,12 +2959,13 @@ } }, "node_modules/@docusaurus/plugin-google-tag-manager": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.1.1.tgz", + "integrity": "sha512-d07bsrMLdDIryDtY17DgqYUbjkswZQr8cLWl4tzXrt5OR/T/zxC1SYKajzB3fd87zTu5W5klV5GmUwcNSMXQXA==", "dependencies": { - "@docusaurus/core": "3.1.0", - "@docusaurus/types": "3.1.0", - "@docusaurus/utils-validation": "3.1.0", + "@docusaurus/core": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "tslib": "^2.6.0" }, "engines": { @@ -2936,15 +2977,16 @@ } }, "node_modules/@docusaurus/plugin-ideal-image": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-ideal-image/-/plugin-ideal-image-3.1.1.tgz", + "integrity": "sha512-FYce5eV5/fWO4qIG8sKYdK3MTwusdxQML/M62IiltUNM/cqFkDrty1d+H+/I2PYX1s7AOoL3YomdJNP4vra/Tg==", "dependencies": { - "@docusaurus/core": "3.1.0", - "@docusaurus/lqip-loader": "3.1.0", + "@docusaurus/core": "3.1.1", + "@docusaurus/lqip-loader": "3.1.1", "@docusaurus/responsive-loader": "^1.7.0", - "@docusaurus/theme-translations": "3.1.0", - "@docusaurus/types": "3.1.0", - "@docusaurus/utils-validation": "3.1.0", + "@docusaurus/theme-translations": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "@slorber/react-ideal-image": "^0.0.12", "react-waypoint": "^10.3.0", "sharp": "^0.32.3", @@ -2966,15 +3008,16 @@ } }, "node_modules/@docusaurus/plugin-sitemap": { - "version": "3.1.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.1.0", - "@docusaurus/logger": "3.1.0", - "@docusaurus/types": "3.1.0", - "@docusaurus/utils": "3.1.0", - "@docusaurus/utils-common": "3.1.0", - "@docusaurus/utils-validation": "3.1.0", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.1.1.tgz", + "integrity": "sha512-iJ4hCaMmDaUqRv131XJdt/C/jJQx8UreDWTRqZKtNydvZVh/o4yXGRRFOplea1D9b/zpwL1Y+ZDwX7xMhIOTmg==", + "dependencies": { + "@docusaurus/core": "3.1.1", + "@docusaurus/logger": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-common": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "fs-extra": "^11.1.1", "sitemap": "^7.1.1", "tslib": "^2.6.0" @@ -2988,22 +3031,23 @@ } }, "node_modules/@docusaurus/preset-classic": { - "version": "3.1.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.1.0", - "@docusaurus/plugin-content-blog": "3.1.0", - "@docusaurus/plugin-content-docs": "3.1.0", - "@docusaurus/plugin-content-pages": "3.1.0", - "@docusaurus/plugin-debug": "3.1.0", - "@docusaurus/plugin-google-analytics": "3.1.0", - "@docusaurus/plugin-google-gtag": "3.1.0", - "@docusaurus/plugin-google-tag-manager": "3.1.0", - "@docusaurus/plugin-sitemap": "3.1.0", - "@docusaurus/theme-classic": "3.1.0", - "@docusaurus/theme-common": "3.1.0", - "@docusaurus/theme-search-algolia": "3.1.0", - "@docusaurus/types": "3.1.0" + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.1.1.tgz", + "integrity": "sha512-jG4ys/hWYf69iaN/xOmF+3kjs4Nnz1Ay3CjFLDtYa8KdxbmUhArA9HmP26ru5N0wbVWhY+6kmpYhTJpez5wTyg==", + "dependencies": { + "@docusaurus/core": "3.1.1", + "@docusaurus/plugin-content-blog": "3.1.1", + 
"@docusaurus/plugin-content-docs": "3.1.1", + "@docusaurus/plugin-content-pages": "3.1.1", + "@docusaurus/plugin-debug": "3.1.1", + "@docusaurus/plugin-google-analytics": "3.1.1", + "@docusaurus/plugin-google-gtag": "3.1.1", + "@docusaurus/plugin-google-tag-manager": "3.1.1", + "@docusaurus/plugin-sitemap": "3.1.1", + "@docusaurus/theme-classic": "3.1.1", + "@docusaurus/theme-common": "3.1.1", + "@docusaurus/theme-search-algolia": "3.1.1", + "@docusaurus/types": "3.1.1" }, "engines": { "node": ">=18.0" @@ -3047,21 +3091,22 @@ } }, "node_modules/@docusaurus/theme-classic": { - "version": "3.1.0", - "license": "MIT", - "dependencies": { - "@docusaurus/core": "3.1.0", - "@docusaurus/mdx-loader": "3.1.0", - "@docusaurus/module-type-aliases": "3.1.0", - "@docusaurus/plugin-content-blog": "3.1.0", - "@docusaurus/plugin-content-docs": "3.1.0", - "@docusaurus/plugin-content-pages": "3.1.0", - "@docusaurus/theme-common": "3.1.0", - "@docusaurus/theme-translations": "3.1.0", - "@docusaurus/types": "3.1.0", - "@docusaurus/utils": "3.1.0", - "@docusaurus/utils-common": "3.1.0", - "@docusaurus/utils-validation": "3.1.0", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.1.1.tgz", + "integrity": "sha512-GiPE/jbWM8Qv1A14lk6s9fhc0LhPEQ00eIczRO4QL2nAQJZXkjPG6zaVx+1cZxPFWbAsqSjKe2lqkwF3fGkQ7Q==", + "dependencies": { + "@docusaurus/core": "3.1.1", + "@docusaurus/mdx-loader": "3.1.1", + "@docusaurus/module-type-aliases": "3.1.1", + "@docusaurus/plugin-content-blog": "3.1.1", + "@docusaurus/plugin-content-docs": "3.1.1", + "@docusaurus/plugin-content-pages": "3.1.1", + "@docusaurus/theme-common": "3.1.1", + "@docusaurus/theme-translations": "3.1.1", + "@docusaurus/types": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-common": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "copy-text-to-clipboard": "^3.2.0", @@ -3092,16 +3137,17 @@ } }, "node_modules/@docusaurus/theme-common": { - "version": "3.1.0", - "license": "MIT", - "dependencies": { - "@docusaurus/mdx-loader": "3.1.0", - "@docusaurus/module-type-aliases": "3.1.0", - "@docusaurus/plugin-content-blog": "3.1.0", - "@docusaurus/plugin-content-docs": "3.1.0", - "@docusaurus/plugin-content-pages": "3.1.0", - "@docusaurus/utils": "3.1.0", - "@docusaurus/utils-common": "3.1.0", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.1.1.tgz", + "integrity": "sha512-38urZfeMhN70YaXkwIGXmcUcv2CEYK/2l4b05GkJPrbEbgpsIZM3Xc+Js2ehBGGZmfZq8GjjQ5RNQYG+MYzCYg==", + "dependencies": { + "@docusaurus/mdx-loader": "3.1.1", + "@docusaurus/module-type-aliases": "3.1.1", + "@docusaurus/plugin-content-blog": "3.1.1", + "@docusaurus/plugin-content-docs": "3.1.1", + "@docusaurus/plugin-content-pages": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-common": "3.1.1", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -3127,17 +3173,18 @@ } }, "node_modules/@docusaurus/theme-search-algolia": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.1.1.tgz", + "integrity": "sha512-tBH9VY5EpRctVdaAhT+b1BY8y5dyHVZGFXyCHgTrvcXQy5CV4q7serEX7U3SveNT9zksmchPyct6i1sFDC4Z5g==", "dependencies": { "@docsearch/react": "^3.5.2", - "@docusaurus/core": "3.1.0", - "@docusaurus/logger": "3.1.0", - "@docusaurus/plugin-content-docs": "3.1.0", - "@docusaurus/theme-common": 
"3.1.0", - "@docusaurus/theme-translations": "3.1.0", - "@docusaurus/utils": "3.1.0", - "@docusaurus/utils-validation": "3.1.0", + "@docusaurus/core": "3.1.1", + "@docusaurus/logger": "3.1.1", + "@docusaurus/plugin-content-docs": "3.1.1", + "@docusaurus/theme-common": "3.1.1", + "@docusaurus/theme-translations": "3.1.1", + "@docusaurus/utils": "3.1.1", + "@docusaurus/utils-validation": "3.1.1", "algoliasearch": "^4.18.0", "algoliasearch-helper": "^3.13.3", "clsx": "^2.0.0", @@ -3157,14 +3204,16 @@ }, "node_modules/@docusaurus/theme-search-algolia/node_modules/clsx": { "version": "2.1.0", - "license": "MIT", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.0.tgz", + "integrity": "sha512-m3iNNWpd9rl3jvvcBnu70ylMdrXt8Vlq4HYadnU5fwcOtvkSQWPmj7amUcDT2qYI7risszBjI5AUIUox9D16pg==", "engines": { "node": ">=6" } }, "node_modules/@docusaurus/theme-translations": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.1.1.tgz", + "integrity": "sha512-xvWQFwjxHphpJq5fgk37FXCDdAa2o+r7FX8IpMg+bGZBNXyWBu3MjZ+G4+eUVNpDhVinTc+j6ucL0Ain5KCGrg==", "dependencies": { "fs-extra": "^11.1.1", "tslib": "^2.6.0" @@ -3174,8 +3223,9 @@ } }, "node_modules/@docusaurus/types": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.1.1.tgz", + "integrity": "sha512-grBqOLnubUecgKFXN9q3uit2HFbCxTWX4Fam3ZFbMN0sWX9wOcDoA7lwdX/8AmeL20Oc4kQvWVgNrsT8bKRvzg==", "dependencies": { "@mdx-js/mdx": "^3.0.0", "@types/history": "^4.7.11", @@ -3193,10 +3243,11 @@ } }, "node_modules/@docusaurus/utils": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.1.1.tgz", + "integrity": "sha512-ZJfJa5cJQtRYtqijsPEnAZoduW6sjAQ7ZCWSZavLcV10Fw0Z3gSaPKA/B4micvj2afRZ4gZxT7KfYqe5H8Cetg==", "dependencies": { - "@docusaurus/logger": "3.1.0", + "@docusaurus/logger": "3.1.1", "@svgr/webpack": "^6.5.1", "escape-string-regexp": "^4.0.0", "file-loader": "^6.2.0", @@ -3227,8 +3278,9 @@ } }, "node_modules/@docusaurus/utils-common": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.1.1.tgz", + "integrity": "sha512-eGne3olsIoNfPug5ixjepZAIxeYFzHHnor55Wb2P57jNbtVaFvij/T+MS8U0dtZRFi50QU+UPmRrXdVUM8uyMg==", "dependencies": { "tslib": "^2.6.0" }, @@ -3245,11 +3297,12 @@ } }, "node_modules/@docusaurus/utils-validation": { - "version": "3.1.0", - "license": "MIT", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.1.1.tgz", + "integrity": "sha512-KlY4P9YVDnwL+nExvlIpu79abfEv6ZCHuOX4ZQ+gtip+Wxj0daccdReIWWtqxM/Fb5Cz1nQvUCc7VEtT8IBUAA==", "dependencies": { - "@docusaurus/logger": "3.1.0", - "@docusaurus/utils": "3.1.0", + "@docusaurus/logger": "3.1.1", + "@docusaurus/utils": "3.1.1", "joi": "^17.9.2", "js-yaml": "^4.1.0", "tslib": "^2.6.0" @@ -6355,7 +6408,8 @@ }, "node_modules/@types/gtag.js": { "version": "0.0.12", - "license": "MIT" + "resolved": "https://registry.npmjs.org/@types/gtag.js/-/gtag.js-0.0.12.tgz", + "integrity": "sha512-YQV9bUsemkzG81Ea295/nF/5GijnD2Af7QhEofh7xu+kvCN6RdodgNwwGWXB5GMI3NoyvQo0odNctoH/qLMIpg==" }, "node_modules/@types/hast": { "version": "3.0.3", @@ -6627,7 +6681,8 @@ }, "node_modules/@types/sax": { "version": "1.2.7", - "license": "MIT", + "resolved": 
"https://registry.npmjs.org/@types/sax/-/sax-1.2.7.tgz", + "integrity": "sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==", "dependencies": { "@types/node": "*" } @@ -7251,28 +7306,30 @@ } }, "node_modules/algoliasearch": { - "version": "4.22.0", - "license": "MIT", - "dependencies": { - "@algolia/cache-browser-local-storage": "4.22.0", - "@algolia/cache-common": "4.22.0", - "@algolia/cache-in-memory": "4.22.0", - "@algolia/client-account": "4.22.0", - "@algolia/client-analytics": "4.22.0", - "@algolia/client-common": "4.22.0", - "@algolia/client-personalization": "4.22.0", - "@algolia/client-search": "4.22.0", - "@algolia/logger-common": "4.22.0", - "@algolia/logger-console": "4.22.0", - "@algolia/requester-browser-xhr": "4.22.0", - "@algolia/requester-common": "4.22.0", - "@algolia/requester-node-http": "4.22.0", - "@algolia/transporter": "4.22.0" + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.22.1.tgz", + "integrity": "sha512-jwydKFQJKIx9kIZ8Jm44SdpigFwRGPESaxZBaHSV0XWN2yBJAOT4mT7ppvlrpA4UGzz92pqFnVKr/kaZXrcreg==", + "dependencies": { + "@algolia/cache-browser-local-storage": "4.22.1", + "@algolia/cache-common": "4.22.1", + "@algolia/cache-in-memory": "4.22.1", + "@algolia/client-account": "4.22.1", + "@algolia/client-analytics": "4.22.1", + "@algolia/client-common": "4.22.1", + "@algolia/client-personalization": "4.22.1", + "@algolia/client-search": "4.22.1", + "@algolia/logger-common": "4.22.1", + "@algolia/logger-console": "4.22.1", + "@algolia/requester-browser-xhr": "4.22.1", + "@algolia/requester-common": "4.22.1", + "@algolia/requester-node-http": "4.22.1", + "@algolia/transporter": "4.22.1" } }, "node_modules/algoliasearch-helper": { - "version": "3.16.1", - "license": "MIT", + "version": "3.16.2", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.16.2.tgz", + "integrity": "sha512-Yl/Gu5Cq4Z5s/AJ0jR37OPI1H3+z7PHz657ibyaXgMOaWvPlZ3OACN13N+7HCLPUlB0BN+8BtmrG/CqTilowBA==", "dependencies": { "@algolia/events": "^4.0.1" }, @@ -7446,7 +7503,8 @@ }, "node_modules/arg": { "version": "5.0.2", - "license": "MIT" + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==" }, "node_modules/argparse": { "version": "2.0.1", @@ -47857,7 +47915,8 @@ }, "node_modules/react-json-view-lite": { "version": "1.2.1", - "license": "MIT", + "resolved": "https://registry.npmjs.org/react-json-view-lite/-/react-json-view-lite-1.2.1.tgz", + "integrity": "sha512-Itc0g86fytOmKZoIoJyGgvNqohWSbh3NXIKNgH6W6FT9PC1ck4xas1tT3Rr/b3UlFXyA9Jjaw9QSXdZy2JwGMQ==", "engines": { "node": ">=14" }, @@ -50216,7 +50275,8 @@ }, "node_modules/search-insights": { "version": "2.13.0", - "license": "MIT", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.13.0.tgz", + "integrity": "sha512-Orrsjf9trHHxFRuo9/rzm0KIWmgzE8RMlZMzuhZOJ01Rnz3D0YBAe+V6473t6/H6c7irs6Lt48brULAiRWb3Vw==", "peer": true }, "node_modules/section-matter": { @@ -54159,7 +54219,8 @@ }, "node_modules/sitemap": { "version": "7.1.1", - "license": "MIT", + "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-7.1.1.tgz", + "integrity": "sha512-mK3aFtjz4VdJN0igpIJrinf3EO8U8mxOPsTBzSsy06UtjZQJ3YY3o3Xa7zSc5nMqcMrRwlChHZ18Kxg0caiPBg==", "dependencies": { "@types/node": "^17.0.5", "@types/sax": "^1.2.1", @@ -54176,7 +54237,8 @@ }, "node_modules/sitemap/node_modules/@types/node": { "version": 
"17.0.45", - "license": "MIT" + "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", + "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==" }, "node_modules/skin-tone": { "version": "2.0.0", diff --git a/package.json b/package.json index 5d1bf6574b..93bb5baed6 100644 --- a/package.json +++ b/package.json @@ -36,10 +36,10 @@ "dependencies": { "@commitlint/cli": "^17.6.7", "@commitlint/config-conventional": "^17.6.7", - "@docusaurus/core": "^3.1.0", - "@docusaurus/plugin-client-redirects": "^3.1.0", - "@docusaurus/plugin-ideal-image": "^3.1.0", - "@docusaurus/preset-classic": "^3.1.0", + "@docusaurus/core": "^3.1.1", + "@docusaurus/plugin-client-redirects": "^3.1.1", + "@docusaurus/plugin-ideal-image": "^3.1.1", + "@docusaurus/preset-classic": "^3.1.1", "@docusaurus/types": "^3.0.1", "@fortawesome/fontawesome-svg-core": "^6.4.0", "@fortawesome/free-solid-svg-icons": "^6.4.0",