diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/applications_setup.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/applications_setup.md
index 39369905bd..1dd511a7b9 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/applications_setup.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/applications_setup.md
@@ -75,7 +75,7 @@ hab sup run \
  --event-stream-token="API_TOKEN" \
```

-* [hab sup run]({{< relref "/habitat/habitat_cli#hab-sup-run" >}}) is the hab cli commant to start the Habitat supervisor.
+* [hab sup run]({{< relref "/habitat/habitat_cli#hab-sup-run" >}}) is the hab CLI command to start the Habitat supervisor.
* `MY_APP` is the name of your application. Chef Automate groups services by application name in the Applications Dashboard
* `MY_ENV` is the application environment for this supervisor. Chef Automate groups services by environment in the Applications Dashboard
* `MY_SITE` describes the physical (for example, datacenter) or cloud-specific (for example, the AWS region) location where your services are deployed. The site field is a value filtering for services in the Applications Dashboard.
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/config_opensearch.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/config_opensearch.md
index c89d77c88b..a6d10a7d5f 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/config_opensearch.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/config_opensearch.md
@@ -1,5 +1,5 @@
+++
-title = "HA OpenSearch Node Config"
+title = "High Availability OpenSearch Node Configuration"
draft = false

gh_repo = "automate"
@@ -15,9 +15,7 @@ gh_repo = "automate"
{{% automate/ha-warn %}}
{{< /note >}}

-## Configurations
-
-The OpenSearch node in Automate HA provides various configuration options that is patched to customize its behavior and meet specific requirements. This guide documents all the configurations that you can patch.
+The OpenSearch node in Chef Automate HA provides various configuration options that can be patched to customize its behavior and meet specific requirements. This guide documents all the configurations that you can patch.

The detailed document about how these individual properties affect the system is at [Official OpenSearch docs](https://opensearch.org/docs/1.3/)

@@ -25,7 +23,7 @@ Patch the below configuration to OpenSearch nodes. Please add the values you wan

Here's an explanation of each section in the OpenSearch TOML configuration file:

-### Action
+## Action

```toml
[action]
destructive_requires_name = "true"
@@ -34,7 +32,7 @@ destructive_requires_name = "true"
```

- This section configures action settings. Setting `destructive_requires_name` to `true` means that destructive actions, such as deleting indices or templates, require an explicit name to prevent accidental deletions.

-### Bootstrap
+## Bootstrap

```toml
[bootstrap]
memory_lock = false
@@ -43,7 +41,7 @@ memory_lock = false
```

Disables swapping (along with memlock). Swapping can dramatically decrease performance and stability, so you should ensure it is disabled on production clusters.

-### Cluster
+## Cluster

```toml
[cluster]
name = "opensearch"
max_shards_per_node= "2000"
@@ -55,7 +53,7 @@ max_shards_per_node= "2000"
```

- It sets the name of the OpenSearch cluster to "opensearch".
- We can use these settings to set the `max_shards_per_node` value for OpenSearch. The default value is 2000. 
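For example, to raise the shard limit you could patch just this section from the bastion host (a sketch; the file name `shard.toml` and the value `3000` are illustrative, not defaults):

```toml
[cluster]
max_shards_per_node = "3000"
```

Apply it with `chef-automate config patch shard.toml --os`, the same pattern as the heap-size example later in this guide.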
-### Discovery +## Discovery ```toml [discovery] @@ -67,7 +65,7 @@ ping_unicast_hosts = ["Os Node IP 1", "Os Node IP 2"] - Set `ping_unicast_hosts` to pass an initial list of hosts to perform discovery when a new node start. The default list of hosts is ["127.0.0.1", "[::1]"] - Set `minimum_master_nodes` to prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1): -### Gateway +## Gateway ```toml [gateway] @@ -76,7 +74,7 @@ recover_after_nodes = "" Set `recover_after_nodes` to block initial recovery after a full cluster restart until N nodes start. -### Logger +## Logger ```toml [logger] @@ -85,7 +83,7 @@ level = "info" This section configures logger settings. Allowed levels are trace, debug, info, warn, error, and fatal. -### Node +## Node ```toml [node] @@ -96,7 +94,7 @@ name = "" - Use `max_local_storage_nodes` to disable starting multiple nodes on a single system: - Use a descriptive name for the node by setting the `name` field -### OpenSearch Auth +## OpenSearch Auth ```toml [opensearch_auth] @@ -107,7 +105,7 @@ hashed_password = "" This section configures OpenSearch authentication settings. It sets the admin username and password and provides a hashed version of the password. -### Path +## Path ```toml [path] @@ -120,7 +118,7 @@ repo = "" - Use `logs` to set the path to your log files - Use `repo` to register the snapshot repository using OpenSearch. It is necessary to mount the same shared filesystem to the exact location on all master and data nodes. Register the location in the path.repo setting on all master and data nodes. -### Plugin Security +## Plugin Security ```toml [plugins.security] @@ -133,7 +131,7 @@ nodes_dn = "- " This section configures security plugin settings. It allows the default initialization of the security index, and unsafe demo certificates, checks snapshot and restore write privileges, enables snapshot and restore privileges, and specifies the nodes' distinguished names (DNs). -### Plugin Security Audit +## Plugin Security Audit ```toml [plugins.security.audit] @@ -142,7 +140,7 @@ type = "internal_opensearch" This section configures security audit settings. It specifies the type of audit logging as "internal_opensearch". -### Plugin Security Authcz +## Plugin Security Authcz ```toml [plugins.security.authcz] @@ -151,7 +149,7 @@ admin_dn = "- " This section specifies the distinguished name (DN) of the admin user. -### Plugin Security Restapi +## Plugin Security Restapi ```toml [plugins.security.restapi] @@ -160,7 +158,7 @@ roles_enabled = "[\"all_access\", \"security_rest_api_access\"]" This section configures security REST API settings. It enables certain roles, such as "all_access" and "security_rest_api_access". -### Plugin Security SSL HTTP +## Plugin Security SSL HTTP ```toml [plugins.security.ssl.http] @@ -172,7 +170,7 @@ pemtrustedcas_filepath = "certificates/root-ca.pem" This section configures SSL/TLS settings for HTTP. It enables SSL/TLS, specifying the certificate's file paths, private key, and trusted CA certificates. -### Plugin Security SSL Transport +## Plugin Security SSL Transport ```toml [plugins.security.ssl.transport] @@ -185,7 +183,7 @@ resolve_hostname = false This section configures SSL/TLS settings for transport layer communication. It disables hostname verification, specifies the file paths for the certificate, private key, and trusted CA certificates, and disables hostname resolution. 
-### Plugin Security System Indices +## Plugin Security System Indices ```toml [plugins.security.system_indices] @@ -197,7 +195,7 @@ opendistro-anomaly-checkpoints\", \".opendistro-anomaly-detection-state\", \".op This section configures system indices for the security plugin. It specifies the system indices that are enabled for various functionalities. -### Runtime +## Runtime ```toml [runtime] @@ -213,7 +211,7 @@ minHeapsize = "2g" This section configures runtime settings. It specifies various Java runtime options and heap sizes. -### S3 Client Default +## S3 Client Default ```toml [s3.client.default] @@ -226,7 +224,7 @@ use_throttle_retries = true This section configures the default S3 client settings. It specifies the S3 endpoint, the maximum number of retries, the protocol (HTTPS), the read timeout, and whether to use throttle retries. -### TLS +## TLS ```toml [tls] @@ -239,7 +237,7 @@ ssl_key = "Enter Private Key----" This section configures TLS settings. It specifies the file paths for the admin certificate, admin private key, root CA certificate, SSL certificate, and SSL private key. -### Full config for OpenSearch node +## Full config for OpenSearch node ```toml [action] @@ -350,7 +348,7 @@ ssl_key = "Enter Private Key----" port = 9300 ``` -#### Example +### Example To increase max heap size: @@ -363,6 +361,6 @@ maxHeapsize = "2g" - Run the patch command `chef-automate config patch heap.toml --os` to apply the patch. -### Centralized Logs +## Centralized Logs Take a tour of the main page to know about [Centralized logs](/automate/centralizing_log/). diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/datafeed.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/datafeed.md index acf02f1f4e..1b5fbc36d7 100644 --- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/datafeed.md +++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/datafeed.md @@ -47,7 +47,7 @@ Currently, the data feed has two types of integrations: * ELK * Custom * Storage Integration - * Minio + * MinIO * Amazon S3 {{< figure src="/images/automate/choose-a-data-feed-integration.png" alt="Choose a Data Feed Integration">}} @@ -189,9 +189,9 @@ You can also [**Enable/Disable**]({{< relref "#enabledisable-a-data-feed-instanc Create a data feed using a storage integration. -### Minio +### MinIO -To create a data feed select **Minio** from **Settings > Data Feed > New Integration**. +To create a data feed select **MinIO** from **Settings > Data Feed > New Integration**. 1. **Data Feed Name**: A unique name for this notification. 1. **Endpoint**: The endpoint for the data feed integration, including any specific port details. @@ -199,18 +199,18 @@ To create a data feed select **Minio** from **Settings > Data Feed > New Integra 1. Select **Test Connection** to start validating the connection details. 1. Once the test is successful, select **Save** to save the Data Feed configuration. -{{< figure src="/images/automate/data-feed-instance-using-minio-integration.png" alt="Data Feed Instance using Minio Integration">}} +{{< figure src="/images/automate/data-feed-instance-using-minio-integration.png" alt="Data Feed Instance using MinIO Integration">}} -#### Edit a Minio Data Feed Instance +#### Edit a MinIO Data Feed Instance -To edit a Data Feed instance of Minio Integration: +To edit a Data Feed instance of MinIO Integration: 1. Select the data feed instance name to open its detail page. 1. 
Edit the Data Feed **Name**, **End Point**, or the **Bucket**.
1. Select the **Test Data Feed** button to test the Endpoint.
1. Select **Save** to save your changes.

-{{< figure src="/images/automate/details-of-data-feed-instance-using-minio-integration.png" alt="Details of Data Feed Instance using Minio Integration">}}
+{{< figure src="/images/automate/details-of-data-feed-instance-using-minio-integration.png" alt="Details of Data Feed Instance using MinIO Integration">}}

You can also [**Enable/Disable**]({{< relref "#enabledisable-a-data-feed-instance" >}}), and [**Delete**]({{< relref "#delete-a-data-feed-instance" >}}) the instance from the buttons provided on the details page.
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_automate_to_automate_ha.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_automate_to_automate_ha.md
index 51ebe51cc6..68ef17acda 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_automate_to_automate_ha.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_automate_to_automate_ha.md
@@ -26,7 +26,7 @@ Follow the steps below when migrating to On-Premises or AWS HA deployment **(but

1. Create a Backup of Chef Automate Standalone using the following command:

-    1. Run the below command to create the backup in the `/var/opt/chef-automate/backups` location unless you specify the location in the `config.toml` file.
+    1. Run the below command to create the backup in the `/var/opt/chef-automate/backups` location unless you specify a different location in the `config.toml` file.

    ```bash
    chef-automate backup create
    ```

@@ -41,10 +41,10 @@ Follow the steps below when migrating to On-Premises or AWS HA deployment **(but

    ```

-1. Copy the backup folder to bootstrapped Automate node of Automate HA using the following command:
+1. Copy the backup folder to the first Automate node of Automate HA using the following command:

    ```bash
-    scp -i -r @:/home/
+    scp -i -r @:/home//
    ```

1. Copy the `bootstrap.abb` file to all the Chef Automate HA FrontEnd Nodes (both Chef Automate and Chef Infra Server) using the following command:
@@ -61,7 +61,7 @@ Follow the steps below when migrating to On-Premises or AWS HA deployment **(but

    ```bash
    [path]
-    repo = "/opensearch"
+    repo = "DIRECTORY-PATH-WHERE-AUTOMATE-STANDALONE-BACKUP-FOLDER-COPIED"
    ```

    The following command will patch the configuration in all the OpenSearch nodes, run this command from bastion.
@@ -77,9 +77,9 @@ Follow the steps below when migrating to On-Premises or AWS HA deployment **(but
    enable = true
    location = "fs"
    [global.v1.external.opensearch.backup.fs]
-    path = "/opensearch"
+    path = "DIRECTORY-PATH-WHERE-AUTOMATE-STANDALONE-BACKUP-FOLDER-COPIED"
    [global.v1.backups.filesystem]
-    path = "/backups"
+    path = "DIRECTORY-PATH-WHERE-AUTOMATE-STANDALONE-BACKUP-FOLDER-COPIED"
    ```

    The following command will patch the configuration in all the Frontend nodes, run this command on bastion:

    ```bash
    chef-automate config patch --fe
    ```

@@ -88,7 +88,7 @@ Follow the steps below when migrating to On-Premises or AWS HA deployment **(but

-1. Run the following command on the bootstrapped Automate node of Automate HA cluster to get the current config:
+1. 
Run the following command on the first Automate node of Automate HA cluster to get the current config:

    ```bash
    sudo chef-automate config show > current_config.toml
    ```
@@ -112,13 +112,13 @@ Follow the steps below when migrating to On-Premises or AWS HA deployment **(but
    chef-automate bootstrap bundle unpack bootstrap.abb
    ```

-1. Stop all the frontend nodes except boostrap automate node in Automate HA Cluster. Run the following command to all the Automate and Chef Infra Server nodes:
+1. Stop all the frontend nodes except the first Automate node in the Automate HA cluster. Run the following command on all the Automate and Chef Infra Server nodes:

    ``` bash
    sudo chef-automate stop
    ```

-1. Restore in Chef-Automate HA using the following command in boostrap automate node :
+1. Restore in Chef Automate HA using the following command on the first Automate node. (Here, `/mnt/automate_backups/` is an example path where the backup ID folder is copied.)

    ```bash
    chef-automate backup restore /mnt/automate_backups// --patch-config current_config.toml --airgap-bundle /var/tmp/frontend-${automate_version_number}.aib --skip-preflight
    ```
@@ -137,8 +137,7 @@ Follow the steps below when migrating to On-Premises or AWS HA deployment **(but

1. Make EFS volume and attach that volume to the existing automate and Automate HA nodes.
1. Mount EFS Volume:
-    - In Automate, we are mounting that EFS volume at the `/var/opt/chef-automate/backups` location unless you specify the location in the `config.toml` file.
-    - In HA, we are mounting that EFS volume at `` for example `/mnt/automate_backups`. (You need to mount this volume in all the HA nodes).
+    - In Automate, if you are mounting at `/nfs/automate_backups`, make sure the same location is mounted in the Automate HA cluster.

    Make sure that the `hab` user has permission to access the location.

@@ -149,7 +148,7 @@ Follow the steps below when migrating to On-Premises or AWS HA deployment **(but

    ```bash
    [path]
-    repo = "/path/to/automate_backups/opensearch"
+    repo = "/nfs/automate_backups"
    ```

    The following command will patch the configuration in all the OpenSearch nodes, run this command from bastion.
@@ -165,9 +164,9 @@ Follow the steps below when migrating to On-Premises or AWS HA deployment **(but
    enable = true
    location = "fs"
    [global.v1.external.opensearch.backup.fs]
-    path = "/path/to/automate_backups/opensearch"
+    path = "/nfs/automate_backups"
    [global.v1.backups.filesystem]
-    path = "/path/to/automate_backups/backups"
+    path = "/nfs/automate_backups"
    ```

    The following command will patch the configuration in all the Frontend nodes, run this command on bastion:

@@ -178,7 +177,7 @@ Follow the steps below when migrating to On-Premises or AWS HA deployment **(but

1. Create a Backup of Chef Automate Standalone using the following command:

-    1. Run the below command to create the backup in the `/var/opt/chef-automate/backups` location unless you specify the location in the `config.toml` file.
+    1. Run the below command to create the backup in the `/nfs/automate_backups` location unless you specify the location in the `config.toml` file.

    ```bash
    chef-automate backup create
    ```

@@ -192,7 +191,7 @@ Follow the steps below when migrating to On-Premises or AWS HA deployment **(but
    chef-automate bootstrap bundle create bootstrap.abb
    ```

-1. Run the following command on the bootstrapped Automate node of Automate HA cluster to get the current config:
+1. 
Run the following command on the first Automate node of Automate HA cluster to get the current config: ```bash sudo chef-automate config show > current_config.toml @@ -233,7 +232,7 @@ Follow the steps below when migrating to On-Premises or AWS HA deployment **(but 1. Restore in Chef-Automate HA using the following command: ```bash - chef-automate backup restore /mnt/automate_backups// --patch-config current_config.toml --airgap-bundle /var/tmp/frontend-${automate_version_number}.aib --skip-preflight + chef-automate backup restore /nfs/automate_backups// --patch-config current_config.toml --airgap-bundle /var/tmp/frontend-${automate_version_number}.aib --skip-preflight ## Refer to the `/var/tmp/frontend-4.x.y.aib` file name for the exact version number. ``` @@ -363,7 +362,7 @@ For AWS managed services, map the snapshot role to the OpenSearch dashboard. It chef-automate config patch --frontend automate.toml ``` -1. Run the following command on the bootstrapped Automate node of Automate HA cluster to get the current config: +1. Run the following command on the first Automate node of Automate HA cluster to get the current config: ```bash sudo chef-automate config show > current_config.toml diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_aws_deploy_steps.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_aws_deploy_steps.md index 95131f7439..f8dee6f70c 100644 --- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_aws_deploy_steps.md +++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_aws_deploy_steps.md @@ -19,7 +19,7 @@ Follow the steps below to deploy Chef Automate High Availability (HA) on AWS (Am {{< warning >}} -- PLEASE DO NOT MODIFY THE WORKSPACE PATH. It should always be "/hab/a2_deploy_workspace". +- Do not modify the workspace path. It should always be `/hab/a2_deploy_workspace`. - We currently don't support AD managed users in nodes. We only support local Linux users. - If you have configured a sudo password for the user, you must create an environment variable `sudo_password` and set the password as the variable's value. Example: `export sudo_password=`. And then, run all sudo commands with the `sudo -E or --preserve-env` option. Example: `sudo -E ./chef-automate deploy config.toml --airgap-bundle automate.aib`. This is required for the `chef-automate` CLI to run the commands with sudo privileges. Please refer [this](/automate/ha_sudo_password/) for details. - If SELinux is enabled, deployment with configure it to `permissive` (Usually in case of RHEL SELinux is enabled) @@ -55,11 +55,15 @@ Run the following steps on Bastion Host Machine: " ``` - {{< note >}} Chef Automate bundles are available for 365 days from the release of a version. However, the milestone release bundles are available for download forever. {{< /note >}} + {{< note spaces=4 >}} + + Chef Automate bundles are available for 365 days from the release of a version. However, the milestone release bundles are available for download forever. + + {{< /note >}} ## Steps to Generate Config -1. Generate config with relevant data using the below command: +1. Generate a configuration file with relevant data. 
```bash
    chef-automate config gen config.toml
    ```

    Click [here](/automate/ha_config_gen) to know more about generating config

-    {{< note >}} You can also generate config using **init config** and then generate init config for existing infrastructure. The command is as shown below:
+    {{< note spaces=4 >}}
+
+    You can also generate a configuration file using the `init-config` subcommand.

-    chef-automate init-config-ha aws{{< /note >}}
+        chef-automate init-config-ha aws
+
+    {{< /note >}}

## Steps to Provision

@@ -93,24 +101,24 @@ Run the following steps on Bastion Host Machine:

    sudo chef-automate verify -c config.toml
    ```

-    To know more about config verify you can check [Config Verify Doc page](/automate/ha_verification_check/).
+    To know more about config verify, you can check [Config Verify Doc page](/automate/ha_verification_check/).

## Steps to Deploy

-1. The following command will run the deployment. The deploy command will run the verify command internally, to skip a verification process during deploy command use `--skip-verify` flag
+1. The following command will run the deployment. The deployment command runs the verify command internally. To skip the verification process during deployment, use the `--skip-verify` flag.

    ```bash
    chef-automate deploy config.toml --airgap-bundle automate.aib
    ```

-    To skip verification in the deploy command, use `--skip-verify` flag
+    To skip verification in the deployment command, use the `--skip-verify` flag.

    ```bash
    chef-automate deploy config.toml --airgap-bundle automate.aib --skip-verify
    ```

## Verify Deployment

-1. Once the deployment is successful, Get the consolidate status of the cluster
+1. Once the deployment is successful, get the consolidated status of the cluster.

    ```bash
    chef-automate status summary
    ```
@@ -122,19 +130,19 @@ Run the following steps on Bastion Host Machine:
    chef-automate status
    ```

-1. Post Deployment, you can run the verification command
+1. Post deployment, you can run the verification command.

    ```bash
    chef-automate verify
    ```

-1. Get the cluster Info
+1. Get the cluster info.

    ```bash
    chef-automate info
    ```

-1. After the deployment is completed. To view the automate UI, run the command `chef-automate info`, and you will get the `automate_url`. If you want to change the FQDN URL from the load balancer URL to some other FQDN URL, then use the below template.
+1. After the deployment is completed, to view the Automate UI, run the command `chef-automate info`, and you will get the `automate_url`. If you want to change the FQDN URL from the load balancer URL to some other FQDN URL, then use the below template.

    - Create a file `a2.fqdn.toml`
@@ -170,32 +178,34 @@ Run the following steps on Bastion Host Machine:

Check if Chef Automate UI is accessible by going to (Domain used for Chef Automate) [https://chefautomate.example.com](https://chefautomate.example.com).

-After successful deployment, proceed with following:
+After successful deployment, proceed with the following:

    1. Create user and orgs, Click [here](/automate/ha_node_bootstraping/#create-users-and-organization) to learn more about user and org creation
    1. Workstation setup, Click [here](/automate/ha_node_bootstraping/#workstation-setup) to learn more about workstation setup
-    1. Node bootstrapping, Click [here](/automate/ha_node_bootstraping/#bootstraping-a-node) to learn more about node bootstrapping.
+    1. 
Node bootstrapping, Click [here](/automate/ha_node_bootstraping/#bootstraping-a-node) to learn more about node bootstrapping. ## Sample Config {{< note >}} -Assuming 10+1 nodes (1 bastion, 2 for automate UI, 2 for Chef-server, 3 for Postgresql, 3 for OpenSearch) + +Assuming 10+1 nodes (1 bastion, 2 for Automate UI, 2 for Chef-server, 3 for Postgresql, 3 for OpenSearch) + {{< /note >}} {{< note >}} -- User only needs to create/set up **the bastion node** with the IAM role of Admin access and s3 bucket access attached. -- The following config will create an s3 bucket for backup. +- User only needs to create/set up **the bastion node** with the IAM role of Admin access and S3 bucket access attached. +- The following config will create an S3 bucket for backup. - To provide multiline certificates use triple quotes like `""" multiline certificate contents"""` {{< /note >}} -```config +```toml [architecture] [architecture.aws] ssh_user = "ec2-user" ssh_group_name = "ec2-user" - ssh_key_file = "~/.ssh/my-key.pem" + ssh_key_file = "/home/ec2-user/KEY_FILENAME.pem" ssh_port = "22" secrets_key_file = "/hab/a2_deploy_workspace/secrets.key" secrets_store_file = "/hab/a2_deploy_workspace/secrets.json" @@ -275,7 +285,7 @@ OR chef-automate cleanup --aws-deployment ``` -Following the `cleanup` command the following command can be used to remove the deployment workspace in the Bastion machine. This will also remove the logs file inside the workspace. +After the `cleanup` command, the following command can be used to remove the deployment workspace in the Bastion machine. This will also remove the logs file inside the workspace. ```bash hab pkg uninstall chef/automate-ha-deployment diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_aws_deployment_prerequisites.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_aws_deployment_prerequisites.md index b2a1e78348..8cee7735b9 100644 --- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_aws_deployment_prerequisites.md +++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_aws_deployment_prerequisites.md @@ -25,7 +25,7 @@ Before installing Chef automate HA in AWS deployment mode, ensure you have taken We recommend using 11 node cluster for standard Automate HA AWS deployment, as detailed in the table below: | Service Type | Count | -| ----------------- | ----- | +|-------------------|-------| | Chef Automate | 2 | | Chef Infra Server | 2 | | PostgreSQL DB | 3 | @@ -38,30 +38,30 @@ We recommend using Chef Infra Server managed by Automate HA to have high availab ## Software Requirements -The software requirements of the nodes in the cluster and other external Chef and non Chef tools are discussed below: +The software requirements of the nodes in the cluster and other external Chef and non-Chef tools are discussed below: ### Node Software Requirements The operating system and the supported version for different nodes in AWS deployment of Automate HA are mentioned below: | Operating Systems | Supported Version | -| :----------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
+|:-------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Red Hat Enterprise Linux (64 Bit OS) | 7, 8. For 8 or above versions, the **SELinux** configuration must be permissive. The **SELinux** configuration is enforced in RHEL 8. Red Hat Enterprise Linux derivatives include Amazon Linux v1 (using RHEL 6 packages) and v2 (using RHEL 7packages). | | Ubuntu (64 Bit OS) | 16.04.x, 18.04.x, 20.04.x | | Centos (64 Bit OS) | 7 | | Amazon Linux 2 (64 Bit OS) | 2 (kernel 5.10) | -| SUSE Linux Enterprise Server | 12.5 - | +| SUSE Linux Enterprise Server | 12.5 | Please provide AMI Id as per above list of supported operating systems. + ### Minimum Supported Chef Tool Versions Current Automate HA supports integration with the following Chef tools: -- Chef Infra Server version: 14.0.58+ -- Chef Inspec version: 4.3.2+ -- Chef Infra Client: 17.0.242+ -- Chef Habitat: 0.81+ +- Chef Infra Server version: 14.0.58+ +- Chef Inspec version: 4.3.2+ +- Chef Infra Client: 17.0.242+ +- Chef Habitat: 0.81+ We do not support **Chef Manage** integration in the ongoing Automate version. @@ -93,8 +93,8 @@ Current Automate HA integrates with the following non-Chef tools: ### Minimum Hardware Requirement -| Instance | Count | vCPU | RAM | Storage Size(/hab) | AWS Machine Type | Additional Space | -| ----------------- | ----- | ---- | --- | ------------------ | ---------------- | ----------------- | +| Instance | Count | vCPU | RAM | Storage Size(/hab) | AWS Machine Type | Additional Space | +|-------------------|-------|------|-----|--------------------|------------------|-----------------------| | Chef Automate | 2 | 2 | 8 | 200 GB | m5.large | /var/tmp=5% /root=20% | | Chef Infra Server | 2 | 2 | 8 | 200 GB | m5.large | /var/tmp=5% /root=20% | | PostgreSQL DB | 3 | 2 | 8 | 200 GB | m5.large | /var/tmp=5% /root=20% | @@ -114,7 +114,7 @@ The Chef Automate HA cluster requires multiple ports for the frontend and backen **Ports for Bastion before deployment** | Port No. | Outgoing to | Incoming from | -| -------- | ----------- | ------------- | +|----------|-------------|---------------| | 22 | Subnet | All | | 80 | | Internet | | 443 | | Internet | @@ -123,22 +123,22 @@ The Chef Automate HA cluster requires multiple ports for the frontend and backen The first column in the table below represents the source of the connection. The table's other columns represent the destination with the matrix value as a port number. The specified port numbers need to be opened on the origin and destination. -| | Chef Automate | Chef Infra Server | PostgreSQL | OpenSearch | Bastion | Automate Load Balancer | -| --------------- | -------------------- | -------------------- | ----------------------------------------- | ------------------------------------ | ------- | ------------- | -| Chef Automate | | | 7432, 9631 | 9200, 9631 | | | -| Infra Server | | | 7432, 9631 | 9200, 9631 | | 443 | -| PostgreSQL | | | 9631, 7432, 5432, 6432, 9638
UDP 9638 | | | | -| OpenSearch | | | | 9631, 9200, 9300, 9638
UDP 9638 | | | -| Bastion | 22, 9631, 9638, 7799 | 22, 9631, 9638, 7799 | 22, 9631, 9638, 7432, 7799 | 22, 9631, 9638, 9200, 7799 | | 22 | -| Automate Load Balancer | 443, 80 | 443, 80 | | | | | -| Internet Access | | | | | 80, 443 | | +| | Chef Automate | Chef Infra Server | PostgreSQL | OpenSearch | Bastion | Automate Load Balancer | +|------------------------|----------------------|----------------------|-------------------------------------------|--------------------------------------|---------|------------------------| +| Chef Automate | | | 7432, 9631 | 9200, 9631 | | | +| Infra Server | | | 7432, 9631 | 9200, 9631 | | 443 | +| PostgreSQL | | | 9631, 7432, 5432, 6432, 9638
UDP 9638 | | | | +| OpenSearch | | | | 9631, 9200, 9300, 9638
UDP 9638 | | | +| Bastion | 22, 9631, 9638, 7799 | 22, 9631, 9638, 7799 | 22, 9631, 9638, 7432, 7799 | 22, 9631, 9638, 9200, 7799 | | 22 | +| Automate Load Balancer | 443, 80 | 443, 80 | | | | | +| Internet Access | | | | | 80, 443 | | {{< note >}} Custom SSH port is supported, but use the same port across all the machines. {{< /note >}} **Port usage definitions** | Protocol | Port Number | Usage | -| -------- | ----------- | --------------------------------------------------------- | +|----------|-------------|-----------------------------------------------------------| | TCP | 22 | SSH to configure services | | TCP | 9631 | Habitat HTTP API | | TCP | 443 | Allow Users to reach UI / API | @@ -264,12 +264,12 @@ To know more about the AWS deployment disaster recovery, visit our [Disaster Rec - Automate HA will always have Chef Automate and Chef Infra Server running in the cluster. - Chef Manage or Private Chef Supermarket customers should not migrate to Automate HA. -| Existing System | Supported Setup Type | Minimum Eligible System Version | Maximum Eligible System Version | Pre-requisite Before Migration | -| ----------------- | ---------------------------------------------------------------------------------------------- | ------------------------------------ | ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Chef Automate | [Standalone](/automate/install/) | Automate 2020XXXXXX | | To migrate to Managed OpenSearch Automate HA cluster, the current standalone Chef Automate version should be at most 4.3.0. | -| Chef Backend | [Chef Backend Cluster](/server/install_server_ha/) | Backend 2.X and Infra Server 14.X | Chef Infra Server 15.4.0 | Chef Backend using PostgreSQL storage for Cookbooks should only migrate to Automate HA. | -| Chef Infra Server | [Standalone](/server/install_server/#standalone)
[Tiered](/server/install_server_tiered/) | Infra server 14.XXX | Chef Infra Server 15.4.0 | Chef Infra Server using PostgreSQL storage for Cookbooks should only migrate to Automate HA. | -| A2HA | PS Lead A2HA On-Premise Deployment | Chef Automate version 20201230192246 | Chef Automate Version 20220223121207 | The A2HA cluster-mounted backup file system should also be attached to Automate HA cluster.
In case of In-Place migration, the volume having `/hab` should have more than 60% free space on each node. | +| Existing System | Supported Setup Type | Minimum Eligible System Version | Maximum Eligible System Version | Pre-requisite Before Migration | +|-------------------|------------------------------------------------------------------------------------------------|--------------------------------------|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Chef Automate | [Standalone](/automate/install/) | Automate 2020XXXXXX | | To migrate to Managed OpenSearch Automate HA cluster, the current standalone Chef Automate version should be at most 4.3.0. | +| Chef Backend | [Chef Backend Cluster](/server/install_server_ha/) | Backend 2.X and Infra Server 14.X | Chef Infra Server 15.4.0 | Chef Backend using PostgreSQL storage for Cookbooks should only migrate to Automate HA. | +| Chef Infra Server | [Standalone](/server/install_server/#standalone)
[Tiered](/server/install_server_tiered/) | Infra server 14.XXX | Chef Infra Server 15.4.0 | Chef Infra Server using PostgreSQL storage for Cookbooks should only migrate to Automate HA. | +| A2HA | PS Lead A2HA On-Premise Deployment | Chef Automate version 20201230192246 | Chef Automate Version 20220223121207 | The A2HA cluster-mounted backup file system should also be attached to Automate HA cluster.
In case of In-Place migration, the volume having `/hab` should have more than 60% free space on each node. |

{{< note >}}

diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_aws_managed_deploy_steps.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_aws_managed_deploy_steps.md
index 8f5022c988..be410211fe 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_aws_managed_deploy_steps.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_aws_managed_deploy_steps.md
@@ -19,7 +19,7 @@ Follow the steps below to deploy Chef Automate High Availability (HA) on AWS (Am

{{< warning >}}

-- PLEASE DO NOT MODIFY THE WORKSPACE PATH; it should always be "/hab/a2_deploy_workspace"
+- Do not modify the workspace path. It should always be `/hab/a2_deploy_workspace`.
- We currently don't support AD managed users in nodes. We only support local Linux users.
- If you have configured a sudo password for the user, you must create an environment variable `sudo_password` and set the password as the variable's value. Example: `export sudo_password=`. And then, run all sudo commands with the `sudo -E or --preserve-env` option. Example: `sudo -E ./chef-automate deploy config.toml --airgap-bundle automate.aib`. This is required for the `chef-automate` CLI to run the commands with sudo privileges. Please refer [this](/automate/ha_sudo_password/) for details.
- If SELinux is enabled, deployment will configure it to `permissive` (Usually in case of RHEL SELinux is enabled)
@@ -43,8 +43,10 @@ Follow the steps below to deploy Chef Automate High Availability (HA) on AWS (Am
   "
   ```

-   {{< note >}}
+   {{< note spaces=3 >}}
+
   Chef Automate bundles are available for 365 days from the release of a version. However, the milestone release bundles are available for download forever.
+
   {{< /note >}}

## Steps to Generate Config
@@ -57,9 +59,13 @@ Follow the steps below to deploy Chef Automate High Availability (HA) on AWS (Am

    Click [here](/automate/ha_config_gen) to know more about generating config

-    {{< note >}} You can also generate config using **init config** and then generate init config for existing infrastructure. The command is as shown below:
+    {{< note spaces=4 >}}
+
+    You can also generate a configuration file using the `init-config` subcommand. The command is as shown below:

-    chef-automate init-config-ha aws{{< /note >}}
+        chef-automate init-config-ha aws
+
+    {{< /note >}}

    {{< warning spaces=4 >}}
    {{% automate/char-warn %}}
@@ -87,26 +93,26 @@ Once the provisioning is successful, **if you have added custom DNS to your conf

    sudo chef-automate verify -c config.toml
    ```

-    To know more about config verify you can check [Config Verify Doc page](/automate/ha_verification_check/).
+    To know more about config verify, you can check [Config Verify Doc page](/automate/ha_verification_check/).

-    Once the verification is successfully completed, then proceed with deployment, In case of failure please fix the issue and re-run the verify command.
+    Once the verification is successfully completed, proceed with the deployment. In case of failure, please fix the issue and re-run the verify command.

## Steps to deploy

-1. The following command will run the deployment. 
The deploy command will first run the verify command internally. To skip the verification process during deployment, use the `--skip-verify` flag.

    ```bash
    chef-automate deploy config.toml --airgap-bundle automate.aib
    ```

-    To skip verification in the deploy command, use `--skip-verify` flag
+    To skip verification in the deployment command, use the `--skip-verify` flag.

    ```bash
    chef-automate deploy config.toml --airgap-bundle automate.aib --skip-verify
    ```

## Verify Deployment

-1. Once the deployment is successful, Get the consolidate status of the cluster
+1. Once the deployment is successful, get the consolidated status of the cluster.

    ```bash
    chef-automate status summary
    ```
@@ -118,20 +124,20 @@ Once the provisioning is successful, **if you have added custom DNS to your conf
    chef-automate status
    ```

-1. Post Deployment, you can run the verification command
+1. Post deployment, you can run the verification command.

    ```bash
    chef-automate verify
    ```

-1. Get the cluster Info
+1. Get the cluster info.

    ```bash
    chef-automate info
    ```

-1. After the deployment is completed. To view the automate UI, run the command `chef-automate info`, and you will get the `automate_url`.
-   If you want to change the FQDN URL from the loadbalancer URL to some other FQDN URL, then use the below template.
+1. After the deployment is completed, to view the Automate UI, run the command `chef-automate info`, and you will get the `automate_url`.
+   If you want to change the FQDN URL from the load balancer URL to some other FQDN URL, then use the below template.

    - Create a file `a2.fqdn.toml`
@@ -172,30 +178,34 @@ Once the provisioning is successful, **if you have added custom DNS to your conf

Check if Chef Automate UI is accessible by going to (Domain used for Chef Automate) [https://chefautomate.example.com](https://chefautomate.example.com).

-After successful deployment, proceed with following:
+After successful deployment, proceed with the following:

    1. Create user and orgs, Click [here](/automate/ha_node_bootstraping/#create-users-and-organization) to learn more about user and org creation
    1. Workstation setup, Click [here](/automate/ha_node_bootstraping/#workstation-setup) to learn more about workstation setup
-    1. Node bootstrapping, Click [here](/automate/ha_node_bootstraping/#bootstraping-a-node) to learn more about node bootstrapping.
+    1. Node bootstrapping, Click [here](/automate/ha_node_bootstraping/#bootstraping-a-node) to learn more about node bootstrapping.

## Sample Config

-{{< note >}} Assuming 8+1 nodes (1 bastion, 1 for automate UI, 1 for Chef-server, Managed RDS Postgresql, and Managed OpenSearch) {{< /note >}}
+{{< note >}}
+
+Assuming 8+1 nodes (1 bastion, 1 for Chef Automate UI, 1 for Chef Infra Server, Managed RDS Postgresql, and Managed OpenSearch)
+
+{{< /note >}}

{{< note >}}

-- User only needs to create/set up **the bastion node**, a **user** with IAM role of Admin access and the s3 bucket access attached to it.
-- The following config will create an s3 bucket for backup.
+- User only needs to create/set up **the bastion node**, a **user** with IAM role of Admin access and the S3 bucket access attached to it.
+- The following config will create an S3 bucket for backup.
- To provide multiline certificates use triple quotes like `""" multiline certificate contents"""`. 
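  A minimal illustration of that format (the field name `root_ca` and the certificate body are placeholders, not values from this guide):

  ```toml
  root_ca = """-----BEGIN CERTIFICATE-----
  ...certificate contents...
  -----END CERTIFICATE-----"""
  ```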
{{< /note >}}

-```config
+```toml
[architecture]
  [architecture.aws]
    ssh_user = "ec2-user"
    ssh_group_name = "ec2-user"
-    ssh_key_file = "~/.ssh/my-key.pem"
+    ssh_key_file = "/home/ec2-user/KEY_FILENAME.pem"
    ssh_port = "22"
    secrets_key_file = "/hab/a2_deploy_workspace/secrets.key"
    secrets_store_file = "/hab/a2_deploy_workspace/secrets.json"
@@ -203,7 +213,7 @@ After successful deployment, proceed with following:
    workspace_path = "/hab/a2_deploy_workspace"
    backup_mount = "/mnt/automate_backups"
    backup_config = "s3"
-    s3_bucketName = "My-Bucket-Name"
+    s3_bucketName = "BUCKET_NAME"
  [automate]
    [automate.config]
      admin_password = "test@343423"
@@ -272,3 +282,31 @@ After successful deployment, proceed with following:
- Provide `managed_rds_instance_url`,`managed_rds_superuser_username`,`managed_rds_superuser_password`,`managed_rds_dbuser_username`,`managed_rds_dbuser_password`.
- Provide `ami_id` for the region where the infra is created. Eg: `ami-0bb66b6ba59664870`.
- Provide `certificate ARN` for both automate and Chef servers in `automate_lb_certificate_arn` and `chef_server_lb_certificate_arn`, respectively.
+
+## Uninstall Chef Automate HA
+
+{{< danger >}}
+
+The `cleanup` command will remove all AWS resources created by the `provision-infra` command.
+
+Adding the `--force` flag will remove object storage if it was created with the `provision-infra` command.
+
+{{< /danger >}}
+
+To uninstall Chef Automate HA instances after successful deployment, run the below command in your bastion host. This will delete the AWS resources that are created during provision-infra.
+
+```bash
+chef-automate cleanup --aws-deployment --force
+```
+
+OR
+
+```bash
+chef-automate cleanup --aws-deployment
+```
+
+After the `cleanup` command, the following command can be used to remove the deployment workspace in the Bastion machine. This will also remove the logs file inside the workspace.
+
+```bash
+hab pkg uninstall chef/automate-ha-deployment
+```
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_aws_efs.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_aws_efs.md
index 81952705a7..3b21482d49 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_aws_efs.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_aws_efs.md
@@ -112,7 +112,7 @@ Configure the OpenSearch `path.repo` attribute.

- Run the backup command from bastion as shown below to create a backup:

-  ```cmd
+  ```sh
  chef-automate backup create
  ```

@@ -132,9 +132,4 @@ To restore backed-up data of the Chef Automate High Availability (HA) using Exte

## Troubleshooting

-While running the restore command, If it prompts any error follow the steps given below.
-
-- check the chef-automate status in Automate node by running `chef-automate status`.
-- Also check the hab svc status in automate node by running `hab svc status`.
-- If the deployment services is not healthy then reload it using `hab svc load chef/deployment-service`.
-- Now, check the status of Automate node and then try running the restore command from bastion. 
+{{< readfile file = "content/automate/reusable/md/restore_troubleshooting.md" >}} diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_aws_s3.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_aws_s3.md index 9d3655b08a..c5a07e1f08 100644 --- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_aws_s3.md +++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_aws_s3.md @@ -128,17 +128,17 @@ In case of if you are using the Managed AWS Service you need to create a [snapsh {{< note >}} **IAM Role:** Assign the IAM Role to all the OpenSearch instances in the cluster created above. {{< /note >}} -## Backup and Restore Commands +## Backup and Restore ### Backup - To create the backup, by running the backup command from bastion. The backup command is as shown below: - ```cmd + ```sh chef-automate backup create ``` -#### Restoring the Backed-up Data from Object Storage +### Restore To restore backed-up data of the Chef Automate High Availability (HA) using External AWS S3, follow the steps given below: @@ -157,14 +157,9 @@ To restore backed-up data of the Chef Automate High Availability (HA) using Exte {{< /note >}} -## Troubleshooting - -While running the restore command, If it prompts any error follow the steps given below. +#### Troubleshooting -- Check the chef-automate status in Automate node by running `chef-automate status`. -- Also check the hab svc status in automate node by running `hab svc status`. -- If the deployment services is not healthy then reload it using `hab svc load chef/deployment-service`. -- Now, check the status of Automate node and then try running the restore command from bastion. +{{< readfile file = "content/automate/reusable/md/restore_troubleshooting.md" >}} For **Disaster Recovery or AMI upgrade**, while running the restore in secondary cluster which is in different region follow the steps given below. @@ -173,14 +168,14 @@ For **Disaster Recovery or AMI upgrade**, while running the restore in secondary 1. Modify the region in FrontEnd nodes by patching the below configs with command, `chef-automate config patch .toml --fe` - ```cmd + ```sh [global.v1.external.opensearch.backup.s3.settings] region = "" ``` 2. 
Make a PUT request in an OpenSearch node by running this script:

-  ```cmd
+  ```sh
  indices=(
  chef-automate-es6-automate-cs-oc-erchef
  chef-automate-es6-compliance-service
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_file_system.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_file_system.md
index 5cfbfc072b..e899948f38 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_file_system.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_file_system.md
@@ -1,5 +1,5 @@
+++
-title = "On-Premise Deployment using Filesystem"
+title = "On-Prem Deployment using Filesystem"
draft = false

@@ -7,7 +7,7 @@ gh_repo = "automate"
  [menu]
    [menu.automate]
-    title = "On-Premise Deployment using Filesystem"
+    title = "On-Prem Deployment using Filesystem"
    identifier = "automate/deploy_high_availability/backup_and_restore/ha_backup_restore_prerequisites.md Backup and Restore File System"
    parent = "automate/deploy_high_availability/backup_and_restore"
    weight = 210
@@ -116,17 +116,17 @@ Configure the OpenSearch `path.repo` setting by following the steps given below:
    chef-automate config patch --fe automate.toml
    ```

-## Backup and Restore commands
+## Backup and Restore

### Backup

-- To create the backup, by running the backup command from bastion. The backup command is as shown below:
+To create the backup, run the backup command from the bastion. The backup command is shown below:

-  ```cmd
-  chef-automate backup create
-  ```
+```sh
+chef-automate backup create
+```

-### Restoring the Backed-up Data From the file system
+### Restore

To restore backed-up data of the Chef Automate High Availability (HA) using External File System (EFS), follow the steps given below:

@@ -140,11 +140,6 @@ To restore backed-up data of the Chef Automate High Availability (HA) using Exte

-## Troubleshooting
-
-While running the restore command, If it prompts any error follow the steps given below.
+#### Troubleshooting

-- Check the chef-automate status in Automate node by running `chef-automate status`.
-- Also check the hab svc status in automate node by running `hab svc status`.
-- If the deployment services is not healthy then reload it using `hab svc load chef/deployment-service`.
-- Now, check the status of Automate node and then try running the restore command from bastion. 
+{{< readfile file = "content/automate/reusable/md/restore_troubleshooting.md" >}}
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_object_storage.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_object_storage.md
index 5aac7d06ba..21f068f23a 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_object_storage.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_backup_restore_object_storage.md
@@ -1,5 +1,5 @@
+++
-title = "On-Premise Deployment using Object Storage"
+title = "Back Up On-Prem Deployment With Object Storage"
draft = false

@@ -7,7 +7,7 @@ gh_repo = "automate"
  [menu]
    [menu.automate]
-    title = "On-Premise Deployment using Object Storage"
+    title = "On-Prem Back Up With Object Storage"
    identifier = "automate/deploy_high_availability/backup_and_restore/ha_backup_restore_prerequisites.md Backup and Restore Object Storage"
    parent = "automate/deploy_high_availability/backup_and_restore"
    weight = 220
@@ -17,110 +17,82 @@ gh_repo = "automate"
{{% automate/ha-warn %}}
{{< /note >}}

-{{< note >}}
-
-- If the user chooses `backup_config` as `object_storage` in `config.toml` backup is already configured during the deployment, and in that case **the below steps are not required**. If `backup_config` is left blank, then the configuration needs to be configured manually.
-- Encrypted S3 bucket are supported with only Amazon S3 managed keys (SSE-S3).
+This document shows how to configure, back up, and restore a Chef Automate high availability deployment with object storage.

-{{< /note >}}
+During deployment of Chef Automate, if you set `backup_config = "object_storage"` or `backup_config = "file_system"` in the Automate configuration TOML file, then backup is already configured and you don't need to configure data backup for Chef Automate.
+If a backup wasn't configured during the initial deployment, then follow these instructions to configure it manually.

-## Overview
+Chef Automate supports backing up data to the following platforms:

-This section provides the pre-backup configuration required to back up the data on Object Storage System (Other than AWS S3) like Minio, Non-AWS S3. The steps to set a secret key using commands are given below:
+- S3 (AWS S3, MinIO, non-AWS S3)
+- Google Cloud Storage (GCS)

-### Configuration in OpenSearch Node
+## Configure backup for S3

-This section provides the pre-backup configuration required to back up the data on Object Storage Systems like _Minio_, _Non-AWS S3_. The steps to set a secret key using commands are given below:
+This section shows how to configure data backup on a Chef Automate high availability deployment to object storage on AWS S3, MinIO, or non-AWS S3.

-1. Log in to all the OpenSearch nodes and follow the steps on all the OpenSearch nodes. 
+### Configure OpenSearch nodes -- `export OPENSEARCH_PATH_CONF="/hab/svc/automate-ha-opensearch/config"` -- `hab pkg exec chef/automate-ha-opensearch opensearch-keystore add s3.client.default.access_key` (When asked, Enter your key) -- `hab pkg exec chef/automate-ha-opensearch opensearch-keystore add s3.client.default.secret_key` (When asked, Enter your key/secret) -- `chown -RL hab:hab /hab/svc/automate-ha-opensearch/config/opensearch.keystore` (Setting hab:hab permission) -- `curl -k -X POST "https://127.0.0.1:9200/_nodes/reload_secure_settings?pretty" -u admin:admin` (Command to load the above setting) +Add a secret key and access key for your S3 backup provider on every OpenSearch node. -The final output after running the curl command on all nodes is given below: - -```json -{ - "_nodes": { - "total": 3, - "successful": 3, - "failed": 0 - }, - "cluster_name": "chef-insights", - "nodes": { - "lenRTrZ1QS2uv_vJIwL-kQ": { - "name": "lenRTrZ" - }, - "Us5iBo4_RoaeojySjWpr9A": { - "name": "Us5iBo4" - }, - "qtz7KseqSlGm2lEm0BiUEg": { - "name": "qtz7Kse" - } - } -} -``` +{{< note >}} -#### Configuration for Opensearch Node from Provision Host +Encrypted S3 buckets are supported only with Amazon S3 managed keys (SSE-S3). -1. To override the existing default endpoint: +{{< /note >}} -- Create an `.toml` file on **the provisioning server** using the following command: +1. Set the OpenSearch path configuration location. - ```bash - touch os_config.toml + ```sh + export OPENSEARCH_PATH_CONF="/hab/svc/automate-ha-opensearch/config" ``` -- Add the following settings at the end of the `os_config.toml` file. +1. Add your S3 access and secret keys to the OpenSearch keystore. ```sh - [s3] - [s3.client.default] - protocol = "https" - read_timeout = "60s" - max_retries = "3" - use_throttle_retries = true - # Add endpoint of the Object storage below - endpoint = "" + hab pkg exec chef/automate-ha-opensearch opensearch-keystore add s3.client.default.access_key + hab pkg exec chef/automate-ha-opensearch opensearch-keystore add s3.client.default.secret_key ``` -- Run the following command to apply the updated `os_config.toml` changes. Run this command only once. (_This will trigger a restart of the OpenSearch services on each server_) +1. Change ownership of the keystore. ```sh - chef-automate config patch --opensearch os_config.toml + chown -RL hab:hab /hab/svc/automate-ha-opensearch/config/opensearch.keystore ``` -This will update the configuration in Opensearch node. - -#### Healthcheck commands - -- Following command can be run in the OpenSearch node +1. Load the secure settings into the OpenSearch keystore. ```sh - hab svc status (check whether OpenSearch service is up or not) + curl -X POST https://localhost:9200/_nodes/reload_secure_settings?pretty --cacert /hab/svc/automate-ha-opensearch/config/certificates/root-ca.pem --key /hab/svc/automate-ha-opensearch/config/certificates/admin-key.pem --cert /hab/svc/automate-ha-opensearch/config/certificates/admin.pem -k + ``` - curl -k -X GET "" -u admin:admin (Another way to check is to check whether all the indices are green or not) +1. Repeat these steps on all OpenSearch nodes until they are all updated. 
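+To confirm that both entries were stored, you can list the keystore contents (a quick sanity check; `opensearch-keystore list` prints entry names only, never the secret values):
+
+   ```sh
+   hab pkg exec chef/automate-ha-opensearch opensearch-keystore list
+   ```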
- # Watch for a message about OpenSearch going from RED to GREEN - `journalctl -u hab-sup -f | grep 'automate-ha-opensearch' - ``` +#### OpenSearch health check -#### Configuration for Automate Node from Provision Host +{{< readfile file="content/automate/reusable/md/opensearch_health_check.md" >}} -{{< note >}} +### Patch the Automate configuration -Make sure all the frontend nodes and OpenSearch have access to the object storage. +On the bastion host, update the S3 and OpenSearch configuration. -{{< /note >}} +Before starting, make sure the frontend nodes and OpenSearch nodes have access to the object storage endpoint. -Once done with the OpenSearch setup, add the following `automate.toml` file and patch the updated config to all frontend nodes. In the file, modify the values listed below: +1. Create a TOML file on the bastion host with the following settings. + + ```sh + [s3] + [s3.client.default] + protocol = "https" + read_timeout = "60s" + max_retries = "3" + use_throttle_retries = true + endpoint = "s3.example.com" + ``` -1. Create .toml file by `vi automate.toml` + Replace the value of `endpoint` with the URL of your S3 storage endpoint. -2. Refer to the content for the `automate.toml` file below: +1. Add the following content to the TOML file to configure OpenSearch. ```sh [global.v1] @@ -131,7 +103,7 @@ Once done with the OpenSearch setup, add the following `automate.toml` file and [global.v1.external.opensearch.backup.s3] # bucket (required): The name of the bucket - bucket = "bucket-name" + bucket = "" # base_path (optional): The path within the bucket where backups should be stored # If base_path is not set, backups will be stored at the root of the bucket. @@ -169,70 +141,195 @@ Once done with the OpenSearch setup, add the following `automate.toml` file and [global.v1.backups.s3.bucket] # name (required): The name of the bucket - name = "bucket-name" + name = "" # endpoint (required): The endpoint for the region the bucket lives in for Automate Version 3.x.y # endpoint (required): For Automate Version 4.x.y, use this https://s3.amazonaws.com - endpoint = "" + endpoint = "" # base_path (optional): The path within the bucket where backups should be stored # If base_path is not set, backups will be stored at the root of the bucket. base_path = "automate" [global.v1.backups.s3.credentials] - access_key = "" - secret_key = "" + access_key = "" + secret_key = "" ``` -3. Execute the command given below to trigger the deployment. +1. Use the `patch` subcommand to patch the Automate configuration. ```sh - ./chef-automate config patch --frontend /path/to/automate.toml + ./chef-automate config patch --frontend /PATH/TO/FILE_NAME.TOML ``` -## Backup and Restore Commands +## Configure backup on Google Cloud Storage -### Backup +This sections shows how to configure a Chef Automate high availability deployment to back up data to object storage on Google Cloud Storage (GCS). + +### Configure OpenSearch nodes -- To create the backup, by running the backup command from bastion. The backup command is as shown below: +Add a GCS service account file that gives access to the GCS bucket to every OpenSearch node. - ```cmd - chef-automate backup create +1. Log in to an OpenSearch node and set the OpenSearch path and GCS service account file locations. 
+
+   ```sh
+   export OPENSEARCH_PATH_CONF="/hab/svc/automate-ha-opensearch/config"
+   export GCS_SERVICE_ACCOUNT_JSON_FILE_PATH="/PATH/TO/GOOGLESERVICEACCOUNT.JSON"
+   ```
-
+1. Add the GCS service account file to the OpenSearch keystore.
+
+   ```sh
+   hab pkg exec chef/automate-ha-opensearch opensearch-keystore add-file --force gcs.client.default.credentials_file $GCS_SERVICE_ACCOUNT_JSON_FILE_PATH
+   ```

-#### Restoring the Backed-up Data from Object Storage
+1. Change ownership of the keystore.

-To restore backed-up data of the Chef Automate High Availability (HA) using External Object Storage, follow the steps given below:
+   ```sh
+   chown -RL hab:hab /hab/svc/automate-ha-opensearch/config/opensearch.keystore
+   ```

-- Check the status of Automate HA Cluster from the bastion nodes by executing the `chef-automate status` command.
+1. Reload the secure settings on the OpenSearch node.

-- Execute the restore command from bastion`chef-automate backup restore s3://bucket_name/path/to/backups/BACKUP_ID --skip-preflight --s3-access-key "Access_Key" --s3-secret-key "Secret_Key"`.
+   ```sh
+   curl -X POST https://localhost:9200/_nodes/reload_secure_settings?pretty --cacert /hab/svc/automate-ha-opensearch/config/certificates/root-ca.pem --key /hab/svc/automate-ha-opensearch/config/certificates/admin-key.pem --cert /hab/svc/automate-ha-opensearch/config/certificates/admin.pem -k
+   ```

-- In case of Airgapped Environment, Execute this restore command from bastion `chef-automate backup restore /backups/BACKUP_ID --skip-preflight --airgap-bundle `.
+1. Repeat these steps on all OpenSearch nodes until they are all updated.

-{{< note >}}
+After you update all nodes, the curl command returns output similar to this:

-- If you are restoring the backup from an older version, then you need to provide the `--airgap-bundle `.
-- If you have not configured S3 access and secret keys during deployment or if you have taken backup on a different bucket, then you need to provide the `--s3-access-key ` and `--s3-secret-key ` flags.
+```json
+{
+  "_nodes": {
+    "total": 3,
+    "successful": 3,
+    "failed": 0
+  },
+  "cluster_name": "chef-insights",
+  "nodes": {
+    "lenRTrZ1QS2uv_vJIwL-kQ": {
+      "name": "lenRTrZ"
+    },
+    "Us5iBo4_RoaeojySjWpr9A": {
+      "name": "Us5iBo4"
+    },
+    "qtz7KseqSlGm2lEm0BiUEg": {
+      "name": "qtz7Kse"
+    }
+  }
+}
+```

-{{< /note >}}
+#### OpenSearch health check
+
+{{< readfile file="content/automate/reusable/md/opensearch_health_check.md" >}}
+
+### Patch the Automate configuration
+
+On the bastion host, update the OpenSearch configuration.
+
+Before starting, make sure the frontend nodes and OpenSearch nodes have access to the object storage endpoint.
+
+1. Create a TOML file on the bastion host with the following settings.
+
+   ```toml
+   [global.v1]
+     [global.v1.external.opensearch.backup]
+       enable = true
+       location = "gcs"
+
+     [global.v1.external.opensearch.backup.gcs]
+
+       # bucket (required): The name of the bucket
+       bucket = "bucket-name"
+
+       # base_path (optional): The path within the bucket where backups should be stored
+       # If base_path is not set, backups will be stored at the root of the bucket.
+       base_path = "opensearch"
+       client = "default"
+
+     [global.v1.backups]
+       location = "gcs"
+
+     [global.v1.backups.gcs.bucket]
+       # name (required): The name of the bucket
+       name = "bucket-name"
+
+       # endpoint = ""
+
+       # base_path (optional): The path within the bucket where backups should be stored
+       # If base_path is not set, backups will be stored at the root of the bucket.
+ base_path = "automate" + + [global.v1.backups.gcs.credentials] + json = '''{ + "type": "service_account", + "project_id": "chef-automate-ha", + "private_key_id": "7b1e77baec247a22a9b3****************f", + "private_key": "", + "client_email": "myemail@chef.iam.gserviceaccount.com", + "client_id": "1******************1", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/myemail@chef.iam.gserviceaccount.com", + "universe_domain": "googleapis.com" + }''' + ``` + +1. Patch the Automate configuration to trigger the deployment. + + ```sh + ./chef-automate config patch --frontend /PATH/TO/FILE_NAME.TOML + ``` + +## Backup and Restore + +### Backup + +To create a backup, run the backup command from the bastion host. + +```sh +chef-automate backup create +``` + +### Restore + +Restore a backup from external object storage. + +1. Check the status of the Automate HA cluster from the bastion host. + + ```sh + chef-automate status + ``` + +1. Restore the backup by running the restore command from the bastion host. + + For S3: + + ```sh + chef-automate backup restore s3://BUCKET_NAME/PATH/TO/BACKUPS/BACKUP_ID --skip-preflight --s3-access-key "ACCESS_KEY" --s3-secret-key "SECRET_KEY" + ``` + + For GCS: + + ```sh + chef-automate backup restore gs://BUCKET_NAME/PATH/TO/BACKUPS/BACKUP_ID --gcs-credentials-path "PATH/TO/GOOGLE_SERVICE_ACCOUNT.JSON" + ``` + + In an airgapped environment: -## Troubleshooting + ```sh + chef-automate backup restore /BACKUPS/BACKUP_ID --skip-preflight --airgap-bundle + ``` -While running the restore command, If it prompts any error follow the steps given below. +#### Troubleshooting -- Check the chef-automate status in Automate node by running `chef-automate status`. -- Also check the hab svc status in automate node by running `hab svc status`. -- If the deployment services is not healthy then reload it using `hab svc load chef/deployment-service`. -- Now, check the status of Automate node and then try running the restore command from bastion. +{{< readfile file = "content/automate/reusable/md/restore_troubleshooting.md" >}} diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_chef_backend_to_automate_ha.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_chef_backend_to_automate_ha.md index 5079f9414b..517624aa74 100644 --- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_chef_backend_to_automate_ha.md +++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_chef_backend_to_automate_ha.md @@ -44,19 +44,19 @@ Check the [AWS Deployment Prerequisites](/automate/ha_aws_deployment_prerequisit 1. Execute the below command to install Habitat: - ```cmd + ```sh curl https://raw.githubusercontent.com/habitat-sh/habitat/master/components/hab/install.sh \ | sudo bash ``` 2. Execute the below command to install the habitat package for `knife-ec-backup` - ```cmd + ```sh hab pkg install chef/knife-ec-backup ``` 3. Execute the below command to generate a knife tidy server report to examine the stale node, data, etc. 
- ```cmd + ```sh hab pkg exec chef/knife-ec-backup knife tidy server report --node-threshold 60 -s -u -k ``` @@ -67,13 +67,13 @@ Check the [AWS Deployment Prerequisites](/automate/ha_aws_deployment_prerequisit - `node-threshold NUM_DAYS` is the maximum number of days since the last checking before a node is considered stale. For Example: - ```cmd + ```sh hab pkg exec chef/knife-ec-backup knife tidy server report --node-threshold 60 -s https://chef.io -u pivotal -k /etc/opscode/pivotal.pem ``` 4. Execute the below command to initiate a backup of your Chef Server data. - ```cmd + ```sh hab pkg exec chef/knife-ec-backup knife ec backup backup_$(date '+%Y%m%d%H%M%s') --webui-key /etc/opscode/webui_priv.pem -s ``` @@ -84,7 +84,7 @@ Check the [AWS Deployment Prerequisites](/automate/ha_aws_deployment_prerequisit For example: - ```cmd + ```sh hab pkg exec chef/knife-ec-backup knife ec backup backup_$(date '+%Y%m%d%H%M%s') --webui-key /etc/opscode/webui_priv.pem -s https://chef.io`. ``` @@ -96,7 +96,7 @@ Check the [AWS Deployment Prerequisites](/automate/ha_aws_deployment_prerequisit 5. Execute the below command to copy the backup directory to the Automate HA Chef Server. - ```cmd + ```sh scp -i /path/to/key /path/to/backup-file user@host:/home/user ``` @@ -117,13 +117,13 @@ Before restoring the backup on the Automate HA Chef Server, configure [S3 storag - Execute the below command to install the habitat package for `knife-ec-backup` - ```cmd + ```sh hab pkg install chef/knife-ec-backup ``` - Execute the below command to restore the backup. - ```cmd + ```sh export LC_ALL=en_US.UTF-8 export LANG=en_US.UTF-8 hab pkg exec chef/knife-ec-backup knife ec restore -yes --concurrency 1 --webui-key /hab/svc/automate-cs-oc-erchef/data/webui\_priv.pem --purge -c /hab/pkgs/chef/chef-server-ctl/*/*/omnibus-ctl/spec/fixtures/pivotal.rb @@ -133,7 +133,7 @@ Before restoring the backup on the Automate HA Chef Server, configure [S3 storag - Download the validation script using below - ```cmd + ```sh curl https://raw.githubusercontent.com/chef/automate/main/dev/infra_server_objects_count_collector.sh -o infra_server_objects_count_collector.sh ``` @@ -145,14 +145,14 @@ Before restoring the backup on the Automate HA Chef Server, configure [S3 storag - Execute the below command to get the counts of objects - ```cmd + ```sh bash infra_server_objects_count_collector.sh -S -K /path/to/key -F Filename ``` - Repeat the above commands for the new server to get the counts - Now run the below command to check the differences between the old and new data. Ideally, there should be no differences if the migration was done successfully. - ```cmd + ```sh diff old_server_file new_server_file ``` @@ -171,7 +171,7 @@ As part of this scenario, the customer will migrate from the chef-backend (5 mac - To validate the In-place migration, run the validation script before starting the backup and restore. -```cmd +```sh curl https://raw.githubusercontent.com/chef/automate/main/dev/infra_server_objects_count_collector.sh -o infra_server_objects_count_collector.sh ``` @@ -190,32 +190,32 @@ Where: 1. [Backup the existing chef server data](/automate/ha_chef_backend_to_automate_ha/##backup-the-existing-chef-infra-server-or-chef-backend-data) 2. ssh to all the backend nodes of chef-backend and run - ```cmd + ```sh chef-backend-ctl stop ``` 3. ssh to all frontend nodes of chef-backend and run - ```cmd + ```sh chef-server-ctl stop ``` 4. Create one bastion machine under the same network space. -5. 
ssh to bastion machine and download chef-automate cli and extract the downloaded zip file
+5. SSH to the bastion machine, then download the chef-automate CLI and extract the downloaded zip file:

-  ```cmd
+  ```sh
   https://packages.chef.io/files/current/latest/chef-automate-cli/chef-automate_linux_amd64.zip | gunzip - > chef-automate && chmod +x chef-automate | cp -f chef-automate /usr/bin/chef-automate
   ```

6. Create an airgap bundle using the command

-  ```cmd
+  ```sh
   ./chef-automate airgap bundle create
   ```

7. Generate the `config.toml` file using the following command:

-  ```cmd
+  ```sh
   ./chef-automate init-config-ha existing_infra
   ```

@@ -227,7 +227,7 @@ Where:
   - Ensure to provide Chef backend's backend server IPs for Automate HA Postgres and OpenSearch machines.
   - Sample configuration; please modify according to your needs.

-  ```cmd
+  ```toml
   [architecture.existing_infra]
   secrets_key_file = "/hab/a2_deploy_workspace/secrets.key"
   secrets_store_file = "/hab/a2_deploy_workspace/secrets.json"
@@ -276,7 +276,7 @@ Where:

9. Deploy using the following command:

-  ```cmd
+  ```sh
   ./chef-automate deploy config.toml --airgap-bundle 
   ```

diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_config_gen.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_config_gen.md
index 0b110cbb4f..37f83d05b7 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_config_gen.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_config_gen.md
@@ -26,112 +26,237 @@ Refer the fields below to generate Chef Automate High Availability (HA) configur

 ## Automate HA Topology

-- You need to have `Chef Automate HA` as a topology for HA deployments.
-- `On-Premise` to deploy on customer created Automate HA cluster.
-- `Aws` to deploy in AWS environment, Cluster will be created by Automate HA.
-- `Deployment` is a config type, we may have different type of configs in future.
+You need to have `Chef Automate HA` as a topology for HA deployments.
+
+`On-Premise`
+: To deploy on a customer-created Chef Automate HA cluster.
+
+`Aws`
+: To deploy in an AWS environment. Automate HA creates the cluster.
+
+`Deployment`
+: The configuration type.

 ## SSH User and Group

-- `ssh user name` user name to ssh to cluster instances.
-- `ssh group name` group name which is associated with ssh user.
-- `ssh port no` port to do ssh, default is 22 incase you have different port then provide the ssh port number.
-- `ssh key file path` ssh key file path, same will be used to ssh to cluster instances Example `~/.ssh/my-key.pem`.
+`ssh user name`
+: User name used to SSH to cluster instances.
+
+`ssh group name`
+: Group name associated with the SSH user.
+
+`ssh port no`
+: Port to connect using SSH. Default value: `22`.
+
+`ssh key file path`
+: Path to the SSH key file, which is used to SSH to cluster instances. For example, `/home/ec2-user/KEY_FILENAME.pem`.

 ## Automate Load Balancer FQDN

-- `Automate FQDN` automate FQDN name Example `chefautomate.example.com`.
-- `Automate FQDN ARN` for Aws deployment ARN name is required for Automate FQDN domain.
-- `Automate FQDN Root Certificate` ssl root certificate for Automate FQDN domain.
+`Automate FQDN`
+: Chef Automate FQDN. For example, `chefautomate.example.com`.
+
+`Automate FQDN ARN`
+: For AWS deployments, the ARN required for the Automate FQDN domain.
+
+`Automate FQDN Root Certificate`
+: SSL root certificate for the Automate FQDN domain.
## Automate Admin

-- `Automate Admin Password` Admin password to login to automate dashboard.
+`Automate Admin Password`
+: Admin password to log in to the Automate dashboard.
+
+## Chef Infra Server Load Balancer FQDN

-## Chef Server Load Balancer FQDN
+`Chef Server FQDN`
+: Chef Infra Server FQDN. For example, `chefserver.example.com`.

-- `Chef Server FQDN` automate FQDN name Example `chefserver.example.com`.
-- `Chef Server FQDN ARN` for Aws deployment ARN name is required for Chef Servers FQDN domain.
-- `Chef Server FQDN Root Certificate` ssl root certificate for Chef Server FQDN domain.
+`Chef Server FQDN ARN`
+: For AWS deployments, the ARN required for the Chef Infra Server FQDN domain.
+
+`Chef Server FQDN Root Certificate`
+: SSL root certificate for the Chef Infra Server FQDN domain.

 ## Number of Nodes in Automate HA Cluster

-- `Automate node count` number of nodes we want to keep for automate, in case of On-Premise deployment need to provide IP Address for all nodes.
-- `Chef Server node count` number of nodes we want to keep for Chef Server, in case of On-Premise deployment need to provide IP Address for all nodes.
-- `Opensearch node count` number of nodes we want to keep for Opensearch, in case of On-Premise deployment need to provide IP Address for all nodes.
-- `Postgresql node count` number of nodes we want to keep for Postgresql, in case of On-Premise deployment need to provide IP Address for all nodes.
+`Automate node count`
+: Number of Automate nodes. For on-premises deployments, provide the IP address of each node.
+
+`Chef Server node count`
+: Number of Chef Infra Server nodes. For on-premises deployments, provide the IP address of each node.
+
+`Opensearch node count`
+: Number of OpenSearch nodes. For on-premises deployments, provide the IP address of each node.
+
+`Postgresql node count`
+: Number of PostgreSQL nodes. For on-premises deployments, provide the IP address of each node.

 ## Private/Public Key For Automate

-- `Private key for Automate` In case to have custom certificate for Automate node provide your private for Automate, If you have custom certificates for each Automate node then provide different private key for each of Automate node.
-- `Public key for Automate` In case to have custom certificate for Automate node provide your public for Automate, If you have custom certificates for each Automate node then provide different public key for each of Automate node.
+`Private key for Automate`
+: If you have a custom certificate for the Automate nodes, provide your private key for Automate. If you have a custom certificate for each Automate node, provide a different private key for each Automate node.
+
+`Public key for Automate`
+: If you have a custom certificate for the Automate nodes, provide your public key for Automate. If you have a custom certificate for each Automate node, provide a different public key for each Automate node.

 ## Private/Public Key For Chef Server

-- `Private key for Chef Server` In case to have custom certificate for Chef Server node provide your private for Chef Server, If you have custom certificates for each Chef Server node then provide different private key for each of Chef Server node.
-- `Public key for Chef Server` In case to have custom certificate for Chef Server node provide your public for Chef Server, If you have custom certificates for each Chef Server node then provide different public key for each of Chef Server node.
+`Private key for Chef Server`
+: If you have a custom certificate for the Chef Infra Server nodes, provide your private key for Chef Infra Server. If you have a custom certificate for each Chef Infra Server node, provide a different private key for each Chef Infra Server node.
+
+`Public key for Chef Server`
+: If you have a custom certificate for the Chef Infra Server nodes, provide your public key for Chef Infra Server. If you have a custom certificate for each Chef Infra Server node, provide a different public key for each Chef Infra Server node.

 ## OpenSearch Certificate and Private/Public Key

-- `Root CA for Open Search` In case of have custom certificates for Open Search node provide root certificates.
-- `Admin Key certificate for Open Search` In case of have custom certificates for Open Search node provide admin key certificates.
-- `Admin certificate for Open Search` In case of have custom certificates for Open Search node provide admin certificates.
-- `Private key for Open Search` In case to have custom certificate for Open Search node provide your private for Open Search, If you have custom certificates for each Open Search node then provide different private key for each of Open Search node.
-- `Public key for Open Search` In case to have custom certificate for Open Search node provide your public for Open Search, If you have custom certificates for each Open Search node then provide different public key for each of Open Search node.
+`Root CA for Open Search`
+: If you have custom certificates for the OpenSearch nodes, provide the root certificate.
+
+`Admin Key certificate for Open Search`
+: If you have custom certificates for the OpenSearch nodes, provide the admin key certificate.
+
+`Admin certificate for Open Search`
+: If you have custom certificates for the OpenSearch nodes, provide the admin certificate.
+
+`Private key for Open Search`
+: If you have a custom certificate for the OpenSearch nodes, provide your private key for OpenSearch. If you have a custom certificate for each OpenSearch node, provide a different private key for each OpenSearch node.
+
+`Public key for Open Search`
+: If you have a custom certificate for the OpenSearch nodes, provide your public key for OpenSearch. If you have a custom certificate for each OpenSearch node, provide a different public key for each OpenSearch node.

 ## PostgreSQL Certificate and Private/Public Key

-- `Root CA for Postgresql` In case of have custom certificates for Postgresql node provide root certificates.
-- `Private key for Postgresql` In case to have custom certificate for Postgresql node provide your private for Postgresql, If you have custom certificates for each Postgresql node then provide different private key for each of Postgresql node.
-- `Public key for Postgresql` In case to have custom certificate for Postgresql node provide your public for Postgresql, If you have custom certificates for each Postgresql node then provide different public key for each of Postgresql node.
+`Root CA for Postgresql`
+: If you have custom certificates for the PostgreSQL nodes, provide the root certificate.
+
+`Private key for Postgresql`
+: If you have a custom certificate for the PostgreSQL nodes, provide your private key for PostgreSQL. If you have a custom certificate for each PostgreSQL node, provide a different private key for each PostgreSQL node.
+
+`Public key for Postgresql`
+: If you have a custom certificate for the PostgreSQL nodes, provide your public key for PostgreSQL. If you have a custom certificate for each PostgreSQL node, provide a different public key for each PostgreSQL node.

 ## AWS Deployment

-- Details required for AWS deployment:
-  - `VPC ID` VPC Id in which you want to create cluster.
-  - `Private subnet ids` three private subnets are required to create cluster.
-  - `Public subnet ids` in case you want to have public load balancer then, Three public subnets are required to create cluster.
-  - `Instance type` instance type to create cluster.
-  - `EBS volume size` it should be based on your load needs.
-  - `EBS volume type` default is `gp3`, change based on your need.
-  - `EBS volume IOPS` it should be based on your load needs.
-  - `ssh key pair name` ssh key pair name on AWS Example `my-key`.
-  - `Region` AWS region to create cluster.
-  - `AMI Id` AWS AMI Id for specific region to create cluster of particular AMI.
-  - `AWS profile name`. AWS profile name configured in .aws/credentials, Skip this if the IAM role is configured on the bastion host.
+Settings required for AWS deployment.
+
+`VPC ID`
+: VPC ID in which to create the cluster.
+
+`Private subnet ids`
+: Three private subnets are required to create the cluster.
+
+`Public subnet ids`
+: If you want a public load balancer, three public subnets are required to create the cluster.
+
+`Instance type`
+: Instance type used to create the cluster.
+
+`EBS volume size`
+: The EBS volume size.
+
+`EBS volume type`
+: Default is `gp3`.
+
+`EBS volume IOPS`
+: Set this based on your load needs.
+
+`ssh key pair name`
+: SSH key pair name on AWS. For example, `my-key`.
+
+`Region`
+: AWS region in which to create the cluster.
+
+`AMI Id`
+: AWS AMI ID for the region in which the cluster is created.
+
+`AWS profile name`
+: AWS profile name configured in `.aws/credentials`. Skip this if the IAM role is configured on the bastion host.

 ## External Databases

-In Case of AWS managed or Customer managed databases below fields will be required.
+Settings for AWS-managed or customer-managed databases.

 ### OpenSearch

-- `Opensearch domain name` Opensearch domain name deployed on AWS or customer environment.
-- `Opensearch domain url` For AWS managed provide domain url without port and protocol Example: `opensearch.example.com`, and for customer managed opensearch provide domain url along with port Example `opensearch.example.com:9200`.
-- `Opensearch user name`, username to login to opensearch.
-- `Opensearch user passwords` password to login to opensearch.
-- `Opensearch root-ca` SSL root certificates to connect with opensearch, In Case of AWS managed databases we have option to use default aws certificates
-  - If using default certificates then no need to provide root certificates.
+`Opensearch domain name`
+: OpenSearch domain name deployed in the AWS or customer environment.
+
+`Opensearch domain url`
+: For AWS-managed OpenSearch, provide the domain URL without port and protocol.
+
+  For example, `opensearch.example.com`.
+
+: For customer-managed OpenSearch, provide the domain URL along with the port.
+
+  For example, `opensearch.example.com:9200`.
+
+`Opensearch user name`
+: Username to log in to OpenSearch.
+
+`Opensearch user passwords`
+: Password to log in to OpenSearch.
+
+`Opensearch root-ca`
+: SSL root certificates to connect with OpenSearch.
+
+  If you have AWS-managed databases, you can use the default AWS certificates. If you use the default certificates, then you don't need to provide root certificates.

 ### PostgreSQL

-- `Postgresql URL and port` Postgresql url along with port Example: `postgresql.example.com:5432`.
-- `PostgreSQL super username` master username to login to postgresql.
-- `PostgreSQL super user password` master password to login to postgresql.
-- `PostgreSQL database username` database username to login to postgresql.
-- `PostgreSQL database user password` database password to login to postgresql.
+`PostgreSQL URL and port`
+: PostgreSQL URL along with the port.
+
+  For example, `postgresql.example.com:5432`.
+
+`PostgreSQL super username`
+: Superuser username to log in to PostgreSQL.
+
+`PostgreSQL super user password`
+: Superuser password to log in to PostgreSQL.
+
+`PostgreSQL database username`
+: Database username to log in to PostgreSQL.
+
+`PostgreSQL database user password`
+: Database password to log in to PostgreSQL.

 ### AWS OpenSearch

-- `Aws OpenSearch snapshot arn` snapshot arn is required to take a backup from aws OpenSearch
-- `Aws OpenSearch snapshot user accesskey` snapshot user accesskey is required to take a backup from aws OpenSearch
-- `Aws OpenSearch snapshot secret key` snapshot user accesskey is required to take a backup from aws OpenSearch. Refer to the [Enabling OpenSearch Backup Restore](/automate/managed_services/#enabling-opensearch-backup-restore) section, to create them and get their values.
+`Aws OpenSearch snapshot arn`
+: Snapshot ARN, required to take a backup from AWS OpenSearch.
+
+`Aws OpenSearch snapshot user accesskey`
+: Snapshot user access key, required to take a backup from AWS OpenSearch.
+
+`Aws OpenSearch snapshot secret key`
+: Snapshot user secret key, required to take a backup from AWS OpenSearch. Refer to the [Enabling OpenSearch Backup Restore](/automate/managed_services/#enabling-opensearch-backup-restore) section to create them and get their values.

 ## Backup

-- If configuring backup at the time of deployment the we many need following fields:
-  - `Bucket name` for object storage or AWS S3 type of backup provide bucket name, for AWS deployment bucket will be created if not exist in AWS.
-  - `Access Key` S3 access key.
-  - `Secret Key` S3 secret key.
-  - `Endpoint` for object storage provide endpoint of object storage.
-  - `Region` for S3 provide region.
-  - `Mount path` in case of file system/efs backup provide mount path of backup directory.
+If backup is configured during deployment, set the following settings.
+
+`Bucket name`
+: Object storage bucket name. For AWS deployments, the bucket is created if it doesn't exist in AWS.
+
+`Access Key`
+: S3 access key.
+
+`Secret Key`
+: S3 secret key.
+
+`Endpoint`
+: Endpoint of the object storage.
+
+`Region`
+: S3 bucket region.
+
+`Mount path`
+: For file system or EFS backup, provide the mount path of the backup directory.
+
+`Location`
+: For Google Cloud Storage backup, `gcs`.
+
+  For S3 backup (AWS S3, MinIO, non-AWS S3), `s3`.
+
+`Google Service Account File`
+: For Google Cloud Storage, provide the credentials file path. For example, `/path/to/file/test.json`.
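+
+As a quick illustration of how these fields are used, a minimal sketch (assuming the `config gen` subcommand of the Automate HA CLI) is to generate the configuration once on the bastion host and save the answers to a TOML file:
+
+```sh
+# Answer the prompts described above; the resulting configuration is written to config.toml
+chef-automate config gen config.toml
+```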
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_disaster_recovery_setup.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_disaster_recovery_setup.md
index d0adad5769..d0e5f89a23 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_disaster_recovery_setup.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_disaster_recovery_setup.md
@@ -1,12 +1,11 @@
 +++
-title = "Disaster Recovery Setup"
-
+title = "Set Up On-Prem Deployment Disaster Recovery"
 draft = false
-
 gh_repo = "automate"
+
 [menu]
   [menu.automate]
-    title = "Disaster Recovery On Prem"
+    title = "Disaster Recovery On-Prem"
     parent = "automate/deploy_high_availability/disaster_recovery"
     identifier = "automate/deploy_high_availability/ha_disaster_recovery_setup.md Disaster Recovery"
     weight = 200
@@ -16,21 +15,21 @@ gh_repo = "automate"
 {{% automate/ha-warn %}}
 {{< /note >}}

-## Setup Disaster Recovery Cluster For OnPrem Deployment
+Recovery Point Objective (RPO) is the maximum acceptable amount of time since the last data recovery point.
+If an RPO of 1 to 24 hours is acceptable, then Chef recommends using a typical backup and restore strategy for your disaster recovery plan.

-Recovery Point Objective (RPO) is the maximum acceptable amount of time since the last data recovery point, if an RPO of 1 to 24 hours is acceptable then using a typical backup and restore strategy for your disaster recovery plan is recommended.
-Typically these two clusters should be located in different data centers or cloud provider regions.
+Set up these two clusters in different data centers or cloud provider regions.

-### Requirements
+## Requirements

 1. Two identical clusters located in different data centers or cloud provider regions
-1. Network accessible storage (NAS), object store (S3), available in both data centers/regions
+1. Network accessible storage (NAS), object store (S3, MinIO, Google Cloud Storage), available in both data centers/regions
 1. Ability to schedule jobs to run backup and restore commands in both clusters. We recommend using cron or a similar tool like anacron.

 In the above approach, there will be 2 identical clusters

-- Primary Cluster (or Production Cluster)
-- Disaster Recovery Cluster
+- primary cluster (or production cluster)
+- disaster recovery cluster

 ![Disaster Recovery Setup with 2 Identical Clusters](/images/automate/DR-2-cluster.png)

 When a failure of the primary cluster occurs, fail-over can be accomplished through updating DNS records.

 ### Caveat with the above approach

 - Running two parallel clusters can be expensive.
-- The amount of data loss will depend on how frequently backups are performed in the Primary cluster.
-- Changing DNS records from the Primary load balancer to the Disaster Recovery load balancer can take time to propagate through the network.
+- The amount of data loss will depend on how frequently backups are performed in the primary cluster.
+- Changing DNS records from the primary load balancer to the disaster recovery load balancer can take time to propagate through the network.

-### Steps to setup the Production and Disaster Recovery Cluster
+### Set up the production and disaster recovery cluster

-1. Deploy the Primary cluster following the deployment instructions by [clicking here](/automate/ha_onprim_deployment_procedure/#Run-these-steps-on-Bastion-Host-Machine).
+1. 
Deploy the primary cluster following the deployment instructions by [clicking here](/automate/ha_onprim_deployment_procedure/#deploy-the-bastion-host). -1. Deploy the Disaster Recovery cluster into a different data center/region using the same steps as the Primary cluster +1. Deploy the disaster recovery cluster into a different data center and region using the same steps as the primary cluster. 1. Do the backup configuration as explained in backup section for [file system](/automate/ha_backup_restore_file_system/) or [object storage](/automate/ha_backup_restore_object_storage/). {{< note >}} + Configure backups for both clusters using either [file system](/automate/ha_backup_restore_file_system/) or [object storage](/automate/ha_backup_restore_object_storage/). + {{< /note >}} -1. On Primary Cluster +1. On the primary cluster - From one of the Chef Automate nodes, configure a cronjob to run the `chef-automate backup` command at a regular interval. The sample cron for backup looks like: @@ -70,7 +71,7 @@ Configure backups for both clusters using either [file system](/automate/ha_back chef-automate bootstrap bundle create bootstrap.abb ``` - - Copy `bootstrap.abb` to all Automate and Chef Infra frontend nodes in the Disaster Recovery cluster. + - Copy `bootstrap.abb` to all Automate and Chef Infra frontend nodes in the disaster recovery cluster. {{< note >}} - Suggested frequency of backup and restore jobs is one hour. Be sure to monitor backup times to ensure they can be completed in the available time. @@ -78,7 +79,7 @@ Configure backups for both clusters using either [file system](/automate/ha_back - A cron job is a Linux command used to schedule a job that is executed periodically. {{< /note >}} - - To clean the data from the backed up storage, either schedule a cron or delete it manually. + - To clean the data from the backed-up storage, either schedule a cron or delete it manually. - To prune all but a certain number of the most recent backups manually, parse the output of chef-automate backup list and apply the command chef-automate backup delete. For example: @@ -87,15 +88,15 @@ Configure backups for both clusters using either [file system](/automate/ha_back export KEEP=10; export HAB_LICENSE=accept-no-persist; chef-automate backup list --result-json backup.json > /dev/null && hab pkg exec core/jq-static jq "[.result.backups[].id] | sort | reverse | .[]" -rM backup.json | tail -n +$(($KEEP+1)) | xargs -L1 -i chef-automate backup delete --yes {} ``` -1. On Disaster Recovery Cluster +1. On disaster recovery cluster - - Install `bootstrap.abb` on all the Frontend nodes (Chef-server and Automate nodes) by running the following command: + - Install `bootstrap.abb` on all the frontend nodes (Chef-server and Automate nodes) by running the following command: - ```cmd + ```sh sudo chef-automate bootstrap bundle unpack bootstrap.abb ``` - - We don't recommend creating backups from the Disaster Recovery cluster unless it has become the active cluster and receiving traffic from the clients/nodes. + - We don't recommend creating backups from the disaster recovery cluster unless it has become the active cluster and receiving traffic from the clients/nodes. - Stop all the services on all Automate and Chef Infra frontend nodes using the following command: @@ -105,7 +106,7 @@ Configure backups for both clusters using either [file system](/automate/ha_back - Make sure both backup and restore cron are aligned. 
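+
+     As an illustration, a cron entry that takes a backup at the top of every hour (a hypothetical schedule; align it with your RPO and with the restore cron) could look like:
+
+     ```sh
+     # Illustrative schedule only: hourly Chef Automate backup
+     0 * * * * chef-automate backup create
+     ```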
-  - Run the following command in one of the Automate nodes to get the IDs of all the backup:
+  - Run the following command in one of the Automate nodes to get the IDs of all the backups:

     ```sh
     chef-automate backup list
@@ -131,22 +132,38 @@ Configure backups for both clusters using either [file system](/automate/ha_back
       password = "admin"
     ```

-  - In the Disaster Recovery cluster, use the following sample command to restore the latest backup from any Chef Automate frontend instance.
+  - In the disaster recovery cluster, use the following sample command to restore the latest backup from any Chef Automate frontend instance.
+
+    For **S3/MinIO**, execute the following command from the bootstrapped Automate node to restore:

-    ```cmd
+    ```sh
     id=$(sudo chef-automate backup list | tail -1 | awk '{print $1}')
     sudo chef-automate backup restore /mnt/automate_backups/backups/$id/ --patch-config current_config.toml --airgap-bundle /var/tmp/frontend-4.x.y.aib --skip-preflight
     ```
+
+    For **GCS**, execute the following command from the bootstrapped Automate node to restore:

-    Sample cron for restoring backup saved in object storage (S3) looks like this:
+    ```sh
+    id=$(sudo chef-automate backup list | tail -1 | awk '{print $1}')
+    sudo chef-automate backup restore gs://bucket_name/path/to/backups/BACKUP_ID --patch-config current_config.toml --airgap-bundle /var/tmp/frontend-4.x.y.aib --skip-preflight --gcs-credentials-path "path/to/googleServiceAccount.json"
+    ```

-    ```cmd
+    Sample cron for restoring backup saved in object storage **(S3/MinIO)** looks like this:
+
+    ```sh
     id=$(chef-automate backup list | grep completed | tail -1 | awk '{print $1}')
     sudo chef-automate backup restore /automate/$id/ --patch-config /path/to/current_config.toml --airgap-bundle /var/tmp/frontend-4.x.y.aib --skip-preflight --s3-access-key "Access_Key" --s3-secret-key "Secret_Key"
     ```
+
+    Sample cron for restoring backup saved in object storage **(GCS)** looks like this:
+
+    ```sh
+    id=$(chef-automate backup list | grep completed | tail -1 | awk '{print $1}')
+    sudo chef-automate backup restore /automate/$id/ --patch-config /path/to/current_config.toml --airgap-bundle /var/tmp/frontend-4.x.y.aib --skip-preflight --gcs-credentials-path "path/to/googleServiceAccount.json"
+    ```

-### Switch to Disaster Recovery Cluster
+### Switch to disaster recovery cluster

 Steps to switch to the disaster recovery cluster are as follows:

@@ -157,6 +174,6 @@ Steps to switch to the disaster recovery cluster are as follows:
     systemctl start chef-automate
     ```

-- Update the Automate FQDN DNS entry to resolve to the Disaster Recovery load balancer.
-- The Disaster Recovery cluster will be the primary cluster, it may take some time for DNS changes to fully propagate.
+- Update the Automate FQDN DNS entry to resolve to the disaster recovery load balancer.
+- The disaster recovery cluster becomes the primary cluster. It may take some time for DNS changes to fully propagate.
- Setup backup cron to start taking backups of the now active cluster.
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_disaster_recovery_setup_AWS.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_disaster_recovery_setup_AWS.md index da1cedea4e..f1f7ac891e 100644 --- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_disaster_recovery_setup_AWS.md +++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_disaster_recovery_setup_AWS.md @@ -91,7 +91,7 @@ When the primary cluster fails, accomplish the fail-over by updating DNS records - Install `bootstrap.abb` on all the Frontend nodes (Chef-server and Automate nodes) by running the following command: - ```cmd + ```sh sudo chef-automate bootstrap bundle unpack bootstrap.abb ``` @@ -133,7 +133,7 @@ When the primary cluster fails, accomplish the fail-over by updating DNS records - In the Disaster Recovery cluster, use the following sample command to restore the latest backup from any Chef Automate frontend instance. - ```cmd + ```sh id=$(chef-automate backup list | grep completed | tail -1 | awk '{print $1}') sudo chef-automate backup restore /automate/$id/ --patch-config /path/to/current_config.toml --airgap-bundle /var/tmp/frontend-4.x.y.aib --skip-preflight --s3-access-key "Access_Key" --s3-secret-key "Secret_Key" ``` diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_existing_a2ha_to_automate_ha.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_existing_a2ha_to_automate_ha.md index 40cb9a2b5d..2d30e99138 100644 --- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_existing_a2ha_to_automate_ha.md +++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_existing_a2ha_to_automate_ha.md @@ -57,7 +57,7 @@ done 1. Run the following commands from any automate instance in A2HA Cluster. - ```cmd + ```sh sudo chef-automate backup create sudo chef-automate bootstrap bundle create bootstrap.abb ``` @@ -148,7 +148,7 @@ done 1. Copy the `bootstrap.abb` bundle to all the Frontend nodes of the Chef Automate HA cluster. Unpack the bundle using the below command on all the Frontend nodes. - ```cmd + ```sh sudo chef-automate bootstrap bundle unpack bootstrap.abb ``` 2. Stop the Service in all the frontend nodes with the below command. @@ -159,7 +159,7 @@ done 3. To restore the A2HA backup on Chef Automate HA, run the following command from any Chef Automate instance of the Chef Automate HA cluster: - ```cmd + ```sh sudo chef-automate backup restore /mnt/automate_backups/backups/20210622065515/ --patch-config current_config.toml --airgap-bundle /var/tmp/frontend-4.x.y.aib --skip-preflight ``` diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_faqs.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_faqs.md index 45702dd499..f11fe5da94 100644 --- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_faqs.md +++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_faqs.md @@ -20,6 +20,16 @@ This page explains the frequently encountered issues in Chef Automate High Avail ## Frequently Asked Questions +### What are different ways to provision an on-premise deployment? 
+- There are two types of infrastructure for provisioning an on-premises deployment:
+  - Existing Infrastructure
+  - Existing Cloud Infrastructure
+    - AWS
+    - Google Cloud Platform
+
+### What types of backup are available for deployments on the GCP platform?
+- Google Cloud Storage (GCS) and file system (FS) backups are supported for deployments on the GCP platform.
+
 ### How to check logs For automate nodes?

 - To view the logs please do ssh to the respective node by running the command from bastion node
   `./chef-automate ssh --hostname a2`
@@ -28,21 +38,21 @@ This page explains the frequently encountered issues in Chef Automate High Avail
   `journalctl --follow --unit chef-automate`

 ### How to check logs For Chef Infra Server nodes?
-- To view the logs please do ssh to the respective node by running the command from bastion node
+- To view the logs, SSH to the respective node by running the following command from the bastion node:
 `./chef-automate ssh --hostname cs`
 - choose the instance based on the output. To view the logs run the command
 `journalctl --follow --unit chef-automate`

 ### How to check logs For Postgres nodes?
-- To view the logs please do ssh to the respective node by running the command from bastion node
+- To view the logs, SSH to the respective node by running the following command from the bastion node:
 `./chef-automate ssh --hostname pg`
 - choose the instance based on the output. To view the logs run the command
 `journalctl --follow --unit hab-sup`

 ### How to check logs For Opensearch nodes?
-- To view the logs please do ssh to the respective node by running the command from bastion node
+- To view the logs, SSH to the respective node by running the following command from the bastion node:
 `./chef-automate ssh --hostname os`
 - choose the instance based on the output. To view the logs run the command
@@ -53,29 +63,30 @@ This page explains the frequently encountered issues in Chef Automate High Avail

-### How to Add more nodes In AWS Deployment, post deployment.
+### How to add more nodes in AWS deployment, post-deployment

 The commands require some arguments so that they can determine which types of nodes you want to add to your HA setup from your bastion host. It needs the count of the nodes you want to add as an argument when you run the command.
+
 For example,

-- if you want to add 2 nodes to automate, you have to run the:
+- If you want to add 2 nodes to automate, you have to run:

     ```sh
     chef-automate node add --automate-count 2
     ```

-- If you want to add 3 nodes to chef-server, you have to run the:
+- If you want to add 3 nodes to chef-server, you have to run:

     ```sh
     chef-automate node add --chef-server-count 3
     ```

-- If you want to add 1 node to OpenSearch, you have to run the:
+- If you want to add 1 node to OpenSearch, you have to run:

     ```sh
     chef-automate node add --opensearch-count 1
     ```

-- If you want to add 2 nodes to PostgreSQL you have to run:
+- If you want to add 2 nodes to PostgreSQL, you have to run:

     ```sh
     chef-automate node add --postgresql-count 2
     ```
@@ -89,7 +100,7 @@ You can mix and match different services if you want to add nodes across various
     chef-automate node add --automate-count 1 --postgresql-count 2
     ```

-- If you want to add 1 node to automate, 2 nodes to chef-server, and 2 nodes to PostgreSQL you have to run:
+- If you want to add 1 node to automate, 2 nodes to chef-server, and 2 nodes to PostgreSQL, you have to run:

     ```sh
     chef-automate node add --automate-count 1 --chef-server-count 2 --postgresql-count 2
     ```
@@ -100,7 +111,7 @@ Once the command executes, it will add the supplied number of nodes to your auto

 {{< note >}}

 - If you have patched some external config to any of the existing services then make sure you apply the same on the new nodes as well.
-For example, if you have patched any external configurations like SAML or LDAP, or any other done manually post-deployment in automate nodes, make sure to patch those configurations on the new automate nodes. The same must be followed for services like Chef-Server, Postgresql, and OpenSearch.
+For example, if you have patched any external configurations like SAML or LDAP, or any other done manually post-deployment in automate nodes, make sure to patch those configurations on the new automate nodes. The same must be followed for services like Chef-Server, PostgreSQL, and OpenSearch.
 - The new node will be configured with the certificates which were already configured in your HA setup.

 {{< /note >}}
@@ -110,7 +121,7 @@ For example, if you have patched any external configurations like SAML or LDAP,

 Downgrading the number of instance_count for the backend nodes will result in data loss. We do not recommend downgrading the backend nodes.
 {{< /warning >}}

-### Is Automate HA supports unencrypted traffic with managed service like AWS-Opensearch / RDS ?
+### Does Automate HA support unencrypted traffic with managed services like AWS OpenSearch or RDS?

 - No, Automate HA supports only HTTPS connections with managed services.

@@ -133,8 +144,6 @@
 for on premises deployment
 chef-automate cleanup --onprem-deployment
 ```
-
-
 ## HA Health Check Commands

 This section includes commands that you can execute for the Chef Automate cluster part of the Chef Automate High Availability (HA) system. These commands aid you in assessing the health and status of the components part of the HA cluster. It is highly recommended to run these commands on a test cluster before using them in a production environment.
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_healthcheck.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_healthcheck.md index 08daf991b7..2671288999 100644 --- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_healthcheck.md +++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_healthcheck.md @@ -22,25 +22,25 @@ This page includes commands that can be executed for the Chef Automate cluster p - Get the Automate HA cluster Information - ```cmd + ```sh chef-automate info ``` - Post Deployment, run the smoke test cases on Automate HA cluster and run the command from the bastion node. - ```cmd + ```sh chef-automate test --full ``` - Validate the cluster but skip "chef-automate diagnostic run" when performing the smoke tests - ```cmd + ```sh chef-automate test ``` - Run the smoke test on specific cluster - ```cmd + ```sh chef-automate test automate chef-automate test chef_server chef-automate test opensearch @@ -49,30 +49,30 @@ This page includes commands that can be executed for the Chef Automate cluster p - To get the status of the cluster, run the command from the bastion node. - ```cmd + ```sh chef-automate status ``` - To check the service status on Automate nodes. - ```cmd + ```sh chef-automate status --automate chef-automate status --a2 ``` - To check the service status on Chef Infra Server nodes. - ```cmd + ```sh chef-automate status --chef_server chef-automate status --cs ``` - To check the service status on Postgres nodes. - ```cmd + ```sh chef-automate status --postgresql chef-automate status -pg ``` - To check the service status on Opensearch nodes. -```cmd +```sh chef-automate status --opensearch chef-automate status --os ``` @@ -134,21 +134,21 @@ sorthands for --postgresql is --pg and -p - Collect the Gatherlogs for Automate HA cluster, and run the command from the bastion node. 
- logs are collected at `/var/tmp`

-```cmd
+```sh
chef-automate gather-logs
```

- View the active Habitat gossiped toml config for any locally loaded service:
- ssh to the backend opensearch nodes `chef-automate ssh --hostname os`

-```cmd
+```sh
source /hab/sup/default/SystemdEnvironmentFile.sh
automate-backend-ctl show --svc=automate-ha-opensearch
```

- ssh to the backend postgres nodes `chef-automate ssh --hostname pg`

-```cmd
+```sh
source /hab/sup/default/SystemdEnvironmentFile.sh
automate-backend-ctl show --svc=automate-ha-postgresql
```
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_inplace_migration.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_inplace_migration.md
index bc75581ffb..4782b1d7e5 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_inplace_migration.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_inplace_migration.md
@@ -1,15 +1,14 @@
 +++
 title = "In-Place A2HA to Automate HA"
-
 draft = false
-
 gh_repo = "automate"
+
 [menu]
-[menu.automate]
-title = "In-Place A2HA to Automate HA"
-parent = "automate/deploy_high_availability/migration"
-identifier = "automate/deploy_high_availability/migration/ha_inplace_migration.md In-Place A2HA to Automate HA"
-weight = 200
+  [menu.automate]
+    title = "In-Place A2HA to Automate HA"
+    parent = "automate/deploy_high_availability/migration"
+    identifier = "automate/deploy_high_availability/migration/ha_inplace_migration.md In-Place A2HA to Automate HA"
+    weight = 200
 +++

 {{< note >}}
@@ -22,8 +21,7 @@ weight = 200

 {{< /warning >}}

-
-This page explains the In-Place migration of A2HA to Automate HA. This migration involves the following steps:
+This page explains the in-place migration of A2HA to Automate HA. This migration involves the following steps:

 ## Prerequisites

@@ -37,31 +35,29 @@
 In order to verify the migration is completed successfully we'll need to capture current data in the existing infrastructure.

 Create `capture_infra_counts.sh` and run it using `./capture_infra_counts.sh > pre_migration_infra_counts.log`

-    ```bash
-    #!/usr/bin/bash
-
-    for i in `chef-server-ctl org-list`; do
-        org=https://localhost/organizations/$i
-        echo "Orgination: ${i}"
-        echo -n "node count: "
-        knife node list -s $org | wc -l
-        echo -n "client count: "
-        knife client list -s $org | wc -l
-        echo -n "cookbook count: "
-        knife cookbook list -s $org | wc -l
-        echo -n "total objects: "
-        knife list / -R -s $org | wc -l
-        echo "----------------"
-    done
-    ```
-
-
+```bash
+#!/usr/bin/bash
+
+for i in `chef-server-ctl org-list`; do
+    org=https://localhost/organizations/$i
+    echo "Organization: ${i}"
+    echo -n "node count: "
+    knife node list -s $org | wc -l
+    echo -n "client count: "
+    knife client list -s $org | wc -l
+    echo -n "cookbook count: "
+    knife cookbook list -s $org | wc -l
+    echo -n "total objects: "
+    knife list / -R -s $org | wc -l
+    echo "----------------"
+done
+```

## Taking Backup and clean up of instances

1. Take the latest backup of A2HA by running the following commands from any automate instance:

-    ```cmd
+    ```sh
     sudo chef-automate backup create
     ```
@@ -73,7 +69,7 @@ Create `capture_infra_counts.sh` and run it using `./capture_infra_counts.sh > p

    The output looks like as shown below:

-    ```cmd
+    ```sh
     Backup State Age
     20180508201548 completed 8 minutes old
     20180508201643 completed 8 minutes old
@@ -82,7 +78,7 @@ Create `capture_infra_counts.sh` and run it using `./capture_infra_counts.sh > p

1. 
Create a bootstrap bundle from one of the automate nodes using the following command:

-    ```cmd
+    ```sh
     sudo chef-automate bootstrap bundle create bootstrap.abb
     ```

@@ -90,21 +86,22 @@ Create `capture_infra_counts.sh` and run it using `./capture_infra_counts.sh > p

1. Stop each of the frontend nodes (automate and chef-server) using the following command:

-    ```cmd
+    ```sh
     sudo chef-automate stop
     ```

     Rename `/hab` dir to something else like `/hab-old`.
-
-    Remove the following files
-    * `/bin/chef-automate`
-    * `/bin/hab`
-    * `/bin/hab-launch`
-    * `/bin/hab-sup`
+
+    Remove the following files:
+
+    - `/bin/chef-automate`
+    - `/bin/hab`
+    - `/bin/hab-launch`
+    - `/bin/hab-sup`

1. Unload services from each of the Postgresql Nodes:

-    ```cmd
+    ```sh
     sudo hab svc unload chef/automate-backend-postgresql
     sudo hab svc unload chef/automate-backend-metricbeat
     sudo hab svc unload chef/automate-backend-journalbeat
@@ -116,7 +113,7 @@ Create `capture_infra_counts.sh` and run it using `./capture_infra_counts.sh > p

1. Unload services from each of the Elasticsearch Nodes

-    ```cmd
+    ```sh
     sudo hab svc unload chef/automate-backend-elasticsidecar
     sudo hab svc unload chef/automate-backend-elasticsearch
     sudo hab svc unload chef/automate-backend-journalbeat
@@ -136,11 +133,14 @@ Follow Automate HA installation documentation. Click [here](/automate/ha_onprim_
**provide** the same IPs and backup config in config.toml as in the `a2ha.rb` file.

## File System backup configuration
+
If the backup configuration was skipped in the deployment config.toml, the user needs to configure EFS backup manually in Automate HA. Click [here](/automate/ha_backup_restore_file_system/#configuration-for-automate-node-from-provision-host) to know more.

{{}}
+
+While configuring the backup, provide the path of **Elasticsearch** instead of **OpenSearch**, because the A2HA backup was stored in the Elasticsearch directory. For example, instead of `/mnt/automate_backups/opensearch/`, it will be `/mnt/automate_backups/elasticsearch/`.
+
{{}}

## Restore Backup

sudo chef-automate config show > current_config.toml
```

Find the following config in the **current_config.toml** file and update it to look like the following:

-```cmd
+```toml
[global.v1.external.opensearch.auth.basic_auth]
  username = "admin"
  password = "admin"
```

AND

-```cmd
+```toml
[global.v1.external.opensearch.backup.fs]
  path = "/mnt/automate_backups/elasticsearch"
```

Copy the **bootstrap.abb** bundle to all the Frontend nodes of the Chef Automate HA cluster. Unpack the bundle using the below command on all the Frontend nodes:

-```cmd
+```sh
sudo chef-automate bootstrap bundle unpack bootstrap.abb
```

To restore, use the below command from the same automate node. Make sure to **stop all other frontend nodes using `chef-automate stop`**:

-```cmd
+```sh
sudo chef-automate backup restore /mnt/automate_backups/backups/20210622065515/ --patch-config current_config.toml --airgap-bundle /var/tmp/frontend-4.x.y.aib --skip-preflight
```

@@ -245,10 +245,10 @@ Click [here](/automate/ha_backup_restore_object_storage/) to know more about the

## Troubleshoot

-1. 
While installing the new Automate HA, if PostgreSQL is having any issues in starting, and in PostgreSQL instance `hab svc status` shows a secret key mismatch error, then try the cleanup command with new Automate HA cli `chef-automate cleanup --onprem-deployment` and then remove `/bin/chef-automate` from all frontend nodes, now try the installation again.
+1. While installing the new Automate HA, if PostgreSQL has issues starting and `hab svc status` on a PostgreSQL instance shows a secret key mismatch error, try the cleanup command with the new Automate HA CLI, `chef-automate cleanup --onprem-deployment`, then remove `/bin/chef-automate` from all frontend nodes and try the installation again.
1. Click [here](/automate/ha_existing_a2ha_to_automate_ha/#troubleshooting) to know more if you encounter an error while restoring related to the ElasticSearch snapshot.

-2. While restoring the backup if an error related to backup directory occurs like
+2. While restoring the backup, if an error related to the backup directory occurs, such as:
   > **Error in Automate node:** failed to create snapshot repository: Elasticsearch repository create request failed for repo**
   > OR
   > **Error in Opensearch node:** /mnt/automate_backups/backups/automate-elasticsearch-data/chef-automate-*-service] doesn't match any of the locations specified by path.repo
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_node_bootstraping.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_node_bootstraping.md
index 6d957a549d..9b8045ddd7 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_node_bootstraping.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_node_bootstraping.md
@@ -180,7 +180,7 @@ host: ""

- Command to patch the config

-```cmd
+```sh
chef-automate config patch --cs
```

diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_on_premises_deployment_prerequisites.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_on_premises_deployment_prerequisites.md
index 2ed82b433f..c54d3a3821 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_on_premises_deployment_prerequisites.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_on_premises_deployment_prerequisites.md
@@ -17,14 +17,16 @@ automate = "On-Premises Prerequisites"
 {{< /note >}}

 {{< warning >}}
-The below prerequisites are according to the standard Chef Automate HA setup. You can contact the customer success manager or account manager if you use any specified version not mentioned here or a third-party extension or software.
+
+The following prerequisites apply to the standard Chef Automate HA setup. Contact your customer success manager or account manager if you use any version not mentioned here or a third-party extension or software.
+
 {{< /warning >}}

-Before installing Chef Automate HA in On-premises deployment mode, ensure you have taken a quick tour of this prerequisite page.
+Before installing Chef Automate HA in on-premises deployment mode, review the prerequisites on this page.
## Chef Automate Architecture

-We recommend using 11 node cluster for standard Automate HA on-premises deployment, as detailed in the table below:
+Chef recommends using an 11-node cluster for a standard Chef Automate HA on-premises deployment, as detailed in the table below:

| Service Type      | Count |
|-------------------|-------|
@@ -34,13 +36,17 @@ We recommend using 11 node cluster for standard Automate HA on-premises deployme
| OpenSearch DB     | 3     |
| Bastion Machine   | 1     |

-Additionally, this topology requires two load balancers and 2 DNS entries with certificates. Refer to the [architectural page](/automate/ha/#chef-automate-ha-architecture/) for further guidance.
+This topology requires two load balancers and two DNS entries with certificates. Refer to the [architectural page](/automate/ha/#chef-automate-ha-architecture/) for further guidance.
+
+Chef Automate HA requires a [high availability Chef Infra Server](/server/install_server_ha/) deployment; it does not support a standalone Chef Infra Server deployment.
+
+You can deploy a Chef Automate high availability cluster on AWS or Google Cloud Platform (GCP) VMs.

-We recommend using Chef Infra Server managed by Automate HA to have high availability for both Automate and Infra Server. External Standalone Infra Server will violate this high availability requirement.
+On-prem deployments of Chef Automate HA support making backups to a file system (FS) or object storage (S3/MinIO/Google Cloud Storage).

## Software Requirements

-The software requirements of the nodes in the cluster and other external Chef and non Chef tools are discussed below:
+The software requirements for nodes in the cluster and for other external Chef and non-Chef tools are discussed below.

### Node Software Requirements

@@ -85,13 +91,13 @@ Current Automate HA integrates with the following non-Chef tools:

### Minimum Hardware Requirement

-| Instance          | Count | vCPU | RAM | Storage Size(/hab) | AWS Machine Type | Additional Space |
-| ----------------- | ----- | ---- | --- | ------------------ | ---------------- | ----------------- |
-| Chef Automate     | 2     | 2    | 8   | 200 GB             | m5.large         | /var/tmp=5% /root=20% |
-| Chef Infra Server | 2     | 2    | 8   | 200 GB             | m5.large         | /var/tmp=5% /root=20% |
-| PostgreSQL DB     | 3     | 2    | 8   | 200 GB             | m5.large         | /var/tmp=5% /root=20% |
-| OpenSearch DB     | 3     | 2    | 8   | 200 GB             | m5.large         | /var/tmp=5% /root=20% |
-| Bastion Machine   | 1     | 2    | 8   | 200 GB             | m5.large         | /var/tmp=5% /root=20% |
+| Instance          | Count | vCPU | RAM | Storage Size (/hab) | AWS Machine Type | GCP Machine Type | Additional Space  |
+| ----------------- | ----- | ---- | --- | ------------------- | ---------------- | ---------------- | ----------------- |
+| Chef Automate     | 2     | 2    | 8   | 200 GB              | m5.large         | n2-standard-2    | /var/tmp=5% /root=20% |
+| Chef Infra Server | 2     | 2    | 8   | 200 GB              | m5.large         | n2-standard-2    | /var/tmp=5% /root=20% |
+| PostgreSQL DB     | 3     | 2    | 8   | 200 GB              | m5.large         | n2-standard-2    | /var/tmp=5% /root=20% |
+| OpenSearch DB     | 3     | 2    | 8   | 200 GB              | m5.large         | n2-standard-2    | /var/tmp=5% /root=20% |
+| Bastion Machine   | 1     | 2    | 8   | 200 GB              | m5.large         | n2-standard-2    | /var/tmp=5% /root=20% |

{{< note >}} For production, OpenSearch volume size also depends on the number of nodes and frequency of Chef Infra Client runs and compliance scans.
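+
+For example, to confirm that a node's `/hab` volume meets the 200 GB sizing above, you can check its size and usage with a standard disk-usage query (a generic check; adjust the mount point if your layout differs):
+
+```sh
+df -h /hab
+```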
@@ -233,7 +239,7 @@ Active/Active Disaster Recovery is not supported right now as we do not support

The requirements for disaster recovery setup (Active/Passive) are:

- Two identical clusters located in different data centers or cloud provider regions.
-- Network Attached Storage (NAS) or Object Store (S3) should be available in both data centers/regions.
+- Network Attached Storage (NAS) or Object Store (S3/MinIO/Google Cloud Storage) should be available in both data centers/regions.
- Set up scheduled jobs to run backup and restore commands on both clusters. We recommend using **cron** to schedule the jobs.

To know more about the on-premises deployment disaster recovery, visit our [Disaster Recovery Setup](/automate/ha_disaster_recovery_setup/) page.
@@ -264,6 +270,6 @@ To know more about the on-premises deployment disaster recovery, visit our [Disa

## Backup and Restore

-In On-premises deployment of Automate HA, we support [**Network File System (NFS)**](/automate/ha_backup_restore_file_system/) or [**Object Storage (S3/MinIO)**](/automate/ha_backup_restore_object_storage/) for taking backup.
+In an on-premises deployment of Automate HA, we support [**Network File System (NFS)**](/automate/ha_backup_restore_file_system/) or [**Object Storage (S3/MinIO/Google Cloud Storage)**](/automate/ha_backup_restore_object_storage/) for taking backups.

-Encrypted S3 bucket are supported with only Amazon S3 managed keys (SSE-S3).
+Encrypted S3 buckets are only supported with Amazon S3 managed keys (SSE-S3).
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_onprim_deployment_procedure.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_onprim_deployment_procedure.md
index 1e7cfd02eb..af2bff8e23 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_onprim_deployment_procedure.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_onprim_deployment_procedure.md
@@ -1,11 +1,11 @@
+++
-title = "On-premise Deployment with Chef Managed Database"
+title = "On-Prem Deployment with Chef Managed Database"
draft = false

gh_repo = "automate"

[menu]
  [menu.automate]
-    title = "On-premise Deployment with Chef Managed Database"
+    title = "On-Prem Deployment with Chef Managed Database"
    parent = "automate/deploy_high_availability/deployment"
    identifier = "automate/deploy_high_availability/deployment/ha_onprim_deployment_procedure.md On-premise Deployment with Chef Managed Database"
    weight = 200
@@ -15,93 +15,100 @@ gh_repo = "automate"
{{% automate/ha-warn %}}
{{< /note >}}

-This section will discuss deploying Chef Automate HA on-premise machines with chef managed database. Please see the [On-Premises Prerequisites](/automate/ha_on_premises_deployment_prerequisites/) page and move ahead with the following sections of this page.
+This document explains how to deploy Chef Automate HA on on-premises machines with a Chef managed database.
+Please see the [On-Premises Prerequisites](/automate/ha_on_premises_deployment_prerequisites/) page and move ahead with the following sections of this page.

{{< warning >}}

-- PLEASE DO NOT MODIFY THE WORKSPACE PATH; it should always be "/hab/a2_deploy_workspace".
-- We currently don't support AD managed users in nodes. We only support local Linux users.
+- Do not modify the workspace path. It should always be `/hab/a2_deploy_workspace`.
+- We don't support AD managed users in nodes. We only support local Linux users.
- If you have configured a sudo password for the user, you must create an environment variable `sudo_password` and set the password as the variable's value. Example: `export sudo_password=`. And then, run all sudo commands with the `sudo -E` or `--preserve-env` option. Example: `sudo -E ./chef-automate deploy config.toml --airgap-bundle automate.aib`. This is required for the `chef-automate` CLI to run the commands with sudo privileges. Please refer to [this page](/automate/ha_sudo_password/) for details.
- If SELinux is enabled, deployment will configure it to `permissive` (SELinux is usually enabled on RHEL).

{{< /warning >}}

-## Steps to run on Bastion Host Machine
+## Provisioning

-1. Run the below commands to download the latest Automate CLI and Airgapped Bundle:
+Provision the other nodes in the high availability cluster before deploying the bastion host.
+
+Make sure you have all resources either on existing infrastructure or on existing cloud infrastructure (AWS/Google Cloud Platform).
+
+## Deploy the bastion host
+
+1. Run the following commands to download the latest Automate CLI and airgapped bundle:

    ```bash
-    #Run commands as sudo.
    sudo -- sh -c "
-    #Download Chef Automate CLI.
    curl https://packages.chef.io/files/current/latest/chef-automate-cli/chef-automate_linux_amd64.zip \
    | gunzip - > chef-automate && chmod +x chef-automate \
    | cp -f chef-automate /usr/bin/chef-automate
-    #Download the latest Airgapped Bundle.
-    #To download specific version bundle, example version: 4.2.59 then replace latest.aib with 4.2.59.aib
    curl https://packages.chef.io/airgap_bundle/current/automate/latest.aib -o automate.aib
    "
    ```

+    To download a specific version of the bundle, replace `latest.aib` with the Chef Automate version number. For example, `4.2.59.aib`.
+
    {{< note spaces=4 >}}
+
    Chef Automate bundles are available for 365 days from the release of a version. However, the milestone release bundles are available for download forever.
-    {{< /note >}}
-    {{< note >}} If the Airgapped Bastion machine differs, transfer the Bundle file (`latest.aib`) and Chef Automate CLI binary (`chef-automate`) to the Airgapped Bastion Machine using the `scp` command. {{< /note >}}
+
+    {{< /note >}}

-    After transferring, in Airgapped Bastion, run the below commands:
+    {{< note >}} If the airgapped bastion host differs, transfer the bundle file (`latest.aib`) and Chef Automate CLI binary (`chef-automate`) to the airgapped bastion host using the `scp` command. After transferring the bundle file to the airgapped bastion host, run the following commands:

    ```bash
-    #Run commands as sudo.
    sudo -- sh -c "
    #Move the Chef Automate CLI to `/usr/bin`.
    cp -f chef-automate /usr/bin/chef-automate
    "
    ```
+
+    {{< /note >}}

-## Steps to Generate Config
+## Generate Chef Automate configuration file

-1. Generate config using the below command:
+1. Generate the configuration file.

-```bash
-sudo chef-automate config gen config.toml
-```
+    ```bash
+    sudo chef-automate config gen config.toml
+    ```

Click [here](/automate/ha_config_gen) to know more about generating config.

-You can also view the [Sample Config](#sample-config).
+You can also view the [Sample Config](#sample-config) and the [Sample Config For 5 Node Cluster](#sample-config-for-5-nodes-cluster).
+
+{{< note >}}

-{{< note >}} You can also generate config using **init config** and then generate init config for existing infrastructure. The command is as shown below:
+You can also generate a configuration file using the `init-config-ha` subcommand.
-`chef-automate init-config-ha existing_infra`{{< /note >}}
+`chef-automate init-config-ha existing_infra`
+
+{{< /note >}}

## Config Verify

-1. We verify the above config using the below command :
+1. Verify the above config using the `verify` subcommand.

    ```bash
    sudo chef-automate verify -c config.toml
    ```

-    To know more about config verify you can check [Config Verify Doc page](/automate/ha_verification_check/).
+    To know more about config verify, check the [Config Verify Documentation](/automate/ha_verification_check/).

-    Once the verification is successfully completed, then proceed with deployment, In case of failure please fix the issue and re-run the verify command.
+    Once the verification has completed successfully, proceed with the deployment. In case of failure, fix the issue and re-run the verify command.

## Steps to Deploy

-1. The following command will run the deployment. The deploy command will run the verify command internally, to skip a verification process during deploy command use `--skip-verify` flag
+1. The following command will run the deployment. The deploy command first runs the verify command internally; to skip verification during deployment, use the `--skip-verify` flag.

    ```bash
    chef-automate deploy config.toml --airgap-bundle automate.aib
    ```

-    To skip verification in the deploy command, use `--skip-verify` flag
+    To skip verification during deployment, use the `--skip-verify` flag:

    ```bash
    chef-automate deploy config.toml --airgap-bundle automate.aib --skip-verify
    ```

## Verify Deployment

-1. Once the deployment is successful, Get the consolidate status of the cluster
+1. Once the deployment is successful, get the consolidated status of the cluster:

    ```bash
    chef-automate status summary
    ```

1. Get the service status from each node

    ```bash
    chef-automate status
    ```

-1. Post Deployment, you can run the verification command
+1. Post deployment, you can run the verification command:

    ```bash
-    chef-automate verfiy
+    chef-automate verify
    ```

-1. Get the cluster Info
+1. Get the cluster info:

    ```bash
    chef-automate info
    ```

Check if Chef Automate UI is accessible by going to (Domain used for Chef Automate) [https://chefautomate.example.com](https://chefautomate.example.com).

After successful deployment, proceed with following...
  1. Create user and orgs, Click [here](/automate/ha_node_bootstraping/#create-users-and-organization) to learn more about user and org creation
  1. Workstation setup, Click [here](/automate/ha_node_bootstraping/#workstation-setup) to learn more about workstation setup
-  1. Node bootstrapping, Click [here](/automate/ha_node_bootstraping/#bootstraping-a-node) to learn more about node bootstrapping. 
+  1. Node bootstrapping, Click [here](/automate/ha_node_bootstraping/#bootstraping-a-node) to learn more about node bootstrapping.

## Backup/Restore

A shared file system is always required to create OpenSearch snapshots. To regis

## Add/Remove Nodes

-The Chef Automate commands require some arguments so that it can determine which types of nodes you want to add or remove to/from your HA setup from your bastion host. To know more see [Add Nodes to the Deployment](/automate/ha_add_nodes_to_the_deployment) to add nodes and [Remove Single Node from Cluster](/automate/ha_remove_single_node_from_cluster) to remove nodes.
+The Chef Automate commands require some arguments so that they can determine which types of nodes you want to add or remove to/from your HA setup from your bastion host. To know more, see [Add Nodes to the Deployment](/automate/ha_add_nodes_to_the_deployment) to add nodes and [Remove Single Node from Cluster](/automate/ha_remove_single_node_from_cluster) to remove nodes.
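+
+For example, adding a Chef Automate node from the bastion host might look like the following (a sketch only; the `--automate-ips` flag name is an assumption based on the Add Nodes documentation linked above, and the IP is illustrative — run `chef-automate node add --help` to confirm):
+
+```sh
+chef-automate node add --automate-ips "192.0.0.11"
+```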

## Patch Configs

@@ -147,18 +154,18 @@ The bastion server can patch new configurations in all nodes. To know more see [

{{< note >}}

-- Assuming 10+1 nodes (1 bastion, 2 for automate UI, 2 for Chef-server, 3 for Postgresql, 3 for OpenSearch).
+- Assuming 10+1 nodes (1 bastion, 2 for Chef Automate, 2 for Chef Infra Server, 3 for PostgreSQL, 3 for OpenSearch).
- The following config will, by default, leave the backup configuration empty.
- To provide multiline certificates use triple quotes like `""" multiline certificate contents"""`.

{{< /note >}}

-```config
+```toml
[architecture]
  [architecture.existing_infra]
    ssh_user = "ec2-user"
    ssh_group_name = "ec2-user"
-    ssh_key_file = "~/.ssh/my-key.pem"
+    ssh_key_file = "/home/ec2-user/KEY_FILENAME.pem"
    ssh_port = "22"
    secrets_key_file = "/hab/a2_deploy_workspace/secrets.key"
    secrets_store_file = "/hab/a2_deploy_workspace/secrets.json"
@@ -196,10 +203,64 @@
    postgresql_private_ips = ["192.0.0.8", "192.0.0.9", "192.0.0.10"]
```

+## Sample Config For 5 Nodes Cluster
+
+{{< note >}}
+
+- Assuming 5+1 nodes (1 bastion, 2 for Chef Automate and Chef Infra Server, 3 for PostgreSQL and OpenSearch).
+- For the frontend nodes, you can use the same IPs for Chef Automate and Chef Infra Server.
+- For the backend nodes, you can use the same IPs for PostgreSQL and OpenSearch.
+- To provide multiline certificates use triple quotes like `""" multiline certificate contents"""`.
+
+{{< /note >}}
+
+```toml
+[architecture]
+  [architecture.existing_infra]
+    ssh_user = "ec2-user"
+    ssh_group_name = "ec2-user"
+    ssh_key_file = "/home/ec2-user/my-key.pem"
+    ssh_port = "22"
+    secrets_key_file = "/hab/a2_deploy_workspace/secrets.key"
+    secrets_store_file = "/hab/a2_deploy_workspace/secrets.json"
+    architecture = "existing_nodes"
+    workspace_path = "/hab/a2_deploy_workspace"
+    backup_mount = "/mnt/automate_backups"
+    backup_config = "file_system"
+[automate]
+  [automate.config]
+    admin_password = "Progress@123"
+    fqdn = "chefautomate.example.com"
+    config_file = "configs/automate.toml"
+    root_ca = """-----BEGIN CERTIFICATE-----
+
+    -----END CERTIFICATE-----"""
+    instance_count = "2"
+[chef_server]
+  [chef_server.config]
+    fqdn = "chefinfraserver.example.com"
+    lb_root_ca = """-----BEGIN CERTIFICATE-----
+
+    -----END CERTIFICATE-----"""
+    instance_count = "2"
+[opensearch]
+  [opensearch.config]
+    instance_count = "3"
+[postgresql]
+  [postgresql.config]
+    instance_count = "3"
+[existing_infra]
+  [existing_infra.config]
+    automate_private_ips = ["192.0.0.1", "192.0.0.2"]
+    chef_server_private_ips = ["192.0.0.1", "192.0.0.2"]
+    opensearch_private_ips = ["192.0.0.5", "192.0.0.6", "192.0.0.7"]
+    postgresql_private_ips = ["192.0.0.5", "192.0.0.6", "192.0.0.7"]
+```
+
## Uninstall Chef Automate HA

-To uninstall Chef Automate HA instances after unsuccessful deployment, run the below command in your bastion host.
+To uninstall Chef Automate HA instances after unsuccessful deployment, run the `cleanup` command on your bastion host.
```bash
-    chef-automate cleanup --onprem-deployment
+chef-automate cleanup --onprem-deployment
```
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_onprim_deployment_with_aws_managed_deployment.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_onprim_deployment_with_aws_managed_deployment.md
index f5c2297d97..3a4fff40dc 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_onprim_deployment_with_aws_managed_deployment.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_onprim_deployment_with_aws_managed_deployment.md
@@ -1,11 +1,11 @@
+++
-title = "On-premise Deployment with AWS Managed Database"
+title = "On-Prem Deployment with AWS Managed Database"
draft = false

gh_repo = "automate"

[menu]
  [menu.automate]
-    title = "On-premise Deployment with AWS Managed Database"
+    title = "On-Prem Deployment with AWS Managed Database"
    parent = "automate/deploy_high_availability/deployment"
    identifier = "automate/deploy_high_availability/deployment/ha_onprim_deployment_with_aws_managed_deployment.md On-premise Deployment with AWS Managed Database"
    weight = 210
@@ -23,9 +23,11 @@ This section will discuss deploying Chef Automate HA on-premise machines with AW

{{< /warning >}}

-See the steps [here](/automate/ha_onprim_deployment_procedure/#steps-to-run-on-bastion-host-machine) to run on Bastion to download the latest Automate CLI and Airgapped Bundle.
+- Before proceeding with the deployment steps, make sure to provision the cluster nodes. See [Provisioning](/automate/ha_onprim_deployment_procedure/#provisioning) to know more.

-## Steps to Generate Config
+- See the steps [here](/automate/ha_onprim_deployment_procedure/#deploy-the-bastion-host) to run on the bastion host to download the latest Automate CLI and airgapped bundle.
+
+## Generate Configuration File

1. Generate config using the below command:

@@ -37,11 +39,11 @@ Click [here](/automate/ha_config_gen) to know more about generating config.

You can also view the [Sample Config](#sample-config-to-setup-on-premise-deployment-with-aws-managed-services).

-{{< note >}} You can also generate config using **init config** and then generate init config for existing infrastructure. The command is as shown below:
+{{< note >}} You can also generate a configuration file using the `init-config-ha` subcommand. The command is as shown below:

`chef-automate init-config-ha existing_infra`{{< /note >}}

-## Config Verify
+## Verify Configuration File

1. We verify the above config using the below command:

    ```bash
    sudo chef-automate verify -c config.toml
    ```

-    To know more about config verify you can check [Config Verify Doc page](/automate/ha_verification_check/).
+    To know more about config verify, you can check the [Config Verify Doc page](/automate/ha_verification_check/).

-    Once the verification is successfully completed, then proceed with deployment, In case of failure please fix the issue and re-run the verify command.
+    Once the verification is successfully completed, proceed with deployment. In case of failure, fix the issue and re-run the verify command.

## Steps to Deploy

-The following command will run the deployment. The deploy command will run the verify command internally, to skip verification process during deploy command use `--skip-verify` flag
+The following command will run the deployment. The deploy command first runs the verify command internally; to skip verification during deployment, use the `--skip-verify` flag.

```bash
chef-automate deploy config.toml --airgap-bundle automate.aib
```

-To skip verification in the deploy command, use `--skip-verify` flag
+To skip verification in the deployment command, use the `--skip-verify` flag:

```bash
chef-automate deploy config.toml --airgap-bundle automate.aib --skip-verify
```

## Verify Deployment

-1. Once the deployment is successful, Get the consolidate status of the cluster
+1. Once the deployment is successful, get the consolidated status of the cluster:

    ```bash
    chef-automate status summary
    ```

1. Get the service status from each node

    ```bash
    chef-automate status
    ```

1. Post Deployment, you can run the verification command

    ```bash
-    chef-automate verfiy
+    chef-automate verify
    ```

-1. Get the cluster Info
+1. Get the cluster info:

    ```bash
    chef-automate info
    ```

Check if Chef Automate UI is accessible by going to (Domain used for Chef Automate) [https://chefautomate.example.com](https://chefautomate.example.com).

-After successful deployment, proceed with following:
+After successful deployment, proceed with the following:

  1. Create user and orgs, Click [here](/automate/ha_node_bootstraping/#create-users-and-organization) to learn more about user and org creation
  1. Workstation setup, Click [here](/automate/ha_node_bootstraping/#workstation-setup) to learn more about workstation setup
-  1. Node bootstrapping, Click [here](/automate/ha_node_bootstraping/#bootstraping-a-node) to learn more about node bootstrapping. 
+  1. Node bootstrapping, Click [here](/automate/ha_node_bootstraping/#bootstraping-a-node) to learn more about node bootstrapping.

## Backup/Restore

@@ -114,12 +116,12 @@ The bastion server can patch new configurations in all nodes. To know more see [

## Sample Config to setup On-Premise Deployment with AWS Managed Services

-```config
+```toml
[architecture]
  [architecture.existing_infra]
    ssh_user = "ec2-user"
    ssh_group_name = "ec2-user"
-    ssh_key_file = "~/.ssh/my-key.pem"
+    ssh_key_file = "/home/ec2-user/KEY_FILENAME.pem"
    ssh_port = "22"
    secrets_key_file = "/hab/a2_deploy_workspace/secrets.key"
    secrets_store_file = "/hab/a2_deploy_workspace/secrets.json"
@@ -183,5 +185,5 @@ The bastion server can patch new configurations in all nodes. To know more see [

To uninstall Chef Automate HA instances after unsuccessful deployment, run the below command in your bastion host.
```bash
-    chef-automate cleanup --onprem-deployment
+chef-automate cleanup --onprem-deployment
```
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_onprim_deployment_with_customer_managed_deployment.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_onprim_deployment_with_customer_managed_deployment.md
index a8f753189d..8eb2100ddf 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_onprim_deployment_with_customer_managed_deployment.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_onprim_deployment_with_customer_managed_deployment.md
@@ -1,11 +1,11 @@
+++
-title = "On-premise Deployment with Customer Managed Database"
+title = "On-Prem Deployment with Customer Managed Database"
draft = false

gh_repo = "automate"

[menu]
  [menu.automate]
-    title = "On-premise Deployment with Customer Managed Database"
+    title = "On-Prem Deployment with Customer Managed Database"
    parent = "automate/deploy_high_availability/deployment"
    identifier = "automate/deploy_high_availability/deployment/ha_onprim_deployment_with_customer_managed_deployment.md On-premise Deployment with Customer Managed Database"
    weight = 220
@@ -15,19 +15,21 @@ gh_repo = "automate"
{{% automate/ha-warn %}}
{{< /note >}}

-This section will discuss deploying Chef Automate HA on-premise machines with customer managed database. Please see the [On-Premises Prerequisites](/automate/ha_on_premises_deployment_prerequisites/) page and move ahead with the following sections of this page.
+This section will discuss deploying Chef Automate HA on on-premises machines with a customer-managed database. Please see the [On-Premises Prerequisites](/automate/ha_on_premises_deployment_prerequisites/) page and move ahead with the following sections of this page.

{{< warning >}}

-- If SELinux is enabled, deployment with configure it to `permissive` (Usually in case of RHEL SELinux is enabled)
+If SELinux is enabled, deployment will configure it to `permissive` (SELinux is usually enabled on RHEL).

{{< /warning >}}

-See the steps [here](/automate/ha_onprim_deployment_procedure/#steps-to-run-on-bastion-host-machine) to run on Bastion to download the latest Automate CLI and Airgapped Bundle.
+- Before proceeding with the deployment steps, make sure to provision the cluster nodes. See [Provisioning](/automate/ha_onprim_deployment_procedure/#provisioning) to know more.

-## Steps to Generate Config
+- See the steps [here](/automate/ha_onprim_deployment_procedure/#deploy-the-bastion-host) to run on the bastion host to download the latest Automate CLI and airgapped bundle.

-1. Generate config using the below command:
+## Generate Chef Automate config
+
+1. Generate the configuration file.

    ```bash
    sudo chef-automate config gen config.toml
    ```
@@ -37,56 +39,61 @@ See the steps [here](/automate/ha_onprim_deployment_procedure/#steps-to-run-on-b

You can also view the [Sample Config](#sample-config-to-setup-on-premises-deployment-with-self-managed-services).

-    {{< note >}} You can also generate config using **init config** and then generate init config for existing infrastructure. The command is as shown below:
+    {{< note spaces=4 >}}
+
+    You can also generate a configuration file for existing infrastructure using the `init-config-ha` subcommand.

-    `chef-automate init-config-ha existing_infra`{{< /note >}}
+    `chef-automate init-config-ha existing_infra`

-## Config Verify
+    {{< /note >}}

-1. We verify the above config using the below command :
+## Verify
+
+1. Verify the configuration file.

    ```bash
    sudo chef-automate verify -c config.toml
    ```

-    To know more about config verify you can check [Config Verify Doc page](/automate/ha_verification_check/).
+    To know more about config verify, you can check the [Config Verify Doc page](/automate/ha_verification_check/).

-    Once the verification is successfully completed, then proceed with deployment, In case of failure please fix the issue and re-run the verify command.
+    Once the verification is successfully completed, proceed with deployment. In case of failure, fix the issue and re-run the verify command.

## Steps to Deploy

-The following command will run the deployment. The deploy command will run the verify command internally, to skip a verification process during deploy command use `--skip-verify` flag
+The following command will run the deployment.

```bash
-    chef-automate deploy config.toml --airgap-bundle automate.aib
+chef-automate deploy config.toml --airgap-bundle automate.aib
```

To skip verification in the deploy command, use the `--skip-verify` flag:
+
```bash
-    chef-automate deploy config.toml --airgap-bundle automate.aib --skip-verify
+chef-automate deploy config.toml --airgap-bundle automate.aib --skip-verify
```

## Verify Deployment

-1. Once the deployment is successful, Get the consolidate status of the cluster
+1. Once the deployment is successful, get the consolidated status of the cluster.

    ```bash
    chef-automate status summary
    ```

-1. Get the service status from each node
+1. Get the service status from each node.

    ```bash
    chef-automate status
    ```

-1. Post Deployment, you can run the verification command
+1. Run the verification command.

    ```bash
-    chef-automate verfiy
+    chef-automate verify
    ```

-1. Get the cluster Info
+1. Get the cluster information.

    ```bash
    chef-automate info
    ```
@@ -94,11 +101,11 @@ To skip verification in the deploy command, use `--skip-verify` flag

Check if Chef Automate UI is accessible by going to (Domain used for Chef Automate) [https://chefautomate.example.com](https://chefautomate.example.com).

-After successful deployment, proceed with following:
+After successful deployment, proceed with the following:

  1. Create user and orgs, Click [here](/automate/ha_node_bootstraping/#create-users-and-organization) to learn more about user and org creation
  1. Workstation setup, Click [here](/automate/ha_node_bootstraping/#workstation-setup) to learn more about workstation setup
-  1. Node bootstrapping, Click [here](/automate/ha_node_bootstraping/#bootstraping-a-node) to learn more about node bootstrapping. 
+  1. Node bootstrapping, Click [here](/automate/ha_node_bootstraping/#bootstraping-a-node) to learn more about node bootstrapping.

## Backup/Restore

@@ -112,14 +119,14 @@ The Chef Automate commands require some arguments so that it can determine which

The bastion server can patch new configurations in all nodes. To know more see [Patch Configuration](/automate/ha_config/#patch-configuration) section.
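+
+For example, to patch a set of Chef Infra Server configuration changes from the bastion host (a sketch; `cs_config.toml` is a hypothetical file name, and `--cs` is the Chef Infra Server flag shown earlier in this changeset in the node bootstrapping doc):
+
+```sh
+chef-automate config patch cs_config.toml --cs
+```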

-## Sample Config to setup On-Premises Deployment with Self Managed Services
+## Sample Config to setup On-Premises Deployment with Self Managed Services

-```config
+```toml
[architecture]
  [architecture.existing_infra]
    ssh_user = "ec2-user"
    ssh_group_name = "ec2-user"
-    ssh_key_file = "~/.ssh/my-key.pem"
+    ssh_key_file = "/home/ec2-user/KEY_FILENAME.pem"
    ssh_port = "22"
    secrets_key_file = "/hab/a2_deploy_workspace/secrets.key"
    secrets_store_file = "/hab/a2_deploy_workspace/secrets.json"
@@ -182,5 +189,5 @@ The bastion server can patch new configurations in all nodes. To know more see [

To uninstall Chef Automate HA instances after unsuccessful deployment, run the below command in your bastion host.

```bash
-    chef-automate cleanup --onprem-deployment
+chef-automate cleanup --onprem-deployment
```
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_performance_benchmarks.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_performance_benchmarks.md
index 02acf9eede..574edc3e8b 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_performance_benchmarks.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_performance_benchmarks.md
@@ -2,6 +2,7 @@
title = "Performance Benchmarks"
date = 2023-07-10T21:33:17-08:00
draft = false
+gh_repo = "automate"

[menu]
  [menu.automate]
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_remove_single_node_from_cluster.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_remove_single_node_from_cluster.md
index 90776bdc89..8e5e7c315d 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_remove_single_node_from_cluster.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_remove_single_node_from_cluster.md
@@ -22,13 +22,13 @@ Chef Automate HA comes with five different types of deployment flows. This page

- We do not recommend the removal of any node from the backend cluster, but replacing the node is recommended. For the replacement of a node, click [here](#replace-node-in-automate-ha-cluster) for reference.
- Removal of nodes for PostgreSQL or OpenSearch is at your own risk and may result in data loss. Consult your database administrator before trying to delete PostgreSQL or OpenSearch nodes.
- Below process can be done for `chef-server` and `automate`.
-- Only one node can be removed simultaneously, irrespective of node type.
+- Only one node can be removed at a time, irrespective of node type.

{{< /warning >}}

{{< note >}}

-- The flags like `opensearch-ips` and `postgresql-ips` are only applicable for the Chef Managed Database cluster
+- The flags like `opensearch-ip` and `postgresql-ip` are only applicable for the Chef Managed Database cluster.

{{< /note >}}

@@ -78,9 +78,9 @@ Once the command executes, it will remove the particular node from your HA setup

## Remove Single Node From Cluster on AWS Deployment

-In this section, we will see how to remove single nodes from the AWS deployment for AWS managed database.
+In this section, we will see how to remove single nodes from the AWS deployment.

-The command requires some arguments to determine which types of nodes you want to remove from your HA setup from your bastion host. It needs the node's IP address you want to remove as an argument when you run the command.
+The command requires some arguments to determine the type of node you want to remove from your HA setup from your bastion host. It needs the node's IP address you want to remove as an argument when you run the command. For example,

- To remove Automate node, run the following command:

    ```sh
    chef-automate node remove --automate-ip ""
    ```

-- To remove Chef Server node, run the following command:
+- To remove a Chef Infra Server node, run the following command:

    ```sh
    chef-automate node remove --chef-server-ip ""
    ```
@@ -106,4 +106,4 @@ The command requires some arguments to determine which types of nodes you want t
    chef-automate node remove --postgresql-ip ""
    ```

-Once the command executes, it will remove the paticular node from your HA setup. The command might take a while to complete.
+Once the command is executed, it will remove the particular node from your HA setup. The command might take a while to complete.
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_troubleshooting.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_troubleshooting.md
index 2c2fbbcadc..529f38753b 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_troubleshooting.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_troubleshooting.md
@@ -20,11 +20,11 @@ This page explains the frequently encountered issues in Chef Automate High Avail

### Post Automate HA deployment if the chef-server service is in a critical state

-![ChefServer critical Error](/images/automate/chef-server-critical-error.png)
+![Chef Infra Server critical Error](/images/automate/chef-server-critical-error.png)

#### Solution

-- First we can check if Automate UI is opening via browser if it open's then we can try to hit the curl request to the Automate FQDN from the chefserver node.
+- First, check whether the Automate UI opens in a browser. If it does, try sending a curl request to the Automate FQDN from the Chef Infra Server node:
`curl --cacert /path/to/fqdn-rootca-pem-file https://`
- The above request will verify the authenticity of the server's SSL certificate (FQDN RootCA) against the Automate FQDN.
- In case it gives an error, make sure that the `RootCA` is valid.
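+
+For example, one way to check that the RootCA file is well formed and has not expired is to print its subject and validity dates (a generic OpenSSL check; substitute your actual RootCA path):
+
+```sh
+openssl x509 -in /path/to/fqdn-rootca-pem-file -noout -subject -dates
+```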

@@ -164,19 +164,19 @@

Go to any Automate Node in HA:

- Run the following command to get all the snapshots:

-```cmd
+```sh
curl -k -X GET -s http://localhost:10144/_snapshot/_all?pretty
```

- One by One, delete all the snapshots using the below command:

-```cmd
+```sh
curl -k -X DELETE -s http://localhost:10144/_snapshot/
```

Example:

-```cmd
+```sh
curl -k -X DELETE -s http://localhost:10144/_snapshot/chef-automate-es6-event-feed-service
```
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_upgrade_introduction.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_upgrade_introduction.md
index c8d10a884d..9fcbc67978 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_upgrade_introduction.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_upgrade_introduction.md
@@ -18,7 +18,7 @@ gh_repo = "automate"

Steps to upgrade the Chef Automate HA are as shown below:

-- Download the latest cli
+- Download the latest CLI
  ```bash
  curl https://packages.chef.io/files/current/latest/chef-automate-cli/chef-automate_linux_amd64.zip | gunzip - > chef-automate && chmod +x chef-automate | cp -f chef-automate /usr/bin/chef-automate
  ```
@@ -126,7 +126,7 @@ We can also pass a flag in upgrade command to avoid prompt for workspace upgrade

- Install `bootstrap.abb` on all the Frontend nodes (Chef-server and Automate nodes) by running the following command:

-  ```cmd
+  ```sh
  sudo chef-automate bootstrap bundle unpack bootstrap.abb
  ```

@@ -160,7 +160,7 @@ We can also pass a flag in upgrade command to avoid prompt for workspace upgrade

- On New cluster, use the following restore command to restore the backup of Primary Cluster from bastion.

-  ```cmd
+  ```sh
  sudo chef-automate backup restore s3:///// --patch-config /path/to/current_config.toml --airgap-bundle /path/to/airgap-bundle --skip-preflight --s3-access-key "Access_Key" --s3-secret-key "Secret_Key"
  ```

diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_verification_check.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_verification_check.md
index 07ab4c470f..de2904b0d2 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_verification_check.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/ha_verification_check.md
@@ -48,7 +48,7 @@ Once the verify command is triggered, following checks will be triggered:

- System User (All Nodes)
- External OpenSearch Database
- External PostgreSQL Database
-- S3/Minio Backup Config (If Required)
+- S3/MinIO Backup Config (If Required)
- NFS Backup Config (If Required)
- FQDN with Load Balancer Reachability
- Firewall Ports
@@ -67,7 +67,7 @@ It's always better to know the critical scenarios of a deployment process before

## Reports

-Once you run the verify command, it checks all the test cases defined. After it executes, you will see the full report of how many reports succeeded and how may failed. The report comes in a table structure with five columns, **NO.**, **IDENTIFIER**, **PARAMETER**, **STATUS**, and **MESSAGE**. The MESSAGE column shows the pointers to resolve for the parameters which have failed.
+Once you run the verify command, it checks all the test cases defined. After it executes, you will see the full report of how many checks succeeded and how many failed. The report comes in a table structure with five columns: **NO**, **IDENTIFIER**, **PARAMETER**, **STATUS**, and **MESSAGE**. The MESSAGE column shows pointers for resolving the parameters that failed.

An example of the checks performed is shown in the image below:

diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/large_compliance_report.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/large_compliance_report.md
index 7bd28139ba..4e275eb111 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/large_compliance_report.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/large_compliance_report.md
@@ -25,7 +25,7 @@ The following change in architecture enables Automate to ingest reports larger t

![LCR Architecture](/images/automate/lcr_architecture.jpg)

-Automate with the configuration to allow ingestion of an extensive compliance report and allow the data to be sent to the OpenSearch data and in an externally deployed Minio Service. Automate in the configuration expects that a Minio Server is running externally to Automate ecosystem, which Automate can connect and transact to.
+With this configuration, Automate can ingest extensive compliance reports and send the data to OpenSearch and to an externally deployed MinIO service. This configuration expects a MinIO server running externally to the Automate ecosystem that Automate can connect and transact with.

Automate with the configuration will enable Automate to:

@@ -53,9 +53,9 @@ Here is a benchmark test summary report run on

## Prerequisites

-{{< note >}} Automate installation does not include Minio server. {{< /note >}}
+{{< note >}} Automate installation does not include a MinIO server. {{< /note >}}

-- An external Minio server needs to be set up and available to connect
+- An external MinIO server needs to be set up and available to connect.

### MinIO

@@ -73,7 +73,7 @@ To enable Automate to ingest Large Compliance reports:

```toml
[global.v1.external.minio]
-   ##Do not add the protocol(http or https) for minio server end point. ex. mydomain.com:1234
+   ## Do not add the protocol (http or https) to the MinIO server endpoint, e.g., mydomain.com:1234
  endpoint = ":"
  root_user = ""
  root_password = ""
@@ -105,7 +105,7 @@ To enable Automate to ingest Large Compliance reports:

{{< warning >}} The below configuration is not tested to determine benchmark numbers. We recommend doing benchmark testing before considering the approach. {{< /warning >}}

-Automate can connect to AWS S3 for extensive compliance reports if you reuse the Minio Configuration in the following manner:
+Automate can connect to AWS S3 for extensive compliance reports if you reuse the MinIO configuration in the following manner:

```toml
[global.v1.external.minio]
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/major_upgrade.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/major_upgrade.md
index 2d1a159943..f0909042ca 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/major_upgrade.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/major_upgrade.md
@@ -57,7 +57,7 @@ Please upgrade to latest date pattern version number.
  ```sh
  ./chef-automate airgap bundle create --version 20220329091442
  ```
-  - Copy the bundle file `automate-20220329091442.aib` and latest downloaded cli `chef-automate` to the airgapped machine running Chef Automate.
+  - Copy the bundle file `automate-20220329091442.aib` and the latest downloaded CLI `chef-automate` to the airgapped machine running Chef Automate.
  - On the airgapped machine running Chef Automate
    - Upgrade Automate with bundle
    ```sh
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/opensearch_external_upgrade.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/opensearch_external_upgrade.md
index bd3c2d9cde..01b74011c3 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/opensearch_external_upgrade.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/opensearch_external_upgrade.md
@@ -101,7 +101,7 @@ For example:

## Steps To Migrate from External Elasticsearch to External OpenSearch

-Please refer the documententation for the Elasticsearch migration to OpenSearch.
+Please refer to the documentation for the Elasticsearch migration to OpenSearch.

[OpenSearch's documentation on upgrading to OpenSearch](https://opensearch.org/docs/latest/upgrade-to/upgrade-to/#upgrade-to-opensearch).

diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/reusable/index.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/reusable/index.md
new file mode 100644
index 0000000000..41de90ae49
--- /dev/null
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/reusable/index.md
@@ -0,0 +1,5 @@
++++
+headless = true
+## headless = true makes this directory a headless bundle.
+## See https://gohugo.io/content-management/page-bundles/#headless-bundle
++++
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/reusable/md/opensearch_health_check.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/reusable/md/opensearch_health_check.md
new file mode 100644
index 0000000000..0e71846815
--- /dev/null
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/reusable/md/opensearch_health_check.md
@@ -0,0 +1,19 @@
+Use the following commands on OpenSearch nodes to verify their health status.
+
+1. Verify that the Habitat service is running.
+
+    ```sh
+    hab svc status
+    ```
+
+1. Check the status of OpenSearch indices.
+
+    ```sh
+    curl -k -X GET "https://localhost:9200/_cat/indices/*?v=true&s=index&pretty" -u admin:admin
+    ```
+
+1. View logs of the Chef Habitat services.
+
+    ```sh
+    journalctl -u hab-sup -f | grep 'automate-ha-opensearch'
+    ```
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/reusable/md/restore_troubleshooting.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/reusable/md/restore_troubleshooting.md
new file mode 100644
index 0000000000..e531cca3a1
--- /dev/null
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/reusable/md/restore_troubleshooting.md
@@ -0,0 +1,21 @@
+Try these steps if Chef Automate returns an error while restoring data.
+
+1. Check the Chef Automate status.
+
+    ```sh
+    chef-automate status
+    ```
+
+1. Check the status of your Habitat service on the Automate node.
+
+    ```sh
+    hab svc status
+    ```
+
+1. If the deployment services are not healthy, reload them.
+
+    ```sh
+    hab svc load chef/deployment-service
+    ```
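+
+    After reloading, you can watch just that service until it reports as up (`hab svc status` also accepts a package identifier):
+
+    ```sh
+    hab svc status chef/deployment-service
+    ```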
+
+Now check the status of the Automate node and then try running the restore command from the bastion host.
diff --git a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/users.md b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/users.md
index c572648b90..ed5f3069f1 100644
--- a/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/users.md
+++ b/_vendor/github.com/chef/automate/components/docs-chef-io/content/automate/users.md
@@ -30,6 +30,7 @@ Navigate to _Users_ in the **Settings** tab. Select the **Create User** button, 

![Add Local User](/images/automate/admin-tab-users-list.png)

+Attach a policy to new users before their first login. For details, see [Adding Members to Policies]({{< relref "policies.md#adding-members-to-policies" >}}).
+
### Changing Display Names

Navigate to _Users_ in the **Settings** tab and locate the user who needs their display name changed. Navigate to their user page, provide a new display name, and select the **Save** button.
diff --git a/_vendor/modules.txt b/_vendor/modules.txt
index 9e17465168..b5e1c9fa1b 100644
--- a/_vendor/modules.txt
+++ b/_vendor/modules.txt
@@ -1,4 +1,4 @@
-# github.com/chef/automate/components/docs-chef-io v0.0.0-20230906170447-b3729eb32ec0
+# github.com/chef/automate/components/docs-chef-io v0.0.0-20230925114325-98981657948f
# github.com/chef/desktop-config/docs-chef-io v0.0.0-20230711052355-bad26ce3ac0b
# github.com/habitat-sh/habitat/components/docs-chef-io v0.0.0-20230808222519-d0c20bbe8c45
# github.com/chef/chef-server/docs-chef-io v0.0.0-20230825050408-6e9201414311
diff --git a/go.mod b/go.mod
index 9101387a9f..6577298044 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/chef/chef-web-docs
go 1.16

require (
-	github.com/chef/automate/components/docs-chef-io v0.0.0-20230906170447-b3729eb32ec0 // indirect
+	github.com/chef/automate/components/docs-chef-io v0.0.0-20230925114325-98981657948f // indirect
	github.com/chef/chef-server/docs-chef-io v0.0.0-20230825050408-6e9201414311 // indirect
	github.com/chef/chef-workstation/docs-chef-io v0.0.0-20230906065503-8f1a978813f8 // indirect
	github.com/chef/compliance-profiles/docs-chef-io v0.0.0-20230904102656-f8fff0821d49 // indirect
diff --git a/go.sum b/go.sum
index af895a20e5..ec76c2fab2 100644
--- a/go.sum
+++ b/go.sum
@@ -1,8 +1,8 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/chef/automate/components/docs-chef-io v0.0.0-20230906170447-b3729eb32ec0 h1:dV84EVjWL65BWhWNOIoJHPv7KKeIg4g/OIBlsL2ej34=
-github.com/chef/automate/components/docs-chef-io v0.0.0-20230906170447-b3729eb32ec0/go.mod h1:juvLC7Rt33YOCgJ5nnfl4rWZRAbSwqjTbWmcAoA0LtU=
+github.com/chef/automate/components/docs-chef-io v0.0.0-20230925114325-98981657948f h1:ar9pU9Wc5Z0h6f+KaznmxgTHXP24iArbAZv9jTYQZLw=
+github.com/chef/automate/components/docs-chef-io v0.0.0-20230925114325-98981657948f/go.mod h1:juvLC7Rt33YOCgJ5nnfl4rWZRAbSwqjTbWmcAoA0LtU=
github.com/chef/chef-server/docs-chef-io v0.0.0-20230825050408-6e9201414311 h1:L69Bjrc0KOKFtgiuVSJ0rxOmngU41cGvbmC2MIQp26I=
github.com/chef/chef-server/docs-chef-io v0.0.0-20230825050408-6e9201414311/go.mod h1:gMSa25GUHmLimA0gjvRd3hs1buOBqkKPrdHzHvaJauY=
github.com/chef/chef-workstation/docs-chef-io v0.0.0-20230906065503-8f1a978813f8 h1:rMpqWWnaV+fzB5Qk+8sNdbMgBarjPmCGSF623V5SOqc=