From 7d4da7a5cfb6b5a8a4bfbf4137b37e6d972b9990 Mon Sep 17 00:00:00 2001 From: Godson Fortil Date: Fri, 29 Apr 2022 16:15:00 -0400 Subject: [PATCH] Add support for all the supported parameters by the helm provider. Signed-off-by: Godson Fortil --- README.md | 315 +++++++++++++++++++++++------------ examples/admin.tfvars | 169 ++++++++++++++----- locals.tf | 18 +- main.tf | 107 +++++++----- values/elastic4hpcclogs.yaml | 4 + values/esp.yaml | 73 +++++++- variables.tf | 34 +--- versions.tf | 2 +- 8 files changed, 488 insertions(+), 234 deletions(-) create mode 100644 values/elastic4hpcclogs.yaml diff --git a/README.md b/README.md index 74cb26b..a27c69a 100644 --- a/README.md +++ b/README.md @@ -13,24 +13,14 @@ This module deploys an HPCC AKS cluster using remote modules that are listed bel ## Remote Modules These are the list of all the remote modules. -| Name | Description | URL | Required | -| -------------------- | ------------------------------------------------------------------------ | -------------------------------------------------------------------------- | :------: | -| subscription | Queries enabled azure subscription from host machine | https://github.com/Azure-Terraform/terraform-azurerm-subscription-data.git | yes | -| naming | Enforces naming conventions | - | yes | -| metadata | Provides metadata | https://github.com/Azure-Terraform/terraform-azurerm-metadata.git | yes | -| resource_group | Creates a resource group | https://github.com/Azure-Terraform/terraform-azurerm-resource-group.git | yes | -| virtual_network | Creates a virtual network | https://github.com/Azure-Terraform/terraform-azurerm-virtual-network.git | yes | -| cheapest_spot_region | Returns the region name with the cheapest instance based on a given size | https://github.com/gfortil/terraform-azurerm-cheapest-region.git | no | -| kubernetes | Creates an Azure Kubernetes Service Cluster | https://github.com/Azure-Terraform/terraform-azurerm-kubernetes.git | yes | -
- -## Providers - -| Name | Version | -| ---------- | --------- | -| azurerm | >= 2.57.0 | -| random | ~>3.1.0 | -| kubernetes | ~>2.2.0 | +| Name | Description | URL | Required | +| --------------- | ---------------------------------------------------- | -------------------------------------------------------------------------- | :------: | +| subscription | Queries enabled azure subscription from host machine | https://github.com/Azure-Terraform/terraform-azurerm-subscription-data.git | yes | +| naming | Enforces naming conventions | - | yes | +| metadata | Provides metadata | https://github.com/Azure-Terraform/terraform-azurerm-metadata.git | yes | +| resource_group | Creates a resource group | https://github.com/Azure-Terraform/terraform-azurerm-resource-group.git | yes | +| virtual_network | Creates a virtual network | https://github.com/Azure-Terraform/terraform-azurerm-virtual-network.git | yes | +| kubernetes | Creates an Azure Kubernetes Service Cluster | https://github.com/Azure-Terraform/terraform-azurerm-kubernetes.git | yes |
## Supported Arguments @@ -52,8 +42,8 @@ Usage Example: name = "Example" email = "example@hpccdemo.com" } -
+ ### The `disable_naming_conventions` block: When set to `true`, this attribute drops the naming conventions set forth by the python module. This attribute is optional. @@ -214,31 +204,77 @@ Usage Example: enable_auto_scaling = true only_critical_addons_enabled = true min_count = 1 - max_count = 2 + max_count = 1 availability_zones = [] subnet = "private" + enable_host_encryption = false + enable_node_public_ip = false + os_disk_type = "Managed" + type = "VirtualMachineScaleSets" + # max_pods = 10 + # node_labels = {"engine" = "roxie", "engine" = "roxie"} + # orchestrator_version = "2.9.0" + # os_disk_size_gb = 100 + # tags = {"mynodepooltag1" = "mytagvalue1", "mynodepooltag2" = "mytagvalue2"} + } addpool1 = { - vm_size = "Standard_D4_v4" - enable_auto_scaling = true - min_count = 1 - max_count = 2 - availability_zones = [] - subnet = "public" - priority = "Regular" - spot_max_price = -1 + vm_size = "Standard_D4_v4" + enable_auto_scaling = true + node_count = 2 + min_count = 1 + max_count = 2 + availability_zones = [] + subnet = "public" + priority = "Regular" + spot_max_price = -1 + max_surge = "1" + os_type = "Linux" + priority = "Regular" + enable_host_encryption = false + enable_node_public_ip = false + only_critical_addons_enabled = false + os_disk_type = "Managed" + type = "VirtualMachineScaleSets" + # orchestrator_version = "2.9.0" + # os_disk_size_gb = 100 + # max_pods = 20 + # node_labels = {"engine" = "roxie", "engine" = "roxie"} + # eviction_policy = "Spot" + # node_taints = ["mytaint1", "mytaint2"] + # proximity_placement_group_id = "my_proximity_placement_group_id" + # spot_max_price = 1 + # tags = {"mynodepooltag1" = "mytagvalue1", "mynodepooltag2" = "mytagvalue2"} } addpool2 = { - vm_size = "Standard_D4_v4" - enable_auto_scaling = true - min_count = 1 - max_count = 2 - availability_zones = [] - subnet = "public" - priority = "Regular" - spot_max_price = -1 + vm_size = "Standard_D4_v4" + enable_auto_scaling = true + node_count = 2 + min_count = 1 + 
max_count = 2 + availability_zones = [] + subnet = "public" + priority = "Regular" + spot_max_price = -1 + max_surge = "1" + os_type = "Linux" + priority = "Regular" + enable_host_encryption = false + enable_node_public_ip = false + only_critical_addons_enabled = false + os_disk_type = "Managed" + type = "VirtualMachineScaleSets" + # orchestrator_version = "2.9.0" + # os_disk_size_gb = 100 + # max_pods = 20 + # node_labels = {"engine" = "roxie", "engine" = "roxie"} + # eviction_policy = "Spot" + # node_taints = ["mytaint1", "mytaint2"] + # proximity_placement_group_id = "my_proximity_placement_group_id" + # spot_max_price = 1 + # tags = {"mynodepooltag1" = "mytagvalue1", "mynodepooltag2" = "mytagvalue2"} } }
@@ -254,24 +290,59 @@ This block disable helm deployments by Terraform. This block is optional and wil ### The `hpcc` block: This block deploys the HPCC helm chart. This block is optional. - | Name | Description | Type | Default | Required | - | ------------- | ----------------------------------------------------------------------- | ------------ | -------------------------------- | :------: | - | chart | Path to local chart directory name. Examples: ~/HPCC-Platform/helm/hpcc | string | null | no | - | namespace | Namespace to use. | string | default | no | - | name | Release name of the chart. | string | `myhpcck8s` | no | - | values | List of desired state files to use similar to -f in CLI. | list(string) | `values-retained-azurefile.yaml` | no | - | chart_version | Version of the HPCC chart. | string | latest | yes | - | image_root | HPCC image root to use. | string | hpccsystems | no | - | image_name | HPCC image name to use. | string | platform-core | no | - | image_version | HPCC image version to use. | string | 8.6.20-rc1 | no | + | Name | Description | Type | Default | Required | + | -------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ | ------------------------------ | :------: | + | local_chart | Path to local chart directory name or tgz file. Example1: ~/HPCC-Platform/helm/hpcc Example2: https://github.com/hpcc-systems/helm-chart/raw/master/docs/hpcc-8.6.16-rc1.tgz | string | null | no | + | remote_chart | URL of the remote chart. Example: https://hpcc-systems.github.io/helm-chart | string | null | no | + | namespace | Namespace to use. | string | default | no | + | name | Release name of the chart. | string | myhpcck8s | no | + | values | List of desired state files to use similar to -f in CLI. | list(string) | values-retained-azurefile.yaml | no | + | version | Version of the HPCC chart. 
| string | latest | yes | + | image_root | Image root to use. | string | hpccsystems | no | + | image_name | Image name to use. | string | platform-core | no | + | atomic | If set, installation process purges chart on fail. The `wait` flag will be set automatically if `atomic` is used. | bool | false | no | + | recreate_pods | Perform pods restart during upgrade/rollback. | bool | false | no | + | reuse_values | When upgrading, reuse the last release's values and merge in any overrides. If `reset_values` is specified, this is ignored. | bool | false | no | + | reset_values | When upgrading, reset the values to the ones built into the chart. | bool | false | no | + | force_update | Force resource update through delete/recreate if needed. | bool | false | no | + | cleanup_on_fail | Allow deletion of new resources created in this upgrade when upgrade fails. | bool | false | no | + | disable_openapi_validation | If set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema. | bool | false | no | + | max_history | Maximum number of release versions stored per release. | number | 0 | no | + | wait | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout` . | bool | true | no | + | dependency_update | Runs helm dependency update before installing the chart. | bool | false | no | + | timeout | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). | number | 900 | no | + | wait_for_jobs | If wait is enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as `timeout`. | bool | false | no | + | lint | Run the helm chart linter during the plan. | bool | false | no | + | expose_eclwatch | Expose ECLWatch to the internet. This can cause the service to hang on pending state if external IPs are blocked by your organization's cloud policies. | bool | true | no |
Usage Example:
hpcc = { - version = "8.2.16-rc2" - name = "myhpcck8s" + expose_eclwatch = true + name = "myhpcck8s" + atomic = true + recreate_pods = false + reuse_values = false + reset_values = false + force_update = false + namespace = "default" + cleanup_on_fail = false + disable_openapi_validation = false + max_history = 0 + wait = true + dependency_update = true + timeout = 900 + wait_for_jobs = false + lint = false + remote_chart = "https://hpcc-systems.github.io/helm-chart" + # local_chart = "/Users/foo/work/demo/helm-chart/helm/hpcc" # Other examples: https://github.com/hpcc-systems/helm-chart/raw/master/docs/hpcc-8.6.16-rc1.tgz + # version = "8.6.14-rc2" + # values = ["/Users/foo/mycustomvalues1.yaml", "/Users/foo/mycustomvalues2.yaml"] + # image_root = "west.lexisnexisrisk.com" + # image_name = "platform-core-ln" + # image_version = "8.6.18-rc1" }
@@ -279,15 +350,28 @@ This block deploys the HPCC helm chart. This block is optional. ### The `storage` block: This block deploys the HPCC persistent volumes. This block is required. - | Name | Description | Type | Default | Valid Options | Required | - | --------------- | ----------------------------------------------------------------------------------------------------- | ------------ | ------------------------------------------------------ | ---------------- | :------: | - | default | Use AKS provided storage account | bool | `false` | `true` , `false` | no | - | chart | Absolute path to local chart directory. Examples: ~/HPCC-Platform//helm/examples/azure/hpcc-azurefile | string | null | no | - | name | Release name of the chart. | string | `myhpcck8s` | no | - | values | List of desired state files to use similar to -f in CLI. | list(string) | [] | no | - | storage_account | The storage account account to use. | object | Queries attributes' values from storage_account module | - | no | - | version | Version of the storage chart. | string | 0.1.0 | no | - + | Name | Description | Type | Default | Valid Options | Required | + | -------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------ | ------------------------------------------------------ | ---------------- | :---------: | + | default | Use AKS provided storage account? | bool | `false` | `true` , `false` | no | + | version | The version of the storage chart. | string | 0.1.0 | | no | + | local_chart | Path to local chart directory name or tgz file. Example1: /Users/foo/work/demo/helm-chart/helm/examples/azure/hpcc-azurefile Example2: https://github.com/hpcc-systems/helm-chart/raw/master/docs/hpcc-azurefile-0.1.0.tgz | string | null | no | + | remote_chart | URL of the remote chart. 
Example: https://hpcc-systems.github.io/helm-chart | name | Release name of the chart. | string | `myhpcck8s` | no | + | values | List of desired state files to use similar to -f in CLI. | list(string) | [] | no | + | storage_account | The storage account account to use. | object | Queries attributes' values from storage_account module | - | no | + | version | Version of the storage chart. | string | 0.1.0 | no | + | atomic | If set, installation process purges chart on fail. The `wait` flag will be set automatically if `atomic` is used. | bool | false | no | + | recreate_pods | Perform pods restart during upgrade/rollback. | bool | false | no | + | reuse_values | When upgrading, reuse the last release's values and merge in any overrides. If `reset_values` is specified, this is ignored. | bool | false | no | + | reset_values | When upgrading, reset the values to the ones built into the chart. | bool | false | no | + | force_update | Force resource update through delete/recreate if needed. | bool | false | no | + | cleanup_on_fail | Allow deletion of new resources created in this upgrade when upgrade fails. | bool | false | no | + | disable_openapi_validation | If set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema. | bool | false | no | + | max_history | Maximum number of release versions stored per release. | number | 0 | no | + | wait | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout` . | bool | true | no | + | dependency_update | Runs helm dependency update before installing the chart. | bool | false | no | + | timeout | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). | number | 600 | no | + | wait_for_jobs | If wait is enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as `timeout`. 
| bool | false | no | + | lint | Run the helm chart linter during the plan. | bool | false | no |
### The `storage_account` block: @@ -305,57 +389,89 @@ Usage Example:
storage = { - default = false - # chart = "" - # values = [] - + default = false + atomic = true + recreate_pods = false + reuse_values = false + reset_values = false + force_update = false + namespace = "default" + cleanup_on_fail = false + disable_openapi_validation = false + max_history = 0 + wait = true + dependency_update = true + timeout = 600 + wait_for_jobs = false + lint = false + remote_chart = "https://hpcc-systems.github.io/helm-chart" + # local_chart = "/Users/foo/work/demo/helm-chart/helm/examples/azure/hpcc-azurefile" + # version = "0.1.0" + # values = ["/Users/foo/mycustomvalues1.yaml", "/Users/foo/mycustomvalues2.yaml"] + /* storage_account = { location = "eastus" - name = "hpccsa3" + name = "foohpccsa3" resource_group_name = "app-storageaccount-sandbox-eastus-48936" - # subscription_id = "" + # subscription_id = "value" } + */ } -
-### The `elk` block: -This block deploys the ELK chart. This block is optional. - - | Name | Description | Type | Default | Required | - | ------- | ------------------------------------------------------------------------------------------- | ------------ | --------- | :------: | - | chart | Path to local chart directory name. Examples: ./HPCC-Platform//helm/managed/logging/elastic | string | null | no | - | enable | Enable ELK | bool | `true` | no | - | name | Release name of the chart. | string | myhpccelk | no | - | values | List of desired state files to use similar to -f in CLI. | list(string) | - | no | - | version | Version of the ELK chart. | string | 1.2.1 | no | - +### The `elastic4hpcclogs` block: +This block deploys the elastic4hpcclogs chart. This block is optional. + + | Name | Description | Type | Default | Required | + | -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ | ----------------------- | :------: | + | local_chart | Path to local chart directory name or tgz file. Example1: /Users/foo/work/demo/helm-chart/helm/managed/logging/elastic Example2: https://github.com/hpcc-systems/helm-chart/raw/master/docs/elastic4hpcclogs-1.2.10.tgz | string | null | no | + | remote_chart | URL of the remote chart. Example: https://hpcc-systems.github.io/helm-chart | enable | Enable elastic4hpcclogs | bool | `true` | no | + | name | Release name of the chart. | string | myelastic4hpcclogs | no | + | version | The version of the elastic4hpcclogs | string | 1.2.8 | | no | + | values | List of desired state files to use similar to -f in CLI. | list(string) | - | no | + | version | Version of the elastic4hpcclogs chart. | string | 1.2.1 | no | + | atomic | If set, installation process purges chart on fail. 
The `wait` flag will be set automatically if `atomic` is used. | bool | false | no | + | recreate_pods | Perform pods restart during upgrade/rollback. | bool | false | no | + | reuse_values | When upgrading, reuse the last release's values and merge in any overrides. If `reset_values` is specified, this is ignored. | bool | false | no | + | reset_values | When upgrading, reset the values to the ones built into the chart. | bool | false | no | + | force_update | Force resource update through delete/recreate if needed. | bool | false | no | + | cleanup_on_fail | Allow deletion of new resources created in this upgrade when upgrade fails. | bool | false | no | + | disable_openapi_validation | If set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema. | bool | false | no | + | max_history | Maximum number of release versions stored per release. | number | 0 | no | + | wait | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as `timeout` . | bool | true | no | + | dependency_update | Runs helm dependency update before installing the chart. | bool | false | no | + | timeout | Time in seconds to wait for any individual kubernetes operation (like Jobs for hooks). | number | 900 | no | + | wait_for_jobs | If wait is enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as `timeout`. | bool | false | no | + | lint | Run the helm chart linter during the plan. | bool | false | no | + | expose | Expose myelastic4hpcclogs-kibana service to the internet. This can cause the service to hang on pending state if external IPs are blocked by your organization's cloud policies. | bool | true | no |
Usage Example:
- elk = { - enable = false - name = "myhpccelk" + elastic4hpcclogs = { + enable = true + expose = true + name = "myelastic4hpcclogs" + atomic = true + recreate_pods = false + reuse_values = false + reset_values = false + force_update = false + namespace = "default" + cleanup_on_fail = false + disable_openapi_validation = false + max_history = 0 + wait = true + dependency_update = true + timeout = 300 + wait_for_jobs = false + lint = false + remote_chart = "https://hpcc-systems.github.io/helm-chart" + # local_chart = "/Users/foo/work/demo/helm-chart/helm/managed/logging/elastic" + # version = "1.2.10" + # values = ["/Users/foo/mycustomvalues1.yaml", "/Users/foo/mycustomvalues2.yaml"] } - -
- -The `hpcc`, `storage` and `elk` blocks also support the following arguments: -
- -| Name | Optional, Required | Description | -| -------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| atomic | Optional | If set, installation process purges chart on fail. The wait flag will be set automatically if atomic is used. Defaults to false. | -| recreate_pods | Optional | Perform pods restart during upgrade/rollback. Defaults to false. | -| cleanup_on_fail | Optional | Allow deletion of new resources created in this upgrade when upgrade fails. Defaults to false. | -| disable_openapi_validation | Optional | If set, the installation process will not validate rendered templates against the Kubernetes OpenAPI Schema. Defaults to false. | -| wait | Optional | Will wait until all resources are in a ready state before marking the release as successful. It will wait for as long as timeout. Defaults to false. | -| dependency_update | Optional | Runs helm dependency update before installing the chart. Defaults to false. | -| timeout | Optional | Time in seconds to wait for any individual Kubernetes operation (like Jobs for hooks). Defaults to 900 seconds for hpcc and 600 seconds for storage and elk. | -| wait_for_jobs | Optional | If wait is enabled, will wait until all Jobs have been completed before marking the release as successful. It will wait for as long as timeout. Defaults to false. | -| lint | Optional | Run the helm chart linter during the plan. Defaults to false. |
### The `registry` block: @@ -386,15 +502,6 @@ This block automatically connect your cluster to your local machine similarly to | auto_connect | Automatically connect to the Kubernetes cluster from the host machine by overwriting the current context. | bool | `false` | no |
-### The `expose_services` argument: -Expose ECLWatch and ELK to the internet. This is unsafe and may not be supported by your organization. Setting this to `true` can cause eclwatch service to stick in a pending state. - - | Name | Description | Type | Default | Required | - | --------------- | ---------------------------------------- | ---- | ------- | :------: | - | expose_services | Expose ECLWatch and ELK to the internet. | bool | `false` | no | -
- - ## Outputs | Name | Description | diff --git a/examples/admin.tfvars b/examples/admin.tfvars index 8c499aa..db1e413 100644 --- a/examples/admin.tfvars +++ b/examples/admin.tfvars @@ -28,33 +28,77 @@ node_pools = { enable_auto_scaling = true only_critical_addons_enabled = true min_count = 1 - max_count = 2 + max_count = 1 availability_zones = [] - subnet = "private" + subnet = "public" + enable_host_encryption = false + enable_node_public_ip = false + os_disk_type = "Managed" + type = "VirtualMachineScaleSets" + # max_pods = 10 + # node_labels = {"engine" = "roxie", "engine" = "roxie"} + # orchestrator_version = "2.9.0" + # os_disk_size_gb = 100 + # tags = {"mynodepooltag1" = "mytagvalue1", "mynodepooltag2" = "mytagvalue2"} + } addpool1 = { - vm_size = "Standard_D4_v4" - enable_auto_scaling = true - node_count = 2 - min_count = 1 - max_count = 2 - availability_zones = [] - subnet = "public" - priority = "Regular" - spot_max_price = -1 + vm_size = "Standard_D4_v4" + enable_auto_scaling = true + node_count = 2 + min_count = 1 + max_count = 2 + availability_zones = [] + subnet = "public" + priority = "Regular" + spot_max_price = -1 + max_surge = "1" + os_type = "Linux" + priority = "Regular" + enable_host_encryption = false + enable_node_public_ip = false + only_critical_addons_enabled = false + os_disk_type = "Managed" + type = "VirtualMachineScaleSets" + # orchestrator_version = "2.9.0" + # os_disk_size_gb = 100 + # max_pods = 20 + # node_labels = {"engine" = "roxie", "engine" = "roxie"} + # eviction_policy = "Spot" + # node_taints = ["mytaint1", "mytaint2"] + # proximity_placement_group_id = "my_proximity_placement_group_id" + # spot_max_price = 1 + # tags = {"mynodepooltag1" = "mytagvalue1", "mynodepooltag2" = "mytagvalue2"} } addpool2 = { - vm_size = "Standard_D4_v4" - enable_auto_scaling = true - node_count = 2 - min_count = 1 - max_count = 2 - availability_zones = [] - subnet = "public" - priority = "Regular" - spot_max_price = -1 + vm_size = 
"Standard_D4_v4" + enable_auto_scaling = true + node_count = 2 + min_count = 1 + max_count = 2 + availability_zones = [] + subnet = "public" + priority = "Regular" + spot_max_price = -1 + max_surge = "1" + os_type = "Linux" + priority = "Regular" + enable_host_encryption = false + enable_node_public_ip = false + only_critical_addons_enabled = false + os_disk_type = "Managed" + type = "VirtualMachineScaleSets" + # orchestrator_version = "2.9.0" + # os_disk_size_gb = 100 + # max_pods = 20 + # node_labels = {"engine" = "roxie", "engine" = "roxie"} + # eviction_policy = "Spot" + # node_taints = ["mytaint1", "mytaint2"] + # proximity_placement_group_id = "my_proximity_placement_group_id" + # spot_max_price = 1 + # tags = {"mynodepooltag1" = "mytagvalue1", "mynodepooltag2" = "mytagvalue2"} } } @@ -62,39 +106,86 @@ node_pools = { # ....................... hpcc = { - version = "8.6.20-rc1" - name = "myhpcck8s" - atomic = true - # image_root = "/" + name = "myhpcck8s" + expose_eclwatch = true + atomic = true + recreate_pods = false + reuse_values = false + reset_values = false + force_update = false + namespace = "default" + cleanup_on_fail = false + disable_openapi_validation = false + max_history = 0 + wait = true + dependency_update = true + timeout = 900 + wait_for_jobs = false + lint = false + remote_chart = "https://hpcc-systems.github.io/helm-chart" + # local_chart = "/Users/foo/work/demo/helm-chart/helm/hpcc" #Other examples: local_chart = "https://github.com/hpcc-systems/helm-chart/raw/master/docs/hpcc-8.6.16-rc1.tgz" + # version = "8.6.14-rc2" + # values = ["/Users/foo/mycustomvalues1.yaml", "/Users/foo/mycustomvalues2.yaml"] + # image_root = "west.lexisnexisrisk.com" # image_name = "platform-core-ln" # image_version = "8.6.18-rc1" } -elk = { - enable = true - name = "myhpccelk" - # chart = "" - # values = [] -} - storage = { - default = false - # chart = "" - # values = [] + default = false + atomic = true + recreate_pods = false + reuse_values = false + 
reset_values = false + force_update = false + namespace = "default" + cleanup_on_fail = false + disable_openapi_validation = false + max_history = 0 + wait = true + dependency_update = true + timeout = 600 + wait_for_jobs = false + lint = false + remote_chart = "https://hpcc-systems.github.io/helm-chart" + # local_chart = "/Users/foo/work/demo/helm-chart/helm/examples/azure/hpcc-azurefile" + # version = "0.1.0" + # values = ["/Users/foo/mycustomvalues1.yaml", "/Users/foo/mycustomvalues2.yaml"] /* storage_account = { location = "eastus" - name = "demohpccsa3" + name = "foohpccsa3" resource_group_name = "app-storageaccount-sandbox-eastus-48936" - # subscription_id = "" + # subscription_id = "value" } */ } -# Optional Attributes -# ------------------- +elastic4hpcclogs = { + enable = true + expose = true + name = "myelastic4hpcclogs" + atomic = true + recreate_pods = false + reuse_values = false + reset_values = false + force_update = false + namespace = "default" + cleanup_on_fail = false + disable_openapi_validation = false + max_history = 0 + wait = true + dependency_update = true + timeout = 300 + wait_for_jobs = false + lint = false + remote_chart = "https://hpcc-systems.github.io/helm-chart" + # local_chart = "/Users/foo/work/demo/helm-chart/helm/managed/logging/elastic" + # version = "1.2.10" + # values = ["/Users/foo/mycustomvalues1.yaml", "/Users/foo/mycustomvalues2.yaml"] +} -# expose_services - Expose ECLWatch and ELK to the internet. This can be unsafe and may not be supported by your organization. +# expose_services - Expose ECLWatch and elastic4hpcclogs to the internet. This can be unsafe and may not be supported by your organization. # Setting this to true can cause eclwatch service to stick in a pending state. Only use this if you know what you are doing. 
expose_services = true diff --git a/locals.tf b/locals.tf index 52f209d..54f215b 100644 --- a/locals.tf +++ b/locals.tf @@ -13,22 +13,8 @@ locals { ) : module.metadata.names tags = var.disable_naming_conventions ? merge(var.tags, { "admin" = var.admin.name, "email" = var.admin.email }) : merge(module.metadata.tags, { "admin" = var.admin.name, "email" = var.admin.email }, try(var.tags)) virtual_network = can(var.virtual_network.private_subnet_id) && can(var.virtual_network.public_subnet_id) && can(var.virtual_network.route_table_id) ? var.virtual_network : data.external.vnet[0].result - cluster_name = "${local.names.resource_group_type}-${local.names.product_name}-terraform-${local.names.location}-${var.admin.name}-${terraform.workspace}" - - hpcc_repository = "https://github.com/hpcc-systems/helm-chart/raw/master/docs/hpcc-${var.hpcc.chart_version}.tgz" - hpcc_chart = can(var.hpcc.chart) ? var.hpcc.chart : local.hpcc_repository - hpcc_name = can(var.hpcc.name) ? var.hpcc.name : "myhpcck8s" - - - storage_version = can(var.storage.chart_version) ? var.storage.chart_version : "0.1.0" - storage_repository = "https://github.com/hpcc-systems/helm-chart/raw/master/docs/hpcc-azurefile-${local.storage_version}.tgz" - storage_chart = can(var.storage.chart) ? var.storage.chart : local.storage_repository - storage_account = can(var.storage.storage_account.resource_group_name) && can(var.storage.storage_account.name) && can(var.storage.storage_account.location) ? var.storage.storage_account : data.external.sa[0].result - - elk_version = can(var.elk.chart_version) ? var.elk.chart_version : "1.2.1" - elk_repository = "https://github.com/hpcc-systems/helm-chart/raw/master/docs/elastic4hpcclogs-${local.elk_version}.tgz" - elk_chart = can(var.elk.chart) ? var.elk.chart : local.elk_repository - elk_name = can(var.elk.name) ? 
var.elk.name : "myhpccelk" + cluster_name = "${local.names.resource_group_type}-${local.names.product_name}-terraform-${local.names.location}-${var.admin.name}${random_integer.int.result}-${terraform.workspace}" + storage_account = can(var.storage.storage_account.resource_group_name) && can(var.storage.storage_account.name) && can(var.storage.storage_account.location) ? var.storage.storage_account : data.external.sa[0].result az_command = try("az aks get-credentials --name ${module.kubernetes.name} --resource-group ${module.resource_group.name} --overwrite", "") web_urls = { auto_launch_eclwatch = "http://$(kubectl get svc --field-selector metadata.name=eclwatch | awk 'NR==2 {print $4}'):8010" } diff --git a/main.tf b/main.tf index 5a12eb6..6ce987a 100644 --- a/main.tf +++ b/main.tf @@ -1,13 +1,6 @@ -resource "random_string" "random" { - length = 43 - upper = false - number = false - special = false -} - -resource "random_password" "admin" { - length = 6 - special = true +resource "random_integer" "int" { + min = 1 + max = 3 } module "subscription" { @@ -120,21 +113,27 @@ resource "kubernetes_secret" "private_docker_registry" { resource "helm_release" "hpcc" { count = var.disable_helm ? 0 : 1 - name = local.hpcc_name - chart = local.hpcc_chart + name = can(var.hpcc.name) ? var.hpcc.name : "myhpcck8s" + version = can(var.hpcc.version) ? var.hpcc.version : null + chart = can(var.hpcc.remote_chart) ? "hpcc" : var.hpcc.local_chart + repository = can(var.hpcc.remote_chart) ? 
var.hpcc.remote_chart : null create_namespace = true namespace = try(var.hpcc.namespace, terraform.workspace) - atomic = try(var.hpcc.atomic, null) - recreate_pods = try(var.hpcc.recreate_pods, null) - cleanup_on_fail = try(var.hpcc.cleanup_on_fail, null) - disable_openapi_validation = try(var.hpcc.disable_openapi_validation, null) - wait = try(var.hpcc.wait, null) - dependency_update = try(var.hpcc.dependency_update, null) + atomic = try(var.hpcc.atomic, false) + recreate_pods = try(var.hpcc.recreate_pods, false) + reuse_values = try(var.hpcc.reuse_values, false) + reset_values = try(var.hpcc.reset_values, false) + force_update = try(var.hpcc.force_update, false) + cleanup_on_fail = try(var.hpcc.cleanup_on_fail, false) + disable_openapi_validation = try(var.hpcc.disable_openapi_validation, false) + max_history = try(var.hpcc.max_history, 0) + wait = try(var.hpcc.wait, true) + dependency_update = try(var.hpcc.dependency_update, false) timeout = try(var.hpcc.timeout, 900) - wait_for_jobs = try(var.hpcc.wait_for_jobs, null) - lint = try(var.hpcc.lint, null) + wait_for_jobs = try(var.hpcc.wait_for_jobs, false) + lint = try(var.hpcc.lint, false) - values = concat(var.expose_services ? [file("${path.root}/values/esp.yaml")] : [], + values = concat(var.hpcc.expose_eclwatch ? [file("${path.root}/values/esp.yaml")] : [], try([for v in var.hpcc.values : file(v)], []), [file("${path.root}/values/values-retained-azurefile.yaml")]) dynamic "set" { @@ -170,29 +169,43 @@ resource "helm_release" "hpcc" { } depends_on = [ - helm_release.elk, + helm_release.elastic4hpcclogs, helm_release.storage, module.kubernetes ] } -resource "helm_release" "elk" { - count = var.disable_helm || !var.elk.enable ? 0 : 1 +resource "helm_release" "elastic4hpcclogs" { + count = var.disable_helm || !var.elastic4hpcclogs.enable ? 0 : 1 - name = local.elk_name + name = can(var.elastic4hpcclogs.name) ? 
var.elastic4hpcclogs.name : "myelastic4hpcclogs" namespace = try(var.hpcc.namespace, terraform.workspace) - chart = local.elk_chart - values = try([for v in var.elk.values : file(v)], []) + chart = can(var.elastic4hpcclogs.remote_chart) ? "elastic4hpcclogs" : var.elastic4hpcclogs.local_chart + repository = can(var.elastic4hpcclogs.remote_chart) ? var.elastic4hpcclogs.remote_chart : null + version = can(var.elastic4hpcclogs.version) ? var.elastic4hpcclogs.version : null + values = concat(try([for v in var.elastic4hpcclogs.values : file(v)], []), can(var.elastic4hpcclogs.expose) ? [file("${path.root}/values/elastic4hpcclogs.yaml")] : []) create_namespace = true - atomic = try(var.elk.atomic, null) - recreate_pods = try(var.elk.recreate_pods, null) - cleanup_on_fail = try(var.elk.cleanup_on_fail, null) - disable_openapi_validation = try(var.elk.disable_openapi_validation, null) - wait = try(var.elk.wait, null) - dependency_update = try(var.elk.dependency_update, null) - timeout = try(var.elk.timeout, 600) - wait_for_jobs = try(var.elk.wait_for_jobs, null) - lint = try(var.elk.lint, null) + atomic = try(var.elastic4hpcclogs.atomic, false) + force_update = try(var.elastic4hpcclogs.force_update, false) + recreate_pods = try(var.elastic4hpcclogs.recreate_pods, false) + reuse_values = try(var.elastic4hpcclogs.reuse_values, false) + reset_values = try(var.elastic4hpcclogs.reset_values, false) + cleanup_on_fail = try(var.elastic4hpcclogs.cleanup_on_fail, false) + disable_openapi_validation = try(var.elastic4hpcclogs.disable_openapi_validation, false) + wait = try(var.elastic4hpcclogs.wait, true) + max_history = try(var.elastic4hpcclogs.max_history, 0) + dependency_update = try(var.elastic4hpcclogs.dependency_update, false) + timeout = try(var.elastic4hpcclogs.timeout, 300) + wait_for_jobs = try(var.elastic4hpcclogs.wait_for_jobs, false) + lint = try(var.elastic4hpcclogs.lint, false) + + # dynamic "set" { + # for_each = can(var.elastic4hpcclogs.expose) ?
[1] : [] + # content { + # name = "kibana.service.annotations.service\\.beta\\.kubernetes\\.io/azure-load-balancer-internal" + # value = tostring(false) + # } + # } depends_on = [ helm_release.storage @@ -203,19 +216,25 @@ resource "helm_release" "storage" { count = var.disable_helm ? 0 : 1 name = "azstorage" - chart = local.storage_chart + chart = can(var.storage.remote_chart) ? "hpcc-azurefile" : var.storage.local_chart + repository = can(var.storage.remote_chart) ? var.storage.remote_chart : null + version = can(var.storage.version) ? var.storage.version : null values = concat(can(var.storage.storage_account.name) ? [file("${path.root}/values/hpcc-azurefile.yaml")] : [], try([for v in var.storage.values : file(v)], [])) create_namespace = true namespace = try(var.hpcc.namespace, terraform.workspace) - atomic = try(var.storage.atomic, null) - recreate_pods = try(var.storage.recreate_pods, null) + atomic = try(var.storage.atomic, false) + force_update = try(var.storage.force_update, false) + recreate_pods = try(var.storage.recreate_pods, false) + reuse_values = try(var.storage.reuse_values, false) + reset_values = try(var.storage.reset_values, false) cleanup_on_fail = try(var.storage.cleanup_on_fail, null) - disable_openapi_validation = try(var.storage.disable_openapi_validation, null) - wait = try(var.storage.wait, null) - dependency_update = try(var.storage.dependency_update, null) + disable_openapi_validation = try(var.storage.disable_openapi_validation, false) + wait = try(var.storage.wait, true) + max_history = try(var.storage.max_history, 0) + dependency_update = try(var.storage.dependency_update, false) timeout = try(var.storage.timeout, 600) - wait_for_jobs = try(var.storage.wait_for_jobs, null) - lint = try(var.storage.lint, null) + wait_for_jobs = try(var.storage.wait_for_jobs, false) + lint = try(var.storage.lint, false) depends_on = [ module.kubernetes diff --git a/values/elastic4hpcclogs.yaml b/values/elastic4hpcclogs.yaml new file mode 100644 index 
0000000..4024016 --- /dev/null +++ b/values/elastic4hpcclogs.yaml @@ -0,0 +1,4 @@ +kibana: + service: + annotations: + service.beta.kubernetes.io/azure-load-balancer-internal: "false" \ No newline at end of file diff --git a/values/esp.yaml b/values/esp.yaml index 1004a51..46448d1 100644 --- a/values/esp.yaml +++ b/values/esp.yaml @@ -4,6 +4,11 @@ esp: application: eclwatch auth: none replicas: 1 +# Add remote clients to generated client certificates and make the ESP require that one of the generated certificates is provided by a client in order to connect +# When setting up remote clients make sure that certificates.issuers.remote.enabled is set to true. +# remoteClients: +# - name: myclient +# organization: mycompany service: ## port can be used to change the local port used by the pod. If omitted, the default port (8880) is used port: 8888 @@ -14,9 +19,25 @@ esp: ## Annotations can be specified on a service - for example to specify provider-specific information such as service.beta.kubernetes.io/azure-load-balancer-internal-subnet #annotations: # service.beta.kubernetes.io/azure-load-balancer-internal-subnet: "mysubnet" + # The service.annotations prefixed with hpcc.eclwatch.io should not be declared here. They can be declared + # in other services in order to be exposed in the ECLWatch interface. Similar function can be used by other + # applications. For other applications, the "eclwatch" inside the service.annotations should be replaced by + # their application names. + # hpcc.eclwatch.io/enabled: "true" + # hpcc.eclwatch.io/description: "some description" ## You can also specify labels on a service #labels: # mylabel: "3" + ## Links specify the web links for a service. The web links may be shown on ECLWatch. + #links: + #- name: linkname + # description: "some description" + # url: "http://abc.com/def?g=1" + ## CIDRS allowed to access this service. 
+ #loadBalancerSourceRanges: [1.2.3.4/32, 5.6.7.8/32] + # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size + # default for EclWatch is 60M, default for other services is 8M + #maxRequestEntityLength: 70M #resources: # cpu: "1" # memory: "2G" @@ -27,6 +48,9 @@ esp: service: servicePort: 8010 visibility: cluster + # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size + # default for EclWatch is 60M, default for other services is 8M + #maxRequestEntityLength: 9M #resources: # cpu: "250m" # memory: "1G" @@ -37,6 +61,13 @@ esp: service: visibility: global servicePort: 8002 + #annotations: + # hpcc.eclwatch.io/enabled: "true" + # hpcc.eclwatch.io/description: "Roxie Test page" + # hpcc.eclwatch.io/port: "8002" + # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size + # default for EclWatch is 60M, default for other services is 8M + #maxRequestEntityLength: 9M #resources: # cpu: "250m" # memory: "1G" @@ -47,6 +78,9 @@ esp: service: visibility: local servicePort: 8899 + # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size + # default for EclWatch is 60M, default for other services is 8M + #maxRequestEntityLength: 9M #resources: # cpu: "250m" # memory: "1G" @@ -54,10 +88,47 @@ esp: application: sql2ecl auth: none replicas: 1 +# remoteClients: +# - name: sqlclient111 service: visibility: local servicePort: 8510 #domain: hpccsql.com + # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size + # default for EclWatch is 60M, default for other services is 8M + #maxRequestEntityLength: 9M #resources: # cpu: "250m" - # memory: "1G" \ No newline at end of file + # memory: "1G" +- name: dfs + application: dfs 
+ auth: none + replicas: 1 + service: + visibility: local + servicePort: 8520 + # Increase maxRequestEntityLength when query deployments (or similar actions) start to fail because they surpass the maximum size + # default for EclWatch is 60M, default for other services is 8M + #maxRequestEntityLength: 9M + #resources: + # cpu: "250m" + # memory: "1G" + + +#- name: ldapenvironment + #ldapenvironment is a stand alone ESP service used to help stand up new HPCC LDAP Environments +# application: ldapenvironment +# auth: ldap +# #specify the hpcc branch Root Name +# hpccRootName: ou=hpcc,dc=myldap,dc=com +# #specify all BaseDN with your LDAP Server's "dc=" settings +# sharedFilesBaseDN: ou=files,ou=hpcc,dc=myldap,dc=com +# sharedGroupsBaseDN: ou=groups,ou=hpcc,dc=myldap,dc=com +# sharedUsersBaseDN: ou=users,ou=hpcc,dc=myldap,dc=com +# sharedResourcesBaseDN: ou=smc,ou=espservices,ou=hpcc,dc=myldap,dc=com +# sharedWorkunitsBaseDN: ou=workunits,ou=hpcc,dc=myldap,dc=com +# adminGroupName: HPCCAdmins +# replicas: 1 +# service: +# visibility: local +# servicePort: 8511 \ No newline at end of file diff --git a/variables.tf b/variables.tf index 760e109..a49809c 100644 --- a/variables.tf +++ b/variables.tf @@ -7,7 +7,7 @@ variable "admin" { } variable "expose_services" { - description = "Expose ECLWatch and ELK to the Internet. This is not secure. Please consider before using it." + description = "Expose ECLWatch and elastic4hpcclogs to the Internet. This is not secure. Please consider before using it." type = bool default = false } @@ -93,46 +93,22 @@ variable "node_pools" { default = { default = {} } } -variable "image_root" { - description = "Root of the image other than hpccsystems." - type = string - default = null -} - -variable "image_name" { - description = "Root of the image other than hpccsystems." - type = string - default = null -} - -variable "image_version" { - description = "Root of the image other than hpccsystems." 
- type = string - default = null -} - variable "hpcc" { description = "HPCC Helm chart variables." type = any - default = { default = { name = "myhpcck8s" } } + default = { name = "myhpcck8s" } } variable "storage" { description = "Storage account arguments." type = any - default = { default = {} } -} - -variable "existing_storage" { - description = "Existing storage account metadata." - type = any - default = { default = {} } + default = { default = false } } -variable "elk" { +variable "elastic4hpcclogs" { description = "HPCC Helm chart variables." type = any - default = { default = { name = "myhpccelk", enable = true } } + default = { name = "myelastic4hpcclogs", enable = true } } variable "registry" { diff --git a/versions.tf b/versions.tf index dc62e41..39f8f07 100644 --- a/versions.tf +++ b/versions.tf @@ -14,7 +14,7 @@ terraform { } helm = { source = "hashicorp/helm" - version = ">=2.1.2" + version = ">=2.5.1" } } required_version = ">=0.15.0"