
Commit

Add support for all parameters supported by the helm provider.
Signed-off-by: Godson Fortil <[email protected]>
Godson Fortil committed Apr 29, 2022
1 parent 4ddcfe7 commit 7d4da7a
Showing 8 changed files with 488 additions and 234 deletions.
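The change applies one idiom throughout main.tf: each Helm release now reads every optional helm provider argument from its chart's variable object with try(), falling back to a default when the caller omits it, and uses can(remote_chart) to choose between a repository chart and a local chart path. A minimal sketch of the pattern (resource name illustrative; arguments and defaults as they appear in the diff below):

    resource "helm_release" "example" {
      # prefer the remote repository when one is configured,
      # otherwise fall back to a local chart path
      chart      = can(var.hpcc.remote_chart) ? "hpcc" : var.hpcc.local_chart
      repository = can(var.hpcc.remote_chart) ? var.hpcc.remote_chart : null

      # optional arguments take a sensible default when unset
      atomic  = try(var.hpcc.atomic, false)
      wait    = try(var.hpcc.wait, true)
      timeout = try(var.hpcc.timeout, 900)
    }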
315 changes: 211 additions & 104 deletions README.md

Large diffs are not rendered by default.

169 changes: 130 additions & 39 deletions examples/admin.tfvars
@@ -28,73 +28,164 @@ node_pools = {
  enable_auto_scaling = true
  only_critical_addons_enabled = true
  min_count = 1
- max_count = 2
+ max_count = 1
  availability_zones = []
- subnet = "private"
+ subnet = "public"
  enable_host_encryption = false
  enable_node_public_ip = false
  os_disk_type = "Managed"
  type = "VirtualMachineScaleSets"
  # max_pods = 10
  # node_labels = {"engine" = "roxie"}
  # orchestrator_version = "2.9.0"
  # os_disk_size_gb = 100
  # tags = {"mynodepooltag1" = "mytagvalue1", "mynodepooltag2" = "mytagvalue2"}
}

addpool1 = {
-  vm_size = "Standard_D4_v4"
-  enable_auto_scaling = true
-  node_count = 2
-  min_count = 1
-  max_count = 2
-  availability_zones = []
-  subnet = "public"
-  priority = "Regular"
-  spot_max_price = -1
+  vm_size = "Standard_D4_v4"
+  enable_auto_scaling = true
+  node_count = 2
+  min_count = 1
+  max_count = 2
+  availability_zones = []
+  subnet = "public"
+  priority = "Regular"
+  spot_max_price = -1
+  max_surge = "1"
+  os_type = "Linux"
+  enable_host_encryption = false
+  enable_node_public_ip = false
+  only_critical_addons_enabled = false
+  os_disk_type = "Managed"
+  type = "VirtualMachineScaleSets"
+  # orchestrator_version = "2.9.0"
+  # os_disk_size_gb = 100
+  # max_pods = 20
+  # node_labels = {"engine" = "roxie"}
+  # eviction_policy = "Spot"
+  # node_taints = ["mytaint1", "mytaint2"]
+  # proximity_placement_group_id = "my_proximity_placement_group_id"
+  # spot_max_price = 1
+  # tags = {"mynodepooltag1" = "mytagvalue1", "mynodepooltag2" = "mytagvalue2"}
}

addpool2 = {
-  vm_size = "Standard_D4_v4"
-  enable_auto_scaling = true
-  node_count = 2
-  min_count = 1
-  max_count = 2
-  availability_zones = []
-  subnet = "public"
-  priority = "Regular"
-  spot_max_price = -1
+  vm_size = "Standard_D4_v4"
+  enable_auto_scaling = true
+  node_count = 2
+  min_count = 1
+  max_count = 2
+  availability_zones = []
+  subnet = "public"
+  priority = "Regular"
+  spot_max_price = -1
+  max_surge = "1"
+  os_type = "Linux"
+  enable_host_encryption = false
+  enable_node_public_ip = false
+  only_critical_addons_enabled = false
+  os_disk_type = "Managed"
+  type = "VirtualMachineScaleSets"
+  # orchestrator_version = "2.9.0"
+  # os_disk_size_gb = 100
+  # max_pods = 20
+  # node_labels = {"engine" = "roxie"}
+  # eviction_policy = "Spot"
+  # node_taints = ["mytaint1", "mytaint2"]
+  # proximity_placement_group_id = "my_proximity_placement_group_id"
+  # spot_max_price = 1
+  # tags = {"mynodepooltag1" = "mytagvalue1", "mynodepooltag2" = "mytagvalue2"}
}
}

# CHARTS
# .......................

hpcc = {
-  version = "8.6.20-rc1"
-  name = "myhpcck8s"
-  atomic = true
-  # image_root = "<server-address>/<repository name>"
+  name = "myhpcck8s"
+  expose_eclwatch = true
+  atomic = true
+  recreate_pods = false
+  reuse_values = false
+  reset_values = false
+  force_update = false
+  namespace = "default"
+  cleanup_on_fail = false
+  disable_openapi_validation = false
+  max_history = 0
+  wait = true
+  dependency_update = true
+  timeout = 900
+  wait_for_jobs = false
+  lint = false
+  remote_chart = "https://hpcc-systems.github.io/helm-chart"
+  # local_chart = "/Users/foo/work/demo/helm-chart/helm/hpcc" # Other examples: local_chart = "https://github.com/hpcc-systems/helm-chart/raw/master/docs/hpcc-8.6.16-rc1.tgz"
+  # version = "8.6.14-rc2"
+  # values = ["/Users/foo/mycustomvalues1.yaml", "/Users/foo/mycustomvalues2.yaml"]
+  # image_root = "west.lexisnexisrisk.com"
+  # image_name = "platform-core-ln"
+  # image_version = "8.6.18-rc1"
}

- elk = {
-   enable = true
-   name = "myhpccelk"
-   # chart = ""
-   # values = []
- }

storage = {
-  default = false
-  # chart = ""
-  # values = []
+  default = false
+  atomic = true
+  recreate_pods = false
+  reuse_values = false
+  reset_values = false
+  force_update = false
+  namespace = "default"
+  cleanup_on_fail = false
+  disable_openapi_validation = false
+  max_history = 0
+  wait = true
+  dependency_update = true
+  timeout = 600
+  wait_for_jobs = false
+  lint = false
+  remote_chart = "https://hpcc-systems.github.io/helm-chart"
+  # local_chart = "/Users/foo/work/demo/helm-chart/helm/examples/azure/hpcc-azurefile"
+  # version = "0.1.0"
+  # values = ["/Users/foo/mycustomvalues1.yaml", "/Users/foo/mycustomvalues2.yaml"]
  /*
  storage_account = {
    location = "eastus"
-   name = "demohpccsa3"
+   name = "foohpccsa3"
    resource_group_name = "app-storageaccount-sandbox-eastus-48936"
-   # subscription_id = ""
+   # subscription_id = "value"
  }
  */
}

# Optional Attributes
# -------------------
+ elastic4hpcclogs = {
+   enable = true
+   expose = true
+   name = "myelastic4hpcclogs"
+   atomic = true
+   recreate_pods = false
+   reuse_values = false
+   reset_values = false
+   force_update = false
+   namespace = "default"
+   cleanup_on_fail = false
+   disable_openapi_validation = false
+   max_history = 0
+   wait = true
+   dependency_update = true
+   timeout = 300
+   wait_for_jobs = false
+   lint = false
+   remote_chart = "https://hpcc-systems.github.io/helm-chart"
+   # local_chart = "/Users/foo/work/demo/helm-chart/helm/managed/logging/elastic"
+   # version = "1.2.10"
+   # values = ["/Users/foo/mycustomvalues1.yaml", "/Users/foo/mycustomvalues2.yaml"]
+ }

- # expose_services - Expose ECLWatch and ELK to the internet. This can be unsafe and may not be supported by your organization.
+ # expose_services - Expose ECLWatch and elastic4hpcclogs to the internet. This can be unsafe and may not be supported by your organization.
  # Setting this to true can cause the eclwatch service to get stuck in a pending state. Only use this if you know what you are doing.
expose_services = true
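If ECL Watch does get stuck, its LoadBalancer service never receives an external address; the kubectl selector that locals.tf already uses to build the ECL Watch URL doubles as a quick check (an EXTERNAL-IP of <pending> means the service was never exposed):

    kubectl get svc --field-selector metadata.name=eclwatch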

18 changes: 2 additions & 16 deletions locals.tf
@@ -13,22 +13,8 @@ locals {
) : module.metadata.names
tags = var.disable_naming_conventions ? merge(var.tags, { "admin" = var.admin.name, "email" = var.admin.email }) : merge(module.metadata.tags, { "admin" = var.admin.name, "email" = var.admin.email }, try(var.tags))
virtual_network = can(var.virtual_network.private_subnet_id) && can(var.virtual_network.public_subnet_id) && can(var.virtual_network.route_table_id) ? var.virtual_network : data.external.vnet[0].result
- cluster_name = "${local.names.resource_group_type}-${local.names.product_name}-terraform-${local.names.location}-${var.admin.name}-${terraform.workspace}"
-
- hpcc_repository = "https://github.com/hpcc-systems/helm-chart/raw/master/docs/hpcc-${var.hpcc.chart_version}.tgz"
- hpcc_chart = can(var.hpcc.chart) ? var.hpcc.chart : local.hpcc_repository
- hpcc_name = can(var.hpcc.name) ? var.hpcc.name : "myhpcck8s"
-
- storage_version = can(var.storage.chart_version) ? var.storage.chart_version : "0.1.0"
- storage_repository = "https://github.com/hpcc-systems/helm-chart/raw/master/docs/hpcc-azurefile-${local.storage_version}.tgz"
- storage_chart = can(var.storage.chart) ? var.storage.chart : local.storage_repository
- storage_account = can(var.storage.storage_account.resource_group_name) && can(var.storage.storage_account.name) && can(var.storage.storage_account.location) ? var.storage.storage_account : data.external.sa[0].result
-
- elk_version = can(var.elk.chart_version) ? var.elk.chart_version : "1.2.1"
- elk_repository = "https://github.com/hpcc-systems/helm-chart/raw/master/docs/elastic4hpcclogs-${local.elk_version}.tgz"
- elk_chart = can(var.elk.chart) ? var.elk.chart : local.elk_repository
- elk_name = can(var.elk.name) ? var.elk.name : "myhpccelk"
+ cluster_name = "${local.names.resource_group_type}-${local.names.product_name}-terraform-${local.names.location}-${var.admin.name}${random_integer.int.result}-${terraform.workspace}"
+ storage_account = can(var.storage.storage_account.resource_group_name) && can(var.storage.storage_account.name) && can(var.storage.storage_account.location) ? var.storage.storage_account : data.external.sa[0].result

az_command = try("az aks get-credentials --name ${module.kubernetes.name} --resource-group ${module.resource_group.name} --overwrite", "")
web_urls = { auto_launch_eclwatch = "http://$(kubectl get svc --field-selector metadata.name=eclwatch | awk 'NR==2 {print $4}'):8010" }
107 changes: 63 additions & 44 deletions main.tf
@@ -1,13 +1,6 @@
resource "random_string" "random" {
length = 43
upper = false
number = false
special = false
}

resource "random_password" "admin" {
length = 6
special = true
resource "random_integer" "int" {
min = 1
max = 3
}

module "subscription" {
@@ -120,21 +113,27 @@ resource "kubernetes_secret" "private_docker_registry" {
resource "helm_release" "hpcc" {
count = var.disable_helm ? 0 : 1

- name = local.hpcc_name
- chart = local.hpcc_chart
+ name = can(var.hpcc.name) ? var.hpcc.name : "myhpcck8s"
+ version = can(var.hpcc.version) ? var.hpcc.version : null
+ chart = can(var.hpcc.remote_chart) ? "hpcc" : var.hpcc.local_chart
+ repository = can(var.hpcc.remote_chart) ? var.hpcc.remote_chart : null
  create_namespace = true
  namespace = try(var.hpcc.namespace, terraform.workspace)
- atomic = try(var.hpcc.atomic, null)
- recreate_pods = try(var.hpcc.recreate_pods, null)
- cleanup_on_fail = try(var.hpcc.cleanup_on_fail, null)
- disable_openapi_validation = try(var.hpcc.disable_openapi_validation, null)
- wait = try(var.hpcc.wait, null)
- dependency_update = try(var.hpcc.dependency_update, null)
+ atomic = try(var.hpcc.atomic, false)
+ recreate_pods = try(var.hpcc.recreate_pods, false)
+ reuse_values = try(var.hpcc.reuse_values, false)
+ reset_values = try(var.hpcc.reset_values, false)
+ force_update = try(var.hpcc.force_update, false)
+ cleanup_on_fail = try(var.hpcc.cleanup_on_fail, false)
+ disable_openapi_validation = try(var.hpcc.disable_openapi_validation, false)
+ max_history = try(var.hpcc.max_history, 0)
+ wait = try(var.hpcc.wait, true)
+ dependency_update = try(var.hpcc.dependency_update, false)
  timeout = try(var.hpcc.timeout, 900)
- wait_for_jobs = try(var.hpcc.wait_for_jobs, null)
- lint = try(var.hpcc.lint, null)
+ wait_for_jobs = try(var.hpcc.wait_for_jobs, false)
+ lint = try(var.hpcc.lint, false)

- values = concat(var.expose_services ? [file("${path.root}/values/esp.yaml")] : [],
+ values = concat(var.hpcc.expose_eclwatch ? [file("${path.root}/values/esp.yaml")] : [],
    try([for v in var.hpcc.values : file(v)], []), [file("${path.root}/values/values-retained-azurefile.yaml")])

dynamic "set" {
@@ -170,29 +169,43 @@ resource "helm_release" "hpcc" {
}

depends_on = [
-   helm_release.elk,
+   helm_release.elastic4hpcclogs,
helm_release.storage,
module.kubernetes
]
}

resource "helm_release" "elk" {
count = var.disable_helm || !var.elk.enable ? 0 : 1
resource "helm_release" "elastic4hpcclogs" {
count = var.disable_helm || !var.elastic4hpcclogs.enable ? 0 : 1

- name = local.elk_name
+ name = can(var.elastic4hpcclogs.name) ? var.elastic4hpcclogs.name : "myelastic4hpcclogs"
  namespace = try(var.hpcc.namespace, terraform.workspace)
- chart = local.elk_chart
- values = try([for v in var.elk.values : file(v)], [])
+ chart = can(var.elastic4hpcclogs.remote_chart) ? "elastic4hpcclogs" : var.elastic4hpcclogs.local_chart
+ repository = can(var.elastic4hpcclogs.remote_chart) ? var.elastic4hpcclogs.remote_chart : null
+ version = can(var.elastic4hpcclogs.version) ? var.elastic4hpcclogs.version : null
+ values = concat(try([for v in var.elastic4hpcclogs.values : file(v)], []), can(var.elastic4hpcclogs.expose) ? [file("${path.root}/values/elastic4hpcclogs.yaml")] : [])
  create_namespace = true
- atomic = try(var.elk.atomic, null)
- recreate_pods = try(var.elk.recreate_pods, null)
- cleanup_on_fail = try(var.elk.cleanup_on_fail, null)
- disable_openapi_validation = try(var.elk.disable_openapi_validation, null)
- wait = try(var.elk.wait, null)
- dependency_update = try(var.elk.dependency_update, null)
- timeout = try(var.elk.timeout, 600)
- wait_for_jobs = try(var.elk.wait_for_jobs, null)
- lint = try(var.elk.lint, null)
+ atomic = try(var.elastic4hpcclogs.atomic, false)
+ force_update = try(var.elastic4hpcclogs.force_update, false)
+ recreate_pods = try(var.elastic4hpcclogs.recreate_pods, false)
+ reuse_values = try(var.elastic4hpcclogs.reuse_values, false)
+ reset_values = try(var.elastic4hpcclogs.reset_values, false)
+ cleanup_on_fail = try(var.elastic4hpcclogs.cleanup_on_fail, false)
+ disable_openapi_validation = try(var.elastic4hpcclogs.disable_openapi_validation, false)
+ wait = try(var.elastic4hpcclogs.wait, true)
+ max_history = try(var.elastic4hpcclogs.max_history, 0)
+ dependency_update = try(var.elastic4hpcclogs.dependency_update, false)
+ timeout = try(var.elastic4hpcclogs.timeout, 300)
+ wait_for_jobs = try(var.elastic4hpcclogs.wait_for_jobs, false)
+ lint = try(var.elastic4hpcclogs.lint, false)

# dynamic "set" {
# for_each = can(var.elastic4hpcclogs.expose) ? [1] : []
# content {
# name = "kibana.service.annotations.service\\.beta\\.kubernetes\\.io/azure-load-balancer-internal"
# value = tostring(false)
# }
# }

depends_on = [
helm_release.storage
@@ -203,19 +216,25 @@ resource "helm_release" "storage" {
count = var.disable_helm ? 0 : 1

name = "azstorage"
- chart = local.storage_chart
+ chart = can(var.storage.remote_chart) ? "hpcc-azurefile" : var.storage.local_chart
+ repository = can(var.storage.remote_chart) ? var.storage.remote_chart : null
+ version = can(var.storage.version) ? var.storage.version : null
+ values = concat(can(var.storage.storage_account.name) ? [file("${path.root}/values/hpcc-azurefile.yaml")] : [], try([for v in var.storage.values : file(v)], []))
  create_namespace = true
  namespace = try(var.hpcc.namespace, terraform.workspace)
- atomic = try(var.storage.atomic, null)
- recreate_pods = try(var.storage.recreate_pods, null)
+ atomic = try(var.storage.atomic, false)
+ force_update = try(var.storage.force_update, false)
+ recreate_pods = try(var.storage.recreate_pods, false)
+ reuse_values = try(var.storage.reuse_values, false)
+ reset_values = try(var.storage.reset_values, false)
  cleanup_on_fail = try(var.storage.cleanup_on_fail, null)
- disable_openapi_validation = try(var.storage.disable_openapi_validation, null)
- wait = try(var.storage.wait, null)
- dependency_update = try(var.storage.dependency_update, null)
+ disable_openapi_validation = try(var.storage.disable_openapi_validation, false)
+ wait = try(var.storage.wait, true)
+ max_history = try(var.storage.max_history, 0)
+ dependency_update = try(var.storage.dependency_update, false)
  timeout = try(var.storage.timeout, 600)
- wait_for_jobs = try(var.storage.wait_for_jobs, null)
- lint = try(var.storage.lint, null)
+ wait_for_jobs = try(var.storage.wait_for_jobs, false)
+ lint = try(var.storage.lint, false)

depends_on = [
module.kubernetes
4 changes: 4 additions & 0 deletions values/elastic4hpcclogs.yaml
@@ -0,0 +1,4 @@
+ kibana:
+   service:
+     annotations:
+       service.beta.kubernetes.io/azure-load-balancer-internal: "false"
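This values file is what the new expose flag feeds into the elastic4hpcclogs release; the commented-out dynamic "set" block in main.tf sketches an equivalent inline approach. What the uncommented form would look like, assuming it behaves the same (it is disabled in this commit, so untested):

    dynamic "set" {
      for_each = can(var.elastic4hpcclogs.expose) ? [1] : []
      content {
        # mark the Kibana LoadBalancer as external (internal = false)
        name  = "kibana.service.annotations.service\\.beta\\.kubernetes\\.io/azure-load-balancer-internal"
        value = tostring(false)
      }
    }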
