From 5ff8aa66825c28509c4d415e8d02f595bc86a2e4 Mon Sep 17 00:00:00 2001 From: Piyush Kumar Date: Mon, 2 Aug 2021 15:57:05 +0530 Subject: [PATCH] added openstack support (#49) Co-authored-by: Piyush Kumar --- docs/data-sources/cloudaccount_openstack.md | 21 + docs/resources/cloudaccount_openstack.md | 46 ++ docs/resources/cluster_openstack.md | 214 +++++++ docs/resources/project.md | 2 +- examples/e2e/openstack/kubectl.tf | 8 + examples/e2e/openstack/locals.tf | 5 + examples/e2e/openstack/providers.tf | 34 + .../e2e/openstack/resource_cloudaccount.tf | 16 + examples/e2e/openstack/resource_cluster.tf | 38 ++ .../e2e/openstack/resource_clusterprofile.tf | 4 + examples/e2e/openstack/variables.tf | 8 + .../providers.tf | 15 + .../resource.tf | 10 + .../terraform.template.tfvars | 14 + .../variables.tf | 11 + .../providers.tf | 15 + .../resource.tf | 65 ++ .../terraform.template.tfvars | 3 + .../variables.tf | 10 + go.mod | 2 +- go.sum | 4 +- pkg/client/cluster_openstack.go | 156 +++++ spectrocloud/cluster_common.go | 18 + .../data_source_cloud_account_openstack.go | 70 ++ spectrocloud/provider.go | 4 + spectrocloud/resource_cloud_account_aws.go | 4 +- spectrocloud/resource_cloud_account_azure.go | 4 +- .../resource_cloud_account_openstack.go | 187 ++++++ spectrocloud/resource_cluster_openstack.go | 600 ++++++++++++++++++ 29 files changed, 1580 insertions(+), 8 deletions(-) create mode 100644 docs/data-sources/cloudaccount_openstack.md create mode 100644 docs/resources/cloudaccount_openstack.md create mode 100644 docs/resources/cluster_openstack.md create mode 100644 examples/e2e/openstack/kubectl.tf create mode 100644 examples/e2e/openstack/locals.tf create mode 100644 examples/e2e/openstack/providers.tf create mode 100644 examples/e2e/openstack/resource_cloudaccount.tf create mode 100644 examples/e2e/openstack/resource_cluster.tf create mode 100644 examples/e2e/openstack/resource_clusterprofile.tf create mode 100644 examples/e2e/openstack/variables.tf create mode 100644 examples/resources/spectrocloud_cloudaccount_openstack/providers.tf create mode 100644 examples/resources/spectrocloud_cloudaccount_openstack/resource.tf create mode 100644 examples/resources/spectrocloud_cloudaccount_openstack/terraform.template.tfvars create mode 100644 examples/resources/spectrocloud_cloudaccount_openstack/variables.tf create mode 100644 examples/resources/spectrocloud_cluster_openstack/providers.tf create mode 100644 examples/resources/spectrocloud_cluster_openstack/resource.tf create mode 100644 examples/resources/spectrocloud_cluster_openstack/terraform.template.tfvars create mode 100644 examples/resources/spectrocloud_cluster_openstack/variables.tf create mode 100644 pkg/client/cluster_openstack.go create mode 100644 spectrocloud/data_source_cloud_account_openstack.go create mode 100644 spectrocloud/resource_cloud_account_openstack.go create mode 100644 spectrocloud/resource_cluster_openstack.go diff --git a/docs/data-sources/cloudaccount_openstack.md b/docs/data-sources/cloudaccount_openstack.md new file mode 100644 index 00000000..23f64564 --- /dev/null +++ b/docs/data-sources/cloudaccount_openstack.md @@ -0,0 +1,21 @@ +--- +page_title: "spectrocloud_cloudaccount_openstack Data Source - terraform-provider-spectrocloud" +subcategory: "" +description: |- + +--- + +# Data Source `spectrocloud_cloudaccount_openstack` + + + + + +## Schema + +### Optional + +- **id** (String) The ID of this resource. 
+- **name** (String) + + diff --git a/docs/resources/cloudaccount_openstack.md b/docs/resources/cloudaccount_openstack.md new file mode 100644 index 00000000..73005746 --- /dev/null +++ b/docs/resources/cloudaccount_openstack.md @@ -0,0 +1,46 @@ +--- +page_title: "spectrocloud_cloudaccount_openstack Resource - terraform-provider-spectrocloud" +subcategory: "" +description: |- + +--- + +# Resource `spectrocloud_cloudaccount_openstack` + + + +## Example Usage + +```terraform +resource "spectrocloud_cloudaccount_openstack" "account" { + name = "openstack-dev" + private_cloud_gateway_id = "" + openstack_username = var.openstack_username + openstack_password = var.openstack_password + identity_endpoint = var.identity_endpoint + parent_region = var.region + default_domain = var.domain + default_project = var.project +} +``` + +## Schema + +### Required + +- **default_domain** (String) +- **default_project** (String) +- **identity_endpoint** (String) +- **name** (String) +- **openstack_password** (String, Sensitive) +- **openstack_username** (String) +- **parent_region** (String) +- **private_cloud_gateway_id** (String) + +### Optional + +- **ca_certificate** (String) +- **id** (String) The ID of this resource. +- **openstack_allow_insecure** (Boolean) + + diff --git a/docs/resources/cluster_openstack.md b/docs/resources/cluster_openstack.md new file mode 100644 index 00000000..ce8e63e5 --- /dev/null +++ b/docs/resources/cluster_openstack.md @@ -0,0 +1,214 @@ +--- +page_title: "spectrocloud_cluster_openstack Resource - terraform-provider-spectrocloud" +subcategory: "" +description: |- + +--- + +# Resource `spectrocloud_cluster_openstack` + + + +## Example Usage + +```terraform +data "spectrocloud_cloudaccount_openstack" "account" { + # id = + name = var.cluster_cloud_account_name +} + +data "spectrocloud_cluster_profile" "profile" { + # id = + name = var.cluster_cluster_profile_name +} + +data "spectrocloud_backup_storage_location" "bsl" { + name = var.backup_storage_location_name +} + +resource "spectrocloud_cluster_openstack" "cluster" { + name = "openstack-piyush-tf-1" + + cluster_profile { + id = data.spectrocloud_cluster_profile.profile.id + } + + cloud_account_id = data.spectrocloud_cloudaccount_openstack.account.id + tags = ["dev"] + + + cloud_config { + domain = "Default" + project = "dev" + region = "RegionOne" + ssh_key = "Spectro2021" + dns_servers = ["10.10.128.8", "8.8.8.8"] + subnet_cidr = "192.168.151.0/24" + } + + machine_pool { + control_plane = true + control_plane_as_worker = true + name = "master-pool" + count = 1 + instance_type = "spectro-xlarge" + azs = ["zone1"] + } + + machine_pool { + name = "worker-basic" + count = 2 + instance_type = "spectro-large" + azs = ["zone1"] + } + + backup_policy { + schedule = "0 0 * * SUN" + backup_location_id = data.spectrocloud_backup_storage_location.bsl.id + prefix = "prod-backup" + expiry_in_hour = 7200 + include_disks = true + include_cluster_resources = true + } + + scan_policy { + configuration_scan_schedule = "0 0 * * SUN" + penetration_scan_schedule = "0 0 * * SUN" + conformance_scan_schedule = "0 0 1 * *" + } +} +``` + +## Schema + +### Required + +- **cloud_account_id** (String) +- **cloud_config** (Block List, Min: 1, Max: 1) (see [below for nested schema](#nestedblock--cloud_config)) +- **machine_pool** (Block List, Min: 1) (see [below for nested schema](#nestedblock--machine_pool)) +- **name** (String) + +### Optional + +- **backup_policy** (Block List, Max: 1) (see [below for nested schema](#nestedblock--backup_policy)) +- 
**cluster_profile** (Block List) (see [below for nested schema](#nestedblock--cluster_profile)) +- **cluster_profile_id** (String, Deprecated) +- **id** (String) The ID of this resource. +- **os_patch_after** (String) +- **os_patch_on_boot** (Boolean) +- **os_patch_schedule** (String) +- **pack** (Block List) (see [below for nested schema](#nestedblock--pack)) +- **scan_policy** (Block List, Max: 1) (see [below for nested schema](#nestedblock--scan_policy)) +- **tags** (Set of String) +- **timeouts** (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-only + +- **cloud_config_id** (String) +- **kubeconfig** (String) + + +### Nested Schema for `cloud_config` + +Required: + +- **dns_servers** (Set of String) +- **domain** (String) +- **project** (String) +- **region** (String) +- **ssh_key** (String) +- **subnet_cidr** (String) + +Optional: + +- **network_id** (String) +- **subnet_id** (String) + + + +### Nested Schema for `machine_pool` + +Required: + +- **count** (Number) +- **instance_type** (String) +- **name** (String) + +Optional: + +- **azs** (Set of String) +- **control_plane** (Boolean) +- **control_plane_as_worker** (Boolean) +- **subnet_id** (String) +- **update_strategy** (String) + + + +### Nested Schema for `backup_policy` + +Required: + +- **backup_location_id** (String) +- **expiry_in_hour** (Number) +- **prefix** (String) +- **schedule** (String) + +Optional: + +- **include_cluster_resources** (Boolean) +- **include_disks** (Boolean) +- **namespaces** (Set of String) + + + +### Nested Schema for `cluster_profile` + +Required: + +- **id** (String) The ID of this resource. + +Optional: + +- **pack** (Block List) (see [below for nested schema](#nestedblock--cluster_profile--pack)) + + +### Nested Schema for `cluster_profile.pack` + +Required: + +- **name** (String) +- **tag** (String) +- **values** (String) + + + + +### Nested Schema for `pack` + +Required: + +- **name** (String) +- **tag** (String) +- **values** (String) + + + +### Nested Schema for `scan_policy` + +Required: + +- **configuration_scan_schedule** (String) +- **conformance_scan_schedule** (String) +- **penetration_scan_schedule** (String) + + + +### Nested Schema for `timeouts` + +Optional: + +- **create** (String) +- **delete** (String) +- **update** (String) + + diff --git a/docs/resources/project.md b/docs/resources/project.md index 87825c31..246761f0 100644 --- a/docs/resources/project.md +++ b/docs/resources/project.md @@ -12,7 +12,7 @@ description: |- ## Example Usage ```terraform -resource "spectrocloud_team" "project" { +resource "spectrocloud_project" "project" { name = "dev1" } ``` diff --git a/examples/e2e/openstack/kubectl.tf b/examples/e2e/openstack/kubectl.tf new file mode 100644 index 00000000..7715bdf2 --- /dev/null +++ b/examples/e2e/openstack/kubectl.tf @@ -0,0 +1,8 @@ +/* +resource "local_file" "kubeconfig" { + content = local.cluster_kubeconfig + filename = "kubeconfig_openstack-1" + file_permission = "0644" + directory_permission = "0755" +} +*/ diff --git a/examples/e2e/openstack/locals.tf b/examples/e2e/openstack/locals.tf new file mode 100644 index 00000000..38cd4e84 --- /dev/null +++ b/examples/e2e/openstack/locals.tf @@ -0,0 +1,5 @@ +/* +locals { + cluster_kubeconfig = spectrocloud_cluster_openstack.cluster.kubeconfig +} +*/ diff --git a/examples/e2e/openstack/providers.tf b/examples/e2e/openstack/providers.tf new file mode 100644 index 00000000..665b8878 --- /dev/null +++ b/examples/e2e/openstack/providers.tf @@ -0,0 +1,34 @@ +terraform { + 
required_providers { + spectrocloud = { + version = ">= 0.1" + source = "spectrocloud/spectrocloud" + } + } +} + +variable "sc_host" { + description = "Spectro Cloud Endpoint" + default = "api.spectrocloud.com" +} + +variable "sc_username" { + description = "Spectro Cloud Username" +} + +variable "sc_password" { + description = "Spectro Cloud Password" + sensitive = true +} + +variable "sc_project_name" { + description = "Spectro Cloud Project (e.g: Default)" + default = "Default" +} + +provider "spectrocloud" { + host = var.sc_host + username = var.sc_username + password = var.sc_password + project_name = var.sc_project_name +} diff --git a/examples/e2e/openstack/resource_cloudaccount.tf b/examples/e2e/openstack/resource_cloudaccount.tf new file mode 100644 index 00000000..2578e07c --- /dev/null +++ b/examples/e2e/openstack/resource_cloudaccount.tf @@ -0,0 +1,16 @@ +/* +resource "spectrocloud_cloudaccount_openstack" "account" { + name = "openstack-dev" + private_cloud_gateway_id = "60fe915794168c655c0d766a" + openstack_username = var.openstack_username + openstack_password = var.openstack_password + identity_endpoint = var.identity_endpoint + parent_region = var.region + default_domain = var.domain + default_project = var.project +} +*/ + +data "spectrocloud_cloudaccount_openstack" "account" { + name = "openstack-pcg-piyush-dev-2" +} diff --git a/examples/e2e/openstack/resource_cluster.tf b/examples/e2e/openstack/resource_cluster.tf new file mode 100644 index 00000000..5f7d0c43 --- /dev/null +++ b/examples/e2e/openstack/resource_cluster.tf @@ -0,0 +1,38 @@ + +resource "spectrocloud_cluster_openstack" "cluster" { + name = "openstack-piyush-tf-1" + + cluster_profile { + id = data.spectrocloud_cluster_profile.profile.id + } + + cloud_account_id = data.spectrocloud_cloudaccount_openstack.account.id + tags = ["dev"] + + + cloud_config { + domain = "Default" + project = "dev" + region = "RegionOne" + ssh_key = "Spectro2021" + dns_servers = ["10.10.128.8", "8.8.8.8"] + subnet_cidr = "192.168.151.0/24" + } + + machine_pool { + control_plane = true + control_plane_as_worker = true + name = "master-pool" + count = 1 + instance_type = "spectro-xlarge" + azs = ["zone1"] + } + + machine_pool { + name = "worker-basic" + count = 2 + instance_type = "spectro-large" + azs = ["zone1"] + } + +} diff --git a/examples/e2e/openstack/resource_clusterprofile.tf b/examples/e2e/openstack/resource_clusterprofile.tf new file mode 100644 index 00000000..42c29dd5 --- /dev/null +++ b/examples/e2e/openstack/resource_clusterprofile.tf @@ -0,0 +1,4 @@ +# If looking up a cluster profile instead of creating a new one +data "spectrocloud_cluster_profile" "profile" { + name = "openstack-profile" +} \ No newline at end of file diff --git a/examples/e2e/openstack/variables.tf b/examples/e2e/openstack/variables.tf new file mode 100644 index 00000000..4df69e1c --- /dev/null +++ b/examples/e2e/openstack/variables.tf @@ -0,0 +1,8 @@ +variable "openstack_username" {} +variable "openstack_password" {} +variable "project" {} +variable "domain" {} +variable "identity_endpoint" {} +variable "region" {} + +# Cluster diff --git a/examples/resources/spectrocloud_cloudaccount_openstack/providers.tf b/examples/resources/spectrocloud_cloudaccount_openstack/providers.tf new file mode 100644 index 00000000..c0288b7a --- /dev/null +++ b/examples/resources/spectrocloud_cloudaccount_openstack/providers.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + spectrocloud = { + version = ">= 0.1" + source = "spectrocloud/spectrocloud" + } + } +} + 
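+# Spectro Cloud credentials for this provider block are supplied through the
+# sc_* variables declared in variables.tf; terraform.template.tfvars in this
+# example gives a template for the expected values.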
+provider "spectrocloud" { + host = var.sc_host + username = var.sc_username + password = var.sc_password + project_name = var.sc_project_name +} diff --git a/examples/resources/spectrocloud_cloudaccount_openstack/resource.tf b/examples/resources/spectrocloud_cloudaccount_openstack/resource.tf new file mode 100644 index 00000000..c8d0cc10 --- /dev/null +++ b/examples/resources/spectrocloud_cloudaccount_openstack/resource.tf @@ -0,0 +1,10 @@ +resource "spectrocloud_cloudaccount_openstack" "account" { + name = "openstack-dev" + private_cloud_gateway_id = "" + openstack_username = var.openstack_username + openstack_password = var.openstack_password + identity_endpoint = var.identity_endpoint + parent_region = var.region + default_domain = var.domain + default_project = var.project +} \ No newline at end of file diff --git a/examples/resources/spectrocloud_cloudaccount_openstack/terraform.template.tfvars b/examples/resources/spectrocloud_cloudaccount_openstack/terraform.template.tfvars new file mode 100644 index 00000000..196ddd1c --- /dev/null +++ b/examples/resources/spectrocloud_cloudaccount_openstack/terraform.template.tfvars @@ -0,0 +1,14 @@ +# Spectro Cloud credentials +sc_host = "" +sc_username = "" +sc_password = "" +sc_project_name = "Default" + +# Openstack Cloud Account credentials +openstack_username = "" +openstack_password = "" + +identity_endpoint = "" +project = "" +domain = "Default" +region = "RegionOne" diff --git a/examples/resources/spectrocloud_cloudaccount_openstack/variables.tf b/examples/resources/spectrocloud_cloudaccount_openstack/variables.tf new file mode 100644 index 00000000..d407ed6e --- /dev/null +++ b/examples/resources/spectrocloud_cloudaccount_openstack/variables.tf @@ -0,0 +1,11 @@ +variable "sc_host" {} +variable "sc_username" {} +variable "sc_password" {} +variable "sc_project_name" {} + +variable "openstack_username" {} +variable "openstack_password" {} +variable "project" {} +variable "domain" {} +variable "region" {} +variable "identity_endpoint" {} diff --git a/examples/resources/spectrocloud_cluster_openstack/providers.tf b/examples/resources/spectrocloud_cluster_openstack/providers.tf new file mode 100644 index 00000000..c0288b7a --- /dev/null +++ b/examples/resources/spectrocloud_cluster_openstack/providers.tf @@ -0,0 +1,15 @@ +terraform { + required_providers { + spectrocloud = { + version = ">= 0.1" + source = "spectrocloud/spectrocloud" + } + } +} + +provider "spectrocloud" { + host = var.sc_host + username = var.sc_username + password = var.sc_password + project_name = var.sc_project_name +} diff --git a/examples/resources/spectrocloud_cluster_openstack/resource.tf b/examples/resources/spectrocloud_cluster_openstack/resource.tf new file mode 100644 index 00000000..2333e935 --- /dev/null +++ b/examples/resources/spectrocloud_cluster_openstack/resource.tf @@ -0,0 +1,65 @@ +data "spectrocloud_cloudaccount_openstack" "account" { + # id = + name = var.cluster_cloud_account_name +} + +data "spectrocloud_cluster_profile" "profile" { + # id = + name = var.cluster_cluster_profile_name +} + +data "spectrocloud_backup_storage_location" "bsl" { + name = var.backup_storage_location_name +} + +resource "spectrocloud_cluster_openstack" "cluster" { + name = "openstack-piyush-tf-1" + + cluster_profile { + id = data.spectrocloud_cluster_profile.profile.id + } + + cloud_account_id = data.spectrocloud_cloudaccount_openstack.account.id + tags = ["dev"] + + + cloud_config { + domain = "Default" + project = "dev" + region = "RegionOne" + ssh_key = "Spectro2021" + 
dns_servers = ["10.10.128.8", "8.8.8.8"] + subnet_cidr = "192.168.151.0/24" + } + + machine_pool { + control_plane = true + control_plane_as_worker = true + name = "master-pool" + count = 1 + instance_type = "spectro-xlarge" + azs = ["zone1"] + } + + machine_pool { + name = "worker-basic" + count = 2 + instance_type = "spectro-large" + azs = ["zone1"] + } + + backup_policy { + schedule = "0 0 * * SUN" + backup_location_id = data.spectrocloud_backup_storage_location.bsl.id + prefix = "prod-backup" + expiry_in_hour = 7200 + include_disks = true + include_cluster_resources = true + } + + scan_policy { + configuration_scan_schedule = "0 0 * * SUN" + penetration_scan_schedule = "0 0 * * SUN" + conformance_scan_schedule = "0 0 1 * *" + } +} \ No newline at end of file diff --git a/examples/resources/spectrocloud_cluster_openstack/terraform.template.tfvars b/examples/resources/spectrocloud_cluster_openstack/terraform.template.tfvars new file mode 100644 index 00000000..77482cea --- /dev/null +++ b/examples/resources/spectrocloud_cluster_openstack/terraform.template.tfvars @@ -0,0 +1,3 @@ +cluster_cloud_account_name = "dev" +cluster_cluster_profile_name = "ProdOpenStack" +backup_storage_location_name = "prod-backup-s3" diff --git a/examples/resources/spectrocloud_cluster_openstack/variables.tf b/examples/resources/spectrocloud_cluster_openstack/variables.tf new file mode 100644 index 00000000..a5f8a167 --- /dev/null +++ b/examples/resources/spectrocloud_cluster_openstack/variables.tf @@ -0,0 +1,10 @@ +variable "sc_host" {} +variable "sc_username" {} +variable "sc_password" {} +variable "sc_project_name" {} + +variable "cluster_cloud_account_name" {} +variable "cluster_cluster_profile_name" {} +variable "backup_storage_location_name" {} + +variable "cluster_name" {} diff --git a/go.mod b/go.mod index 6188bb9b..43f59090 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/prometheus/common v0.23.0 github.com/robfig/cron v1.2.0 github.com/spectrocloud/gomi v1.9.1-0.20210519044035-5333c9359877 - github.com/spectrocloud/hapi v1.10.1-0.20210603080539-609a48b967df + github.com/spectrocloud/hapi v1.12.1-0.20210727152825-6181583676be ) // replace github.com/spectrocloud/hapi => ../hapi diff --git a/go.sum b/go.sum index 885e3903..7da10b9d 100644 --- a/go.sum +++ b/go.sum @@ -855,8 +855,8 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spectrocloud/gomi v0.0.0-20201113051324-08a1179400db/go.mod h1:rPAwipFWzjYkTfx44KmQazP1NR2cnHe7HSFZkc63mf4= github.com/spectrocloud/gomi v1.9.1-0.20210519044035-5333c9359877 h1:rbfwoaCqN862q0LVra0UistoS+QRhM3D/e+pNnwy7zw= github.com/spectrocloud/gomi v1.9.1-0.20210519044035-5333c9359877/go.mod h1:rPAwipFWzjYkTfx44KmQazP1NR2cnHe7HSFZkc63mf4= -github.com/spectrocloud/hapi v1.10.1-0.20210603080539-609a48b967df h1:nmSn2ky4SsFTrj6B9r2jaj+2/wn2rni2lt9OPpgiKTY= -github.com/spectrocloud/hapi v1.10.1-0.20210603080539-609a48b967df/go.mod h1:PY/aOnWz7w1hZE9RIEKeSYhuHJDpaeKHfsQ97pQ/yZk= +github.com/spectrocloud/hapi v1.12.1-0.20210727152825-6181583676be h1:SO7rWIFQ8YEHQ7hTiCJa6t25pDsAInZddmr43pAm5q8= +github.com/spectrocloud/hapi v1.12.1-0.20210727152825-6181583676be/go.mod h1:PY/aOnWz7w1hZE9RIEKeSYhuHJDpaeKHfsQ97pQ/yZk= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= diff --git a/pkg/client/cluster_openstack.go 
b/pkg/client/cluster_openstack.go new file mode 100644 index 00000000..8e1d4109 --- /dev/null +++ b/pkg/client/cluster_openstack.go @@ -0,0 +1,156 @@ +package client + +import ( + hapitransport "github.com/spectrocloud/hapi/apiutil/transport" + "github.com/spectrocloud/hapi/models" + clusterC "github.com/spectrocloud/hapi/spectrocluster/client/v1alpha1" + "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client/herr" +) + +func (h *V1alpha1Client) CreateClusterOpenStack(cluster *models.V1alpha1SpectroOpenStackClusterEntity) (string, error) { + client, err := h.getClusterClient() + if err != nil { + return "", err + } + + params := clusterC.NewV1alpha1SpectroClustersOpenStackCreateParamsWithContext(h.ctx).WithBody(cluster) + success, err := client.V1alpha1SpectroClustersOpenStackCreate(params) + if err != nil { + return "", err + } + + return *success.Payload.UID, nil +} + +func (h *V1alpha1Client) CreateCloudAccountOpenStack(account *models.V1alpha1OpenStackAccount) (string, error) { + client, err := h.getClusterClient() + if err != nil { + return "", err + } + + params := clusterC.NewV1alpha1CloudAccountsOpenStackCreateParamsWithContext(h.ctx).WithBody(account) + success, err := client.V1alpha1CloudAccountsOpenStackCreate(params) + if err != nil { + return "", err + } + + return *success.Payload.UID, nil +} + +func (h *V1alpha1Client) CreateMachinePoolOpenStack(cloudConfigId string, machinePool *models.V1alpha1OpenStackMachinePoolConfigEntity) error { + client, err := h.getClusterClient() + if err != nil { + return nil + } + + params := clusterC.NewV1alpha1CloudConfigsOpenStackMachinePoolCreateParamsWithContext(h.ctx).WithConfigUID(cloudConfigId).WithBody(machinePool) + _, err = client.V1alpha1CloudConfigsOpenStackMachinePoolCreate(params) + return err +} + +func (h *V1alpha1Client) UpdateMachinePoolOpenStack(cloudConfigId string, machinePool *models.V1alpha1OpenStackMachinePoolConfigEntity) error { + client, err := h.getClusterClient() + if err != nil { + return nil + } + + params := clusterC.NewV1alpha1CloudConfigsOpenStackMachinePoolUpdateParamsWithContext(h.ctx). + WithConfigUID(cloudConfigId). + WithMachinePoolName(*machinePool.PoolConfig.Name). + WithBody(machinePool) + _, err = client.V1alpha1CloudConfigsOpenStackMachinePoolUpdate(params) + return err +} + +func (h *V1alpha1Client) DeleteMachinePoolOpenStack(cloudConfigId string, machinePoolName string) error { + client, err := h.getClusterClient() + if err != nil { + return nil + } + + params := clusterC.NewV1alpha1CloudConfigsOpenStackMachinePoolDeleteParamsWithContext(h.ctx).WithConfigUID(cloudConfigId).WithMachinePoolName(machinePoolName) + _, err = client.V1alpha1CloudConfigsOpenStackMachinePoolDelete(params) + return err +} + +func (h *V1alpha1Client) GetCloudAccountOpenStack(uid string) (*models.V1alpha1OpenStackAccount, error) { + client, err := h.getClusterClient() + if err != nil { + return nil, err + } + + params := clusterC.NewV1alpha1CloudAccountsOpenStackGetParamsWithContext(h.ctx).WithUID(uid) + success, err := client.V1alpha1CloudAccountsOpenStackGet(params) + if e, ok := err.(*hapitransport.TransportError); ok && e.HttpCode == 404 { + // TODO(saamalik) check with team if this is proper? 
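+		// Treat a 404 as "account not found": returning (nil, nil) lets the caller
+		// clear the resource from state instead of failing the read.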
+ return nil, nil + } else if err != nil { + return nil, err + } + + return success.Payload, nil +} + +func (h *V1alpha1Client) GetCloudConfigOpenStack(configUID string) (*models.V1alpha1OpenStackCloudConfig, error) { + client, err := h.getClusterClient() + if err != nil { + return nil, err + } + + params := clusterC.NewV1alpha1CloudConfigsOpenStackGetParamsWithContext(h.ctx).WithConfigUID(configUID) + success, err := client.V1alpha1CloudConfigsOpenStackGet(params) + + if herr.IsNotFound(err) { + return nil, nil + } else if err != nil { + return nil, err + } + + return success.Payload, nil +} + + +func (h *V1alpha1Client) UpdateCloudAccountOpenStack(account *models.V1alpha1OpenStackAccount) error { + client, err := h.getClusterClient() + if err != nil { + return nil + } + + uid := account.Metadata.UID + params := clusterC.NewV1alpha1CloudAccountsOpenStackUpdateParamsWithContext(h.ctx).WithUID(uid).WithBody(account) + _, err = client.V1alpha1CloudAccountsOpenStackUpdate(params) + return err +} + +func (h *V1alpha1Client) DeleteCloudAccountOpenStack(uid string) error { + client, err := h.getClusterClient() + if err != nil { + return nil + } + + params := clusterC.NewV1alpha1CloudAccountsOpenStackDeleteParamsWithContext(h.ctx).WithUID(uid) + _, err = client.V1alpha1CloudAccountsOpenStackDelete(params) + return err +} + +func (h *V1alpha1Client) GetCloudAccountsOpenStack() ([]*models.V1alpha1OpenStackAccount, error) { + client, err := h.getClusterClient() + if err != nil { + return nil, err + } + + params := clusterC.NewV1alpha1CloudAccountsOpenStackListParamsWithContext(h.ctx) + response, err := client.V1alpha1CloudAccountsOpenStackList(params) + if err != nil { + return nil, err + } + + accounts := make([]*models.V1alpha1OpenStackAccount, len(response.Payload.Items)) + for i, account := range response.Payload.Items { + accounts[i] = account + } + + return accounts, nil +} + + diff --git a/spectrocloud/cluster_common.go b/spectrocloud/cluster_common.go index 9d65785a..659b65cb 100644 --- a/spectrocloud/cluster_common.go +++ b/spectrocloud/cluster_common.go @@ -369,6 +369,24 @@ func resourceMachinePoolVsphereHash(v interface{}) int { return int(hash(buf.String())) } +func resourceMachinePoolOpenStackHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + buf.WriteString(fmt.Sprintf("%t-", m["control_plane"].(bool))) + buf.WriteString(fmt.Sprintf("%t-", m["control_plane_as_worker"].(bool))) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["count"].(int))) + + buf.WriteString(fmt.Sprintf("%s-", m["instance_type"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["subnet_id"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["update_strategy"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["azs"].(*schema.Set).GoString())) + + return int(hash(buf.String())) +} + + func hash(s string) uint32 { h := fnv.New32a() _, _ = h.Write([]byte(s)) diff --git a/spectrocloud/data_source_cloud_account_openstack.go b/spectrocloud/data_source_cloud_account_openstack.go new file mode 100644 index 00000000..4fc86196 --- /dev/null +++ b/spectrocloud/data_source_cloud_account_openstack.go @@ -0,0 +1,70 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/spectrocloud/hapi/models" + "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func dataSourceCloudAccountOpenStack() *schema.Resource 
{ + return &schema.Resource{ + ReadContext: dataSourceCloudAccountOpenStackRead, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ExactlyOneOf: []string{"id", "name"}, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ExactlyOneOf: []string{"id", "name"}, + }, + }, + } +} + +func dataSourceCloudAccountOpenStackRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*client.V1alpha1Client) + + // Warning or errors can be collected in a slice type + var diags diag.Diagnostics + + accounts, err := c.GetCloudAccountsOpenStack() + if err != nil { + return diag.FromErr(err) + } + + var account *models.V1alpha1OpenStackAccount + for _, a := range accounts { + + if v, ok := d.GetOk("id"); ok && v.(string) == a.Metadata.UID { + account = a + break + } else if v, ok := d.GetOk("name"); ok && v.(string) == a.Metadata.Name { + account = a + break + } + } + + if account == nil { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Unable to find openstack cloud account", + Detail: "Unable to find the specified openstack cloud account", + }) + return diags + } + + d.SetId(account.Metadata.UID) + d.Set("name", account.Metadata.Name) + + return diags +} + diff --git a/spectrocloud/provider.go b/spectrocloud/provider.go index 84039e19..1bfcd6c3 100644 --- a/spectrocloud/provider.go +++ b/spectrocloud/provider.go @@ -58,6 +58,9 @@ func New(_ string) func() *schema.Provider { "spectrocloud_cloudaccount_gcp": resourceCloudAccountGcp(), "spectrocloud_cluster_gcp": resourceClusterGcp(), + "spectrocloud_cloudaccount_openstack": resourceCloudAccountOpenstack(), + "spectrocloud_cluster_openstack": resourceClusterOpenStack(), + "spectrocloud_cluster_vsphere": resourceClusterVsphere(), "spectrocloud_cluster_import": resourceClusterImport(), @@ -79,6 +82,7 @@ func New(_ string) func() *schema.Provider { "spectrocloud_cloudaccount_azure": dataSourceCloudAccountAzure(), "spectrocloud_cloudaccount_gcp": dataSourceCloudAccountGcp(), "spectrocloud_cloudaccount_vsphere": dataSourceCloudAccountVsphere(), + "spectrocloud_cloudaccount_openstack": dataSourceCloudAccountOpenStack(), "spectrocloud_backup_storage_location": dataSourceBackupStorageLocation(), }, diff --git a/spectrocloud/resource_cloud_account_aws.go b/spectrocloud/resource_cloud_account_aws.go index 96da5781..29813517 100644 --- a/spectrocloud/resource_cloud_account_aws.go +++ b/spectrocloud/resource_cloud_account_aws.go @@ -101,7 +101,7 @@ func resourceCloudAccountAwsRead(_ context.Context, d *schema.ResourceData, m in return diag.FromErr(err) } } else { - if err := d.Set("arn", account.Spec.AwsStsCredentials.Arn); err != nil { + if err := d.Set("arn", account.Spec.Sts.Arn); err != nil { return diag.FromErr(err) } } @@ -162,7 +162,7 @@ func toAwsAccount(d *schema.ResourceData) *models.V1alpha1AwsAccount { account.Spec.SecretKey = d.Get("aws_secret_key").(string) } else if d.Get("type").(string) == "sts" { account.Spec.CredentialType = models.V1alpha1AwsCloudAccountCredentialTypeSts - account.Spec.AwsStsCredentials = &models.V1alpha1AwsStsCredentials{ + account.Spec.Sts = &models.V1alpha1AwsStsCredentials{ Arn: d.Get("arn").(string), ExternalID: d.Get("external_id").(string), } diff --git a/spectrocloud/resource_cloud_account_azure.go b/spectrocloud/resource_cloud_account_azure.go index adf39f76..dca0c67b 100644 --- a/spectrocloud/resource_cloud_account_azure.go +++ 
b/spectrocloud/resource_cloud_account_azure.go @@ -130,13 +130,13 @@ func resourceCloudAccountAzureDelete(_ context.Context, d *schema.ResourceData, } func toAzureAccount(d *schema.ResourceData) *models.V1alpha1AzureAccount { - clientSecret := strfmt.Password(d.Get("azure_client_secret").(string)) + clientSecret := strfmt.Password(d.Get("azure_client_secret").(string)).String() account := &models.V1alpha1AzureAccount{ Metadata: &models.V1ObjectMeta{ Name: d.Get("name").(string), UID : d.Id(), }, - Spec: &models.V1alpha1AzureAccountSpec{ + Spec: &models.V1alpha1AzureCloudAccount{ ClientID: ptr.StringPtr(d.Get("azure_client_id").(string)), ClientSecret: &clientSecret, TenantID: ptr.StringPtr(d.Get("azure_tenant_id").(string)), diff --git a/spectrocloud/resource_cloud_account_openstack.go b/spectrocloud/resource_cloud_account_openstack.go new file mode 100644 index 00000000..a694ff73 --- /dev/null +++ b/spectrocloud/resource_cloud_account_openstack.go @@ -0,0 +1,187 @@ +package spectrocloud + +import ( + "context" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/spectrocloud/gomi/pkg/ptr" + "github.com/spectrocloud/hapi/models" + "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" +) + +func resourceCloudAccountOpenstack() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceCloudAccountOpenStackCreate, + ReadContext: resourceCloudAccountOpenStackRead, + UpdateContext: resourceCloudAccountOpenStackUpdate, + DeleteContext: resourceCloudAccountOpenStackDelete, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "private_cloud_gateway_id": { + Type: schema.TypeString, + Required: true, + }, + "openstack_username": { + Type: schema.TypeString, + Required: true, + }, + "openstack_password": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + }, + "identity_endpoint": { + Type: schema.TypeString, + Required: true, + }, + "openstack_allow_insecure": { + Type: schema.TypeBool, + Optional: true, + }, + "ca_certificate": { + Type: schema.TypeString, + Optional: true, + }, + "parent_region": { + Type: schema.TypeString, + Required: true, + }, + "default_domain": { + Type: schema.TypeString, + Required: true, + }, + "default_project": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceCloudAccountOpenStackCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*client.V1alpha1Client) + + // Warning or errors can be collected in a slice type + var diags diag.Diagnostics + + account := toOpenStackAccount(d) + + uid, err := c.CreateCloudAccountOpenStack(account) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(uid) + + resourceCloudAccountOpenStackRead(ctx, d, m) + + return diags +} + +func resourceCloudAccountOpenStackRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*client.V1alpha1Client) + + var diags diag.Diagnostics + + uid := d.Id() + + account, err := c.GetCloudAccountOpenStack(uid) + if err != nil { + return diag.FromErr(err) + } else if account == nil { + d.SetId("") + return diags + } + + if err := d.Set("name", account.Metadata.Name); err != nil { + return diag.FromErr(err) + } + if err := d.Set("private_cloud_gateway_id", account.Metadata.Annotations[OverlordUID]); err != nil { + return diag.FromErr(err) + } + if err := d.Set("identity_endpoint", *account.Spec.IdentityEndpoint); err != nil { + 
return diag.FromErr(err) + } + if err := d.Set("openstack_username", *account.Spec.Username); err != nil { + return diag.FromErr(err) + } + if err := d.Set("openstack_allow_insecure", account.Spec.Insecure); err != nil { + return diag.FromErr(err) + } + if err := d.Set("ca_certificate", account.Spec.CaCert); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("parent_region", account.Spec.ParentRegion); err != nil { + return diag.FromErr(err) + } + if err := d.Set("default_domain", account.Spec.DefaultDomain); err != nil { + return diag.FromErr(err) + } + if err := d.Set("default_project", account.Spec.DefaultProject); err != nil { + return diag.FromErr(err) + } + + + return diags +} + +// +func resourceCloudAccountOpenStackUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*client.V1alpha1Client) + + // Warning or errors can be collected in a slice type + var diags diag.Diagnostics + + account := toOpenStackAccount(d) + + err := c.UpdateCloudAccountOpenStack(account) + if err != nil { + return diag.FromErr(err) + } + + resourceCloudAccountOpenStackRead(ctx, d, m) + + return diags +} + +func resourceCloudAccountOpenStackDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*client.V1alpha1Client) + + var diags diag.Diagnostics + + cloudAccountID := d.Id() + + err := c.DeleteCloudAccountOpenStack(cloudAccountID) + if err != nil { + return diag.FromErr(err) + } + + return diags +} + +func toOpenStackAccount(d *schema.ResourceData) *models.V1alpha1OpenStackAccount { + + account := &models.V1alpha1OpenStackAccount{ + Metadata: &models.V1ObjectMeta{ + Name: d.Get("name").(string), + UID: d.Id(), + }, + + Spec: &models.V1alpha1OpenStackCloudAccount{ + CaCert: d.Get("ca_certificate").(string), + DefaultDomain: d.Get("default_domain").(string), + DefaultProject: d.Get("default_project").(string), + IdentityEndpoint: ptr.StringPtr(d.Get("identity_endpoint").(string)), + Insecure: d.Get("openstack_allow_insecure").(bool), + ParentRegion: d.Get("parent_region").(string), + Password: ptr.StringPtr(d.Get("openstack_password").(string)), + Username: ptr.StringPtr(d.Get("openstack_username").(string)), + }, + } + + return account +} \ No newline at end of file diff --git a/spectrocloud/resource_cluster_openstack.go b/spectrocloud/resource_cluster_openstack.go new file mode 100644 index 00000000..a12d3a47 --- /dev/null +++ b/spectrocloud/resource_cluster_openstack.go @@ -0,0 +1,600 @@ +package spectrocloud + +import ( + "context" + "log" + "sort" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/spectrocloud/gomi/pkg/ptr" + "github.com/spectrocloud/hapi/models" + "github.com/spectrocloud/terraform-provider-spectrocloud/pkg/client" +) + +func resourceClusterOpenStack() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceClusterOpenStackCreate, + ReadContext: resourceClusterOpenStackRead, + UpdateContext: resourceClusterOpenStackUpdate, + DeleteContext: resourceClusterDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(180 * time.Minute), + Update: schema.DefaultTimeout(180 * time.Minute), + Delete: schema.DefaultTimeout(180 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "tags": { + Type: schema.TypeSet, + Optional: 
true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "cluster_profile_id": { + Type: schema.TypeString, + Optional: true, + Deprecated: "Switch to cluster_profile", + }, + "cluster_profile": { + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"cluster_profile_id", "pack"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "tag": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "cloud_account_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "cloud_config_id": { + Type: schema.TypeString, + Computed: true, + }, + "os_patch_on_boot": { + Type: schema.TypeBool, + Optional: true, + }, + "os_patch_schedule": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: validateOsPatchSchedule, + }, + "os_patch_after": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: validateOsPatchOnDemandAfter, + }, + "kubeconfig": { + Type: schema.TypeString, + Computed: true, + }, + "cloud_config": { + Type: schema.TypeList, + ForceNew: true, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "domain": { + Type: schema.TypeString, + Required: true, + }, + "region": { + Type: schema.TypeString, + Required: true, + }, + "project": { + Type: schema.TypeString, + Required: true, + }, + "ssh_key": { + Type: schema.TypeString, + Required: true, + }, + "network_id": { + Type: schema.TypeString, + Optional: true, + }, + "subnet_id": { + Type: schema.TypeString, + Optional: true, + }, + "dns_servers": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "subnet_cidr": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "pack": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "tag": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "machine_pool": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "control_plane": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "control_plane_as_worker": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "count": { + Type: schema.TypeInt, + Required: true, + }, + "update_strategy": { + Type: schema.TypeString, + Optional: true, + Default: "RollingUpdateScaleOut", + }, + "instance_type": { + Type: schema.TypeString, + Required: true, + }, + "azs": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "subnet_id": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "backup_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prefix": { + Type: schema.TypeString, + Required: true, + }, + "backup_location_id": { + Type: schema.TypeString, + Required: true, + }, + 
"schedule": { + Type: schema.TypeString, + Required: true, + }, + "expiry_in_hour": { + Type: schema.TypeInt, + Required: true, + }, + "include_disks": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "include_cluster_resources": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "namespaces": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "scan_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "configuration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "penetration_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + "conformance_scan_schedule": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func resourceClusterOpenStackCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*client.V1alpha1Client) + + // Warning or errors can be collected in a slice type + var diags diag.Diagnostics + + cluster := toOpenStackCluster(d) + + uid, err := c.CreateClusterOpenStack(cluster) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(uid) + + stateConf := &resource.StateChangeConf{ + Pending: resourceClusterCreatePendingStates, + Target: []string{"Running"}, + Refresh: resourceClusterStateRefreshFunc(c, d.Id()), + Timeout: d.Timeout(schema.TimeoutCreate) - 1*time.Minute, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + // Wait, catching any errors + _, err = stateConf.WaitForStateContext(ctx) + if err != nil { + return diag.FromErr(err) + } + + resourceClusterOpenStackRead(ctx, d, m) + + return diags +} + +func toOpenStackCluster(d *schema.ResourceData) *models.V1alpha1SpectroOpenStackClusterEntity { + + cloudConfig := d.Get("cloud_config").([]interface{})[0].(map[string]interface{}) + + cluster := &models.V1alpha1SpectroOpenStackClusterEntity{ + Metadata: &models.V1ObjectMeta{ + Name: d.Get("name").(string), + UID: d.Id(), + Labels: toTags(d), + }, + Spec: &models.V1alpha1SpectroOpenStackClusterEntitySpec{ + CloudAccountUID: ptr.StringPtr(d.Get("cloud_account_id").(string)), + Profiles: toProfiles(d), + Policies: toPolicies(d), + CloudConfig: &models.V1alpha1OpenStackClusterConfig{ + Region: cloudConfig["region"].(string), + SSHKeyName: cloudConfig["ssh_key"].(string), + Domain: &models.V1alpha1OpenStackResource { + ID: cloudConfig["domain"].(string), + Name: cloudConfig["domain"].(string), + }, + Network: &models.V1alpha1OpenStackResource { + ID: cloudConfig["network_id"].(string), + }, + Project: &models.V1alpha1OpenStackResource { + Name: cloudConfig["project"].(string), + }, + Subnet: &models.V1alpha1OpenStackResource { + ID: cloudConfig["subnet_id"].(string), + }, + NodeCidr: cloudConfig["subnet_cidr"].(string), + }, + }, + } + + if cloudConfig["dns_servers"] != nil { + dnsServers := make([]string, 0) + for _, dns := range cloudConfig["dns_servers"].(*schema.Set).List() { + dnsServers = append(dnsServers, dns.(string)) + } + + cluster.Spec.CloudConfig.DNSNameservers = dnsServers + } + + machinePoolConfigs := make([]*models.V1alpha1OpenStackMachinePoolConfigEntity, 0) + + for _, machinePool := range d.Get("machine_pool").([]interface{}) { + mp := toMachinePoolOpenStack(machinePool) + machinePoolConfigs = append(machinePoolConfigs, mp) + } + + // sort + sort.SliceStable(machinePoolConfigs, func(i, j int) bool { + return 
machinePoolConfigs[i].PoolConfig.IsControlPlane + }) + + cluster.Spec.Machinepoolconfig = machinePoolConfigs + cluster.Spec.ClusterConfig = toClusterConfig(d) + + return cluster +} + + +//goland:noinspection GoUnhandledErrorResult +func resourceClusterOpenStackRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*client.V1alpha1Client) + + var diags diag.Diagnostics + + uid := d.Id() + + cluster, err := c.GetCluster(uid) + if err != nil { + return diag.FromErr(err) + } else if cluster == nil { + // Deleted - Terraform will recreate it + d.SetId("") + return diags + } + + configUID := cluster.Spec.CloudConfigRef.UID + d.Set("cloud_config_id", configUID) + + if err := d.Set("tags", flattenTags(cluster.Metadata.Labels)); err != nil { + return diag.FromErr(err) + } + + var config *models.V1alpha1OpenStackCloudConfig + if config, err = c.GetCloudConfigOpenStack(configUID); err != nil { + return diag.FromErr(err) + } + + kubecfg, err := c.GetClusterKubeConfig(uid) + if err != nil { + return diag.FromErr(err) + } + if err := d.Set("kubeconfig", kubecfg); err != nil { + return diag.FromErr(err) + } + + mp := flattenMachinePoolConfigsOpenStack(config.Spec.MachinePoolConfig) + if err := d.Set("machine_pool", mp); err != nil { + return diag.FromErr(err) + } + + if policy, err := c.GetClusterBackupConfig(d.Id()); err != nil { + return diag.FromErr(err) + } else if policy != nil && policy.Spec.Config != nil { + if err := d.Set("backup_policy", flattenBackupPolicy(policy.Spec.Config)); err != nil { + return diag.FromErr(err) + } + } + + if policy, err := c.GetClusterScanConfig(d.Id()); err != nil { + return diag.FromErr(err) + } else if policy != nil && policy.Spec.DriverSpec != nil { + if err := d.Set("scan_policy", flattenScanPolicy(policy.Spec.DriverSpec)); err != nil { + return diag.FromErr(err) + } + } + + return diags +} + +func flattenMachinePoolConfigsOpenStack(machinePools []*models.V1alpha1OpenStackMachinePoolConfig) []interface{} { + + if machinePools == nil { + return make([]interface{}, 0) + } + + ois := make([]interface{}, 0) + + for _, machinePool := range machinePools { + oi := make(map[string]interface{}) + + oi["control_plane"] = machinePool.IsControlPlane + oi["control_plane_as_worker"] = machinePool.UseControlPlaneAsWorker + oi["name"] = machinePool.Name + oi["count"] = int(machinePool.Size) + oi["update_strategy"] = machinePool.UpdateStrategy.Type + + oi["subnet_id"] = machinePool.Subnet.ID + oi["azs"] = machinePool.Azs + oi["instance_type"] = machinePool.FlavorConfig.Name + + ois = append(ois, oi) + } + + return ois +} + +func resourceClusterOpenStackUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + c := m.(*client.V1alpha1Client) + + // Warning or errors can be collected in a slice type + var diags diag.Diagnostics + + cloudConfigId := d.Get("cloud_config_id").(string) + + if d.HasChange("machine_pool") { + oraw, nraw := d.GetChange("machine_pool") + if oraw == nil { + oraw = new(schema.Set) + } + if nraw == nil { + nraw = new(schema.Set) + } + + os := oraw.([]interface{}) + ns := nraw.([]interface{}) + + osMap := make(map[string]interface{}) + for _, mp := range os { + machinePool := mp.(map[string]interface{}) + osMap[machinePool["name"].(string)] = machinePool + } + + for _, mp := range ns { + machinePoolResource := mp.(map[string]interface{}) + name := machinePoolResource["name"].(string) + hash := resourceMachinePoolOpenStackHash(machinePoolResource) + + machinePool := 
toMachinePoolOpenStack(machinePoolResource) + + var err error + if oldMachinePool, ok := osMap[name]; !ok { + log.Printf("Create machine pool %s", name) + err = c.CreateMachinePoolOpenStack(cloudConfigId, machinePool) + } else if hash != resourceMachinePoolOpenStackHash(oldMachinePool) { + log.Printf("Change in machine pool %s", name) + err = c.UpdateMachinePoolOpenStack(cloudConfigId, machinePool) + } + + if err != nil { + return diag.FromErr(err) + } + + // Processed (if exists) + delete(osMap, name) + } + + // Deleted old machine pools + for _, mp := range osMap { + machinePool := mp.(map[string]interface{}) + name := machinePool["name"].(string) + log.Printf("Deleted machine pool %s", name) + if err := c.DeleteMachinePoolOpenStack(cloudConfigId, name); err != nil { + return diag.FromErr(err) + } + } + } + + if d.HasChanges("cluster_profile") { + if err := updateProfiles(c, d); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("backup_policy") { + if err := updateBackupPolicy(c, d); err != nil { + return diag.FromErr(err) + } + } + + if d.HasChange("scan_policy") { + if err := updateScanPolicy(c, d); err != nil { + return diag.FromErr(err) + } + } + + resourceClusterOpenStackRead(ctx, d, m) + + return diags +} + + +func toMachinePoolOpenStack(machinePool interface{}) *models.V1alpha1OpenStackMachinePoolConfigEntity { + m := machinePool.(map[string]interface{}) + + labels := make([]string, 0) + controlPlane := m["control_plane"].(bool) + controlPlaneAsWorker := m["control_plane_as_worker"].(bool) + if controlPlane { + labels = append(labels, "master") + } + + azs := make([]string, 0) + for _, val := range m["azs"].(*schema.Set).List() { + azs = append(azs, val.(string)) + } + + mp := &models.V1alpha1OpenStackMachinePoolConfigEntity{ + CloudConfig: &models.V1alpha1OpenStackMachinePoolCloudConfigEntity{ + Azs: azs, + Subnet: &models.V1alpha1OpenStackResource { + ID: m["subnet_id"].(string), + }, + FlavorConfig: &models.V1alpha1OpenstackFlavorConfig { + Name: ptr.StringPtr(m["instance_type"].(string)), + }, + }, + PoolConfig: &models.V1alpha1MachinePoolConfigEntity{ + IsControlPlane: controlPlane, + Labels: labels, + Name: ptr.StringPtr(m["name"].(string)), + Size: ptr.Int32Ptr(int32(m["count"].(int))), + UpdateStrategy: &models.V1alpha1UpdateStrategy{ + Type: m["update_strategy"].(string), + }, + UseControlPlaneAsWorker: controlPlaneAsWorker, + }, + } + return mp +} + +