diff --git a/Jenkinsfile b/Jenkinsfile index 7b6906ab..6d606682 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,7 +1,7 @@ pipeline { agent { docker { - image 'public.ecr.aws/bitnami/golang:1.22.3' + image 'public.ecr.aws/bitnami/golang:1.22.5' args '-u root:sudo' reuseNode false } diff --git a/Makefile b/Makefile index 28aba9f8..91c186a1 100644 --- a/Makefile +++ b/Makefile @@ -51,6 +51,10 @@ test: testacc: TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 120m + +fwgen: + bash internal/scripts/fwgen.sh + push: aws s3 cp ./bin/${BINARY}_${VERSION}_darwin_amd64 s3://$(BUCKET_NAME)/$(TAG)/$(BUILD_NUMBER)/${BINARY}_${VERSION}_darwin_amd64 --no-progress aws s3 cp ./bin/${BINARY}_${VERSION}_freebsd_386 s3://$(BUCKET_NAME)/$(TAG)/$(BUILD_NUMBER)/${BINARY}_${VERSION}_freebsd_386 --no-progress diff --git a/docs/data-sources/mks_cluster.md b/docs/data-sources/mks_cluster.md new file mode 100644 index 00000000..c3f8c756 --- /dev/null +++ b/docs/data-sources/mks_cluster.md @@ -0,0 +1,252 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "rafay_mks_cluster Data Source - rafay" +subcategory: "" +description: |- + +--- + +# rafay_mks_cluster (Data Source) +```terraform +data "rafay_mks_cluster" "mks-sample-cluster" { + metadata = { + name = "mks-sample-cluster" + project = "sample-project" + } +} +``` + + + +## Schema + +### Required + +- `metadata` (Attributes) metadata of the resource (see [below for nested schema](#nestedatt--metadata)) + +### Read-Only + +- `api_version` (String) api version +- `kind` (String) kind +- `spec` (Attributes) cluster specification (see [below for nested schema](#nestedatt--spec)) + + +### Nested Schema for `metadata` + +Required: + +- `name` (String) name of the resource +- `project` (String) Project of the resource + +Read-Only: + +- `annotations` (Map of String) annotations of the resource +- `description` (String) description of the resource +- `labels` (Map of String) labels of the resource + + + +### 
Nested Schema for `spec` + +Read-Only: + +- `blueprint` (Attributes) (see [below for nested schema](#nestedatt--spec--blueprint)) +- `cloud_credentials` (String) The credentials to be used to ssh into the Cluster Nodes +- `config` (Attributes) MKS V3 cluster specification (see [below for nested schema](#nestedatt--spec--config)) +- `proxy` (Attributes) (see [below for nested schema](#nestedatt--spec--proxy)) +- `sharing` (Attributes) (see [below for nested schema](#nestedatt--spec--sharing)) +- `system_components_placement` (Attributes) (see [below for nested schema](#nestedatt--spec--system_components_placement)) +- `type` (String) The type of the cluster this spec corresponds to + + +### Nested Schema for `spec.blueprint` + +Read-Only: + +- `name` (String) +- `version` (String) Version of the blueprint + + + +### Nested Schema for `spec.config` + +Read-Only: + +- `auto_approve_nodes` (Boolean) Auto approves incoming nodes by default +- `cluster_ssh` (Attributes) SSH config for all the nodes within the cluster (see [below for nested schema](#nestedatt--spec--config--cluster_ssh)) +- `dedicated_control_plane` (Boolean) Select this option for preventing scheduling of user workloads on Control Plane nodes +- `high_availability` (Boolean) Select this option for highly available control plane. Minimum three control plane nodes are required +- `kubernetes_upgrade` (Attributes) (see [below for nested schema](#nestedatt--spec--config--kubernetes_upgrade)) +- `kubernetes_version` (String) Kubernetes version of the Control Plane +- `installer_ttl` (Integer) By default, this setting allows ttl configuration for installer config. If not provided by default will set ttl to 365 days. 
+- `location` (String) The data center location where the cluster nodes will be launched +- `network` (Attributes) MKS Cluster Network Specification (see [below for nested schema](#nestedatt--spec--config--network)) +- `nodes` (Attributes Map) holds node configuration for the cluster (see [below for nested schema](#nestedatt--spec--config--nodes)) + + +### Nested Schema for `spec.config.cluster_ssh` + +Read-Only: + +- `passphrase` (String) Provide ssh passphrase +- `port` (String) Provide ssh port +- `private_key_path` (String) Provide local path to the private key +- `username` (String) Provide the ssh username + + + +### Nested Schema for `spec.config.kubernetes_upgrade` + +Read-Only: + +- `params` (Attributes) (see [below for nested schema](#nestedatt--spec--config--kubernetes_upgrade--params)) +- `strategy` (String) Kubernetes upgrade strategy for worker nodes and Valid options are: concurrent/sequential + + +### Nested Schema for `spec.config.kubernetes_upgrade.params` + +Read-Only: + +- `worker_concurrency` (String) It can be number or percentage + + + + +### Nested Schema for `spec.config.network` + +Read-Only: + +- `cni` (Attributes) MKS Cluster CNI Specification (see [below for nested schema](#nestedatt--spec--config--network--cni)) +- `ipv6` (Attributes) (see [below for nested schema](#nestedatt--spec--config--network--ipv6)) +- `pod_subnet` (String) Kubernetes pod subnet +- `service_subnet` (String) Kubernetes service subnet + + +### Nested Schema for `spec.config.network.cni` + +Read-Only: + +- `name` (String) Provide the CNI name, e.g., Calico or Cilium +- `version` (String) Provide the CNI version, e.g., 3.26.1 + + + +### Nested Schema for `spec.config.network.ipv6` + +Read-Only: + +- `pod_subnet` (String) Kubernetes pod subnet +- `service_subnet` (String) Kubernetes service subnet + + + + +### Nested Schema for `spec.config.nodes` + +Read-Only: + +- `arch` (String) System Architecture of the node +- `hostname` (String) Hostname of the node +- 
`interface` (String) Interface to be used on the node +- `labels` (Map of String) labels to be added to the node +- `operating_system` (String) OS of the node +- `private_ip` (String) Private ip address of the node +- `roles` (Set of String) Valid roles are: 'ControlPlane', 'Worker', 'Storage' +- `ssh` (Attributes) MKS Node SSH definition (see [below for nested schema](#nestedatt--spec--config--nodes--ssh)) +- `taints` (Attributes Set) taints to be added to the node (see [below for nested schema](#nestedatt--spec--config--nodes--taints)) + + +### Nested Schema for `spec.config.nodes.ssh` + +Read-Only: + +- `ip_address` (String) Use this to override node level ssh details +- `passphrase` (String) SSH Passphrase +- `port` (String) SSH Port +- `private_key_path` (String) Specify Path to SSH private key +- `username` (String) SSH Username + + + +### Nested Schema for `spec.config.nodes.taints` + +Read-Only: + +- `effect` (String) +- `key` (String) +- `value` (String) + + + + + +### Nested Schema for `spec.proxy` + +Read-Only: + +- `allow_insecure_bootstrap` (Boolean) +- `bootstrap_ca` (String) +- `enabled` (Boolean) +- `http_proxy` (String) +- `https_proxy` (String) +- `no_proxy` (String) +- `proxy_auth` (String) + + + +### Nested Schema for `spec.sharing` + +Read-Only: + +- `enabled` (Boolean) +- `projects` (Attributes Set) (see [below for nested schema](#nestedatt--spec--sharing--projects)) + + +### Nested Schema for `spec.sharing.projects` + +Read-Only: + +- `name` (String) + + + + +### Nested Schema for `spec.system_components_placement` + +Read-Only: + +- `daemon_set_override` (Attributes) (see [below for nested schema](#nestedatt--spec--system_components_placement--daemon_set_override)) +- `node_selector` (Map of String) +- `tolerations` (Attributes Set) (see [below for nested schema](#nestedatt--spec--system_components_placement--tolerations)) + + +### Nested Schema for `spec.system_components_placement.daemon_set_override` + +Read-Only: + +- 
`daemon_set_tolerations` (Attributes Set) (see [below for nested schema](#nestedatt--spec--system_components_placement--daemon_set_override--daemon_set_tolerations)) +- `node_selection_enabled` (Boolean) + + +### Nested Schema for `spec.system_components_placement.daemon_set_override.daemon_set_tolerations` + +Read-Only: + +- `effect` (String) +- `key` (String) +- `operator` (String) +- `toleration_seconds` (Number) +- `value` (String) + + + + +### Nested Schema for `spec.system_components_placement.tolerations` + +Read-Only: + +- `effect` (String) +- `key` (String) +- `operator` (String) +- `toleration_seconds` (Number) +- `value` (String) diff --git a/docs/index.md b/docs/index.md index b9c4d0d1..7dc462b0 100644 --- a/docs/index.md +++ b/docs/index.md @@ -21,6 +21,7 @@ There are examples in the [GitHub repo](https://github.com/RafaySystems/terrafor | Resource | Version Released | | ----------------------------------------- | ---------------- | +| `rafay_mks_cluster` | 1.1.36 | `rafay_driver` | 1.1.22 | | `rafay_environment` | 1.1.18 | | `rafay_environment_template` | 1.1.18 | diff --git a/docs/resources/aks_cluster.md b/docs/resources/aks_cluster.md index a7cd7c38..d0be3308 100644 --- a/docs/resources/aks_cluster.md +++ b/docs/resources/aks_cluster.md @@ -127,6 +127,119 @@ resource "rafay_aks_cluster" "demo-terraform" { +# rafay_aks_cluster (Azure CNI Overlay with Workload Identity) + +## Example Usage + +--- + +```terraform +resource "rafay_aks_cluster" "demo-terraform" { + apiversion = "rafay.io/v1alpha1" + kind = "Cluster" + metadata { + name = "demo-terraform" + project = "terraform" + } + spec { + type = "aks" + blueprint = "default-aks" + cloudprovider = "testuser-azure" + cluster_config { + apiversion = "rafay.io/v1alpha1" + kind = "aksClusterConfig" + metadata { + name = "demo-terraform" + } + spec { + resource_group_name = "testuser-terraform" + managed_cluster { + apiversion = "2023-11-01" + identity { + type = "SystemAssigned" + } + location = 
"centralindia" + properties { + api_server_access_profile { + enable_private_cluster = true + } + dns_prefix = "testuser-test-dns" + kubernetes_version = "1.28.5" + network_profile { + network_plugin = "azure" + load_balancer_sku = "standard" + network_plugin_mode = "overlay" + pod_cidr = "192.168.0.0/16" + service_cidr = "10.0.0.0/16" + dns_service_ip = "10.0.0.10" + } + power_state { + code = "Running" + } + oidc_issuer_profile { + enabled = true + } + security_profile { + workload_identity { + enabled = true + } + } + } + type = "Microsoft.ContainerService/managedClusters" + } + node_pools { + apiversion = "2023-11-01" + name = "primary" + location = "centralindia" + properties { + count = 2 + enable_auto_scaling = true + max_count = 2 + max_pods = 40 + min_count = 1 + mode = "System" + orchestrator_version = "1.28.5" + os_type = "Linux" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + node_labels = { + app = "infra" + dedicated = "true" + } + node_taints = ["app=infra:PreferNoSchedule"] + } + type = "Microsoft.ContainerService/managedClusters/agentPools" + } + } + } + system_components_placement { + node_selector = { + app = "infra" + dedicated = "true" + } + tolerations { + effect = "PreferNoSchedule" + key = "app" + operator = "Equal" + value = "infra" + } + + daemonset_override { + node_selection_enabled = false + tolerations { + key = "app1dedicated" + value = true + effect = "NoSchedule" + operator = "Equal" + } + } + } + } +} +``` + +--- + ## Argument Reference ### Required @@ -232,7 +345,7 @@ resource "rafay_aks_cluster" "demo-terraform" { - `managed_cluster` - (Block List) The AKS managed cluster. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--managed_cluster)) - `node_pools` - (Block List, Min: 1) The AKS node pool. 
(See [below for nested schema](#nestedblock--spec--cluster_config--spec--node_pools)) - `maintenance_configurations` - (Block List, Min: 0) The AKS Maintenance Configurations used to configure Auto-Upgrade Profile Schedule. (See [below for nested schema] -(#nestedblock--spec--cluster_config--spec--maintenance_configurations)) + (#nestedblock--spec--cluster_config--spec--maintenance_configurations)) - `resource_group_name` - (String) The AKS resource group for the cluster. @@ -275,11 +388,13 @@ resource "rafay_aks_cluster" "demo-terraform" { - `linux_profile` - (Block List) The configurations for linux profile. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--managed_cluster--properties--linux_profile)) - `network_profile` - (Block List) Profile of the network configuration. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--managed_cluster--properties--network_profile)) - `node_resource_group` - (String) The name of the resource group containing agent pool nodes. +- `oidc_issuer_profile` (Block List) Profile of OpenID Connect configuration. (see [below for nested schema](#nestedblock--spec--cluster_config--spec--subscription_id--properties--oidc_issuer_profile)) - `pod_identity_profile` - (Block List) Azure Active Directory (Azure AD) pod-managed identities use Kubernetes primitives to associate managed identities for Azure resources and identities in Azure AD with pods. See [Use Azure AD Pod-Managed Identities](https://learn.microsoft.com/en-us/azure/aks/use-azure-ad-pod-identity) for more information. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--managed_cluster--properties--pod_identity_profile)) - `power_state` - (Block List) Cluster Power State to Stop/Start the AKS cluster. See [Stop and start an Azure Kubernetes Service (AKS) cluster](https://learn.microsoft.com/en-us/azure/aks/start-stop-cluster?tabs=azure-cli) for more information. 
+- `security_profile` - (Block List) The security profile of the managed cluster. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--managed_cluster--properties--security_profile)) - `service_principal_profile` - (Block List) Information about a service principal identity for the cluster to use for manipulating Azure APIs. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--managed_cluster--properties--service_principal_profile)) - `auto_upgrade_profile` - (Block List) Configure Auto-Upgrade Profile to handle automatic k8s version upgrade and node os image upgrade. See [below for nested schema] -(#nestedblock--spec--cluster_config--spec--managed_cluster--properties--auto_upgrade_profile) + (#nestedblock--spec--cluster_config--spec--managed_cluster--properties--auto_upgrade_profile) @@ -533,6 +648,7 @@ resource "rafay_aks_cluster" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.managed_cluster.properties.auto_upgrade_profile` **_Required_** + - `upgrade_channel` - (String) Configure channel with one of the following values [none, rapid, stable, patch, node-image] - `node_os_upgrade_channel` - (String) Configure channel with one of the following values [None, NodeImage, SecurityPatch] @@ -639,6 +755,7 @@ resource "rafay_aks_cluster" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations` **_Required_** + - `api_version` - (String) The AKS maintenance configuration API version. The recommended value is `2024-01-01`. - `name` - (String) The AKS maintenance configuration name. It should be one of the following values [default, aksManagedNodeOSUpgradeSchedule, aksManagedAutoUpgradeSchedule]. - `type` - (String) The AKS maintenance configuration type. The supported value is `Microsoft.ContainerService/managedClusters/maintenanceConfigurations`. 
@@ -649,9 +766,11 @@ resource "rafay_aks_cluster" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties` **_Required_** + - `maintenance_window` - (Block List, Min: 1) Configure maintenance window in a maintenance configuration. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window)) **_Optional_** + - `not_allowed_time` - (Block List) Configure start and end time for maintenance configuration to not run. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window--not_allowed_time)) - `time_in_week` - (Block List) Configure day and hour_slots for maintenance configuration to run. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window--time_in_week)) @@ -660,6 +779,7 @@ resource "rafay_aks_cluster" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.not_allowed_time` **_Optional_** + - `end` - (String) The end of a time span. - `start` - (String) The start of a time span. @@ -668,19 +788,22 @@ resource "rafay_aks_cluster" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.time_in_week` **_Optional_** + - `day` - (String) Specify day of the week. Value should be one of the following [Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday] -- `hourSlot` - (Block List) Each integer hour represents a time range beginning at 0m after the hour ending at the next hour (non-inclusive). 0 corresponds to 00:00 UTC, 23 corresponds to 23:00 UTC. Specifying [0, 1] means the 00:00 - 02:00 UTC time range. Value should be between 0 to 23 +- `hourSlot` - (Block List) Each integer hour represents a time range beginning at 0m after the hour ending at the next hour (non-inclusive). 
0 corresponds to 00:00 UTC, 23 corresponds to 23:00 UTC. Specifying [0, 1] means the 00:00 - 02:00 UTC time range. Value should be between 0 to 23 ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.maintenance_window` **_Required_** + - `duration_hours` - (Number) Configure the value between 4 to 24 hours. - `start_time` - (String) Configure the start time of maintenance window. Accepted values should be in format of 'HH:MM'. - `schedule` - (Block List) Recurrence schedule for the maintenance window. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window--schedule)) **_Optional_** + - `not_allowed_dates` - (Block List) Date ranges on which upgrade is not allowed. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window--not_allowed_dates)) - `start_date` - (String) The date the maintenance window activates. If the current date is before this date, the maintenance window is inactive and will not be used for upgrades. If not specified, the maintenance window will be active right away. - `utc_offset` - (String) The UTC offset in format +/-HH:mm. If not specified, the default is '+00:00'. @@ -690,6 +813,7 @@ resource "rafay_aks_cluster" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.maintenance_window.schedule` **_Optional_** + - `absolute_monthly` - (Block List) For schedules like: 'recur every month on the 10th' or 'recur every 4 months on the 15th'. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window--schedule--absolute_monthly)) - `relative_monthly` - (Block List) For schedules like: 'recur every month on the first Wednesday' or 'recur every 4 months on last Friday'. 
(See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window--schedule--relative_monthly)) - `daily` - (Block List) For schedules like: 'recur every 2 days'. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window--schedule--daily)) @@ -700,6 +824,7 @@ resource "rafay_aks_cluster" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.maintenance_window.schedule.absolute_monthly` **_Required_** + - `day_of_month` - (Number) The date of the Month. Value should be between 1 to 31. - `interval_months` - (Number) Specifies the number of months between each set of occurrences. Value should be between 1 to 6. @@ -708,6 +833,7 @@ resource "rafay_aks_cluster" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.maintenance_window.schedule.relative_monthly` **_Required_** + - `day_of_week` - (String) The day of the week. Value should be one of the following [Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday] - `interval_months` - (Number) Specifies the number of months between each set of occurrences. Value should be between 1 to 6. - `week_index` - (String) Specifies on which week of the month the dayOfWeek applies. Value should be one of the following [First, Second, Third, Fourth, Last] @@ -717,6 +843,7 @@ resource "rafay_aks_cluster" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.maintenance_window.schedule.daily` **_Required_** + - `intervalDays` - (Number) Specifies the number of days between each set of occurrences. Value should be between 1 to 7. 
@@ -724,6 +851,7 @@ resource "rafay_aks_cluster" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.maintenance_window.schedule.weekly` **_Required_** + - `day_of_week` - (Number) Specifies on which day of the week the maintenance occurs. Value should be one of the following [Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday] - `interval_weeks` - (Number) Specifies the number of weeks between each set of occurrences. Value should be between 1 to 4. @@ -732,10 +860,10 @@ resource "rafay_aks_cluster" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.maintenance_window.not_allowed_dates` **_Required_** + - `end` - (String) End date for the date span. - `start` - (String) Start date for the date span. - ### Nested Schema for `spec.sharing` @@ -763,6 +891,30 @@ resource "rafay_aks_cluster" "demo-terraform" { - `delete` - (String) Sets the timeout duration for deleting a resource. The default timeout is 10 minutes. - `update` - (String) Sets the timeout duration for updating a resource. The default timeout is 10 minutes. +### Nested Schema for `spec.cluster_config.spec.subscription_id.properties.oidc_issuer_profile` + + + +**_Required_** + +- `enabled` - (Boolean) Enable/Disable OIDC issuer profile. + +### Nested Schema for `spec.cluster_config.spec.managed_cluster.properties.security_profile` + + + +**_Required_** + +- `workload_identity` - (Block List) Enable/Disable workload identity. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--managed_cluster--properties--security_profile--workload_identity)) + + + +### Nested Schema for `spec.cluster_config.spec.managed_cluster.properties.security_profile.workload_identity` + +**_Required_** + +- `enabled` - (Boolean) Enable/Disable workload identity. 
+ ## Attribute Reference --- @@ -792,108 +944,3 @@ data "rafay_aks_cluster" "cluster" { ``` --- - -# rafay_aks_cluster (Azure CNI Overlay) - -## Example Usage - ---- - -```terraform -resource "rafay_aks_cluster" "demo-terraform" { - apiversion = "rafay.io/v1alpha1" - kind = "Cluster" - metadata { - name = "demo-terraform" - project = "terraform" - } - spec { - type = "aks" - blueprint = "default-aks" - cloudprovider = "testuser-azure" - cluster_config { - apiversion = "rafay.io/v1alpha1" - kind = "aksClusterConfig" - metadata { - name = "demo-terraform" - } - spec { - resource_group_name = "testuser-terraform" - managed_cluster { - apiversion = "2023-11-01" - identity { - type = "SystemAssigned" - } - location = "centralindia" - properties { - api_server_access_profile { - enable_private_cluster = true - } - dns_prefix = "testuser-test-dns" - kubernetes_version = "1.28.5" - network_profile { - network_plugin = "azure" - load_balancer_sku = "standard" - network_plugin_mode = "overlay" - pod_cidr = "192.168.0.0/16" - service_cidr = "10.0.0.0/16" - dns_service_ip = "10.0.0.10" - } - power_state { - code = "Running" - } - } - type = "Microsoft.ContainerService/managedClusters" - } - node_pools { - apiversion = "2023-11-01" - name = "primary" - location = "centralindia" - properties { - count = 2 - enable_auto_scaling = true - max_count = 2 - max_pods = 40 - min_count = 1 - mode = "System" - orchestrator_version = "1.28.5" - os_type = "Linux" - type = "VirtualMachineScaleSets" - vm_size = "Standard_DS2_v2" - node_labels = { - app = "infra" - dedicated = "true" - } - node_taints = ["app=infra:PreferNoSchedule"] - } - type = "Microsoft.ContainerService/managedClusters/agentPools" - } - } - } - system_components_placement { - node_selector = { - app = "infra" - dedicated = "true" - } - tolerations { - effect = "PreferNoSchedule" - key = "app" - operator = "Equal" - value = "infra" - } - - daemonset_override { - node_selection_enabled = false - tolerations { - key = 
"app1dedicated" - value = true - effect = "NoSchedule" - operator = "Equal" - } - } - } - } -} -``` - ---- diff --git a/docs/resources/aks_cluster_v3.md b/docs/resources/aks_cluster_v3.md index e61b6c2f..5f447235 100644 --- a/docs/resources/aks_cluster_v3.md +++ b/docs/resources/aks_cluster_v3.md @@ -79,6 +79,14 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { power_state { code = "Running" } + oidc_issuer_profile { + enabled = true + } + security_profile { + workload_identity { + enabled = true + } + } addon_profiles { http_application_routing { enabled = true @@ -173,6 +181,113 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { } ``` +--- + +# rafay_aks_cluster_v3 (Azure CNI Overlay with Workload Identity) + +## Example Usage + +--- + +```terraform +resource "rafay_aks_cluster" "demo-terraform" { + apiversion = "rafay.io/v1alpha1" + kind = "Cluster" + metadata { + name = "demo-terraform" + project = "terraform" + } + spec { + type = "aks" + blueprint = "default-aks" + cloudprovider = "testuser-azure" + cluster_config { + apiversion = "rafay.io/v1alpha1" + kind = "aksClusterConfig" + metadata { + name = "demo-terraform" + } + spec { + resource_group_name = "testuser-terraform" + managed_cluster { + apiversion = "2023-11-01" + identity { + type = "SystemAssigned" + } + location = "centralindia" + properties { + api_server_access_profile { + enable_private_cluster = true + } + dns_prefix = "testuser-test-dns" + kubernetes_version = "1.28.5" + network_profile { + network_plugin = "azure" + load_balancer_sku = "standard" + network_plugin_mode = "overlay" + pod_cidr = "192.168.0.0/16" + service_cidr = "10.0.0.0/16" + dns_service_ip = "10.0.0.10" + } + power_state { + code = "Running" + } + } + type = "Microsoft.ContainerService/managedClusters" + } + node_pools { + apiversion = "2023-11-01" + name = "primary" + location = "centralindia" + properties { + count = 2 + enable_auto_scaling = true + max_count = 2 + max_pods = 40 + min_count = 1 + mode = "System" + 
orchestrator_version = "1.28.5" + os_type = "Linux" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + node_labels = { + app = "infra" + dedicated = "true" + } + node_taints = ["app=infra:PreferNoSchedule"] + } + type = "Microsoft.ContainerService/managedClusters/agentPools" + } + } + } + system_components_placement { + node_selector = { + app = "infra" + dedicated = "true" + } + tolerations { + effect = "PreferNoSchedule" + key = "app" + operator = "Equal" + value = "infra" + } + + daemonset_override { + node_selection_enabled = false + tolerations { + key = "app1dedicated" + value = true + effect = "NoSchedule" + operator = "Equal" + } + } + } + } +} +``` + +--- + ## Argument Reference @@ -276,7 +391,7 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { - `managed_cluster` - (Block List) The AKS managed cluster. (See [below for nested schema](#nestedblock--spec--config--spec--managed_cluster)) - `node_pools` - (Block List, Min: 1) The AKS node pool. (See [below for nested schema](#nestedblock--spec--config--spec--node_pools)) - `maintenance_configurations` - (Block List, Min: 0) The AKS Maintenance Configurations used to configure Auto-Upgrade Profile Schedule. (See [below for nested schema] -(#nestedblock--spec--cluster_config--spec--maintenance_configurations)) + (#nestedblock--spec--cluster_config--spec--maintenance_configurations)) - `resource_group_name` - (String) The AKS resource group for the cluster. @@ -319,17 +434,20 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { - `linux_profile` - (Block List) The configurations for linux profile. (See [below for nested schema](#nestedblock--spec--config--spec--managed_cluster--properties--linux_profile)) - `network_profile` - (Block List) Profile of the network configuration. (See [below for nested schema](#nestedblock--spec--config--spec--managed_cluster--properties--network_profile)) - `node_resource_group` - (String) The name of the resource group containing agent pool nodes. 
+- `oidc_issuer_profile` (Block List) Profile of OpenID Connect configuration. (see [below for nested schema](#nestedblock--spec--cluster_config--spec--subscription_id--properties--oidc_issuer_profile)) - `pod_identity_profile` - (Block List) Azure Active Directory (Azure AD) pod-managed identities use Kubernetes primitives to associate managed identities for Azure resources and identities in Azure AD with pods. See [Use Azure AD Pod-Managed Identities](https://learn.microsoft.com/en-us/azure/aks/use-azure-ad-pod-identity) for more information. (See [below for nested schema](#nestedblock--spec--config--spec--managed_cluster--properties--pod_identity_profile)) - `power_state` - (Block List) Cluster Power State to Stop/Start the AKS cluster. See [Stop and start an Azure Kubernetes Service (AKS) cluster](https://learn.microsoft.com/en-us/azure/aks/start-stop-cluster?tabs=azure-cli) for more information. +- `security_profile` - (Block List) The security profile of the managed cluster. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--managed_cluster--properties--security_profile)) - `service_principal_profile` - (Block List) Information about a service principal identity for the cluster to use for manipulating Azure APIs. (See [below for nested schema](#nestedblock--spec--config--spec--managed_cluster--properties--service_principal_profile)) - `auto_upgrade_profile` - (Block List) Configure Auto-Upgrade Profile to handle automatic k8s version upgrade and node os image upgrade. 
See [below for nested schema] -(#nestedblock--spec--cluster_config--spec--managed_cluster--properties--auto_upgrade_profile) + (#nestedblock--spec--cluster_config--spec--managed_cluster--properties--auto_upgrade_profile) ### Nested Schema for `spec.cluster_config.spec.managed_cluster.properties.auto_upgrade_profile` **_Required_** + - `upgrade_channel` - (String) Configure channel with one of the following values [none, rapid, stable, patch, node-image] - `node_os_upgrade_channel` - (String) Configure channel with one of the following values [None, NodeImage, SecurityPatch] @@ -683,6 +801,7 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations` **_Required_** + - `api_version` - (String) The AKS maintenance configuration API version. The recommended value is `2024-01-01`. - `name` - (String) The AKS maintenance configuration name. It should be one of the following values [default, aksManagedNodeOSUpgradeSchedule, aksManagedAutoUpgradeSchedule]. - `type` - (String) The AKS maintenance configuration type. The supported value is `Microsoft.ContainerService/managedClusters/maintenanceConfigurations`. @@ -693,9 +812,11 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties` **_Required_** + - `maintenance_window` - (Block List, Min: 1) Configure maintenance window in a maintenance configuration. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window)) **_Optional_** + - `not_allowed_time` - (Block List) Configure start and end time for maintenance configuration to not run. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window--not_allowed_time)) - `time_in_week` - (Block List) Configure day and hour_slots for maintenance configuration to run. 
(See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window--time_in_week)) @@ -704,6 +825,7 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.not_allowed_time` **_Optional_** + - `end` - (String) The end of a time span. - `start` - (String) The start of a time span. @@ -712,19 +834,22 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.time_in_week` **_Optional_** + - `day` - (String) Specify day of the week. Value should be one of the following [Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday] -- `hourSlot` - (Block List) Each integer hour represents a time range beginning at 0m after the hour ending at the next hour (non-inclusive). 0 corresponds to 00:00 UTC, 23 corresponds to 23:00 UTC. Specifying [0, 1] means the 00:00 - 02:00 UTC time range. Value should be between 0 to 23 +- `hourSlot` - (Block List) Each integer hour represents a time range beginning at 0m after the hour ending at the next hour (non-inclusive). 0 corresponds to 00:00 UTC, 23 corresponds to 23:00 UTC. Specifying [0, 1] means the 00:00 - 02:00 UTC time range. Value should be between 0 to 23 ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.maintenance_window` **_Required_** + - `duration_hours` - (Number) Configure the value between 4 to 24 hours. - `start_time` - (String) Configure the start time of maintenance window. Accepted values should be in format of 'HH:MM'. - `schedule` - (Block List) Recurrence schedule for the maintenance window. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window--schedule)) **_Optional_** + - `not_allowed_dates` - (Block List) Date ranges on which upgrade is not allowed. 
(See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window--not_allowed_dates)) - `start_date` - (String) The date the maintenance window activates. If the current date is before this date, the maintenance window is inactive and will not be used for upgrades. If not specified, the maintenance window will be active right away. - `utc_offset` - (String) The UTC offset in format +/-HH:mm. If not specified, the default is '+00:00'. @@ -734,6 +859,7 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.maintenance_window.schedule` **_Optional_** + - `absolute_monthly` - (Block List) For schedules like: 'recur every month on the 10th' or 'recur every 4 months on the 15th'. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window--schedule--absolute_monthly)) - `relative_monthly` - (Block List) For schedules like: 'recur every month on the first Wednesday' or 'recur every 4 months on last Friday'. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window--schedule--relative_monthly)) - `daily` - (Block List) For schedules like: 'recur every 2 days'. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--maintenance_configurations--properties--maintenance_window--schedule--daily)) @@ -744,6 +870,7 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.maintenance_window.schedule.absolute_monthly` **_Required_** + - `day_of_month` - (Number) The date of the Month. Value should be between 1 to 31. - `interval_months` - (Number) Specifies the number of months between each set of occurrences. Value should be between 1 to 6. 
@@ -752,6 +879,7 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.maintenance_window.schedule.relative_monthly` **_Required_** + - `day_of_week` - (String) The day of the week. Value should be one of the following [Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday] - `interval_months` - (Number) Specifies the number of months between each set of occurrences. Value should be between 1 to 6. - `week_index` - (String) Specifies on which week of the month the dayOfWeek applies. Value should be one of the following [First, Second, Third, Fourth, Last] @@ -761,6 +889,7 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.maintenance_window.schedule.daily` **_Required_** + - `intervalDays` - (Number) Specifies the number of days between each set of occurrences. Value should be between 1 to 7. @@ -768,6 +897,7 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.maintenance_window.schedule.weekly` **_Required_** + - `day_of_week` - (Number) Specifies on which day of the week the maintenance occurs. Value should be one of the following [Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday] - `interval_weeks` - (Number) Specifies the number of weeks between each set of occurrences. Value should be between 1 to 4. @@ -776,6 +906,7 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { ### Nested Schema for `spec.cluster_config.spec.maintenance_configurations.properties.maintenance_window.not_allowed_dates` **_Required_** + - `end` - (String) End date for the date span. - `start` - (String) Start date for the date span. 
@@ -809,12 +940,60 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { ### Nested Schema for `timeouts` +### Nested Schema for `spec.cluster_config.spec.subscription_id.properties.oidc_issuer_profile` + + + +**_Required_** + +- `enabled` - (Boolean) Enable/Disable OIDC issuer profile. + +### Nested Schema for `spec.cluster_config.spec.managed_cluster.properties.security_profile` + + + +**_Required_** + +- `workload_identity` - (Block List) Enable/Disable workload identity. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--managed_cluster--properties--security_profile--workload_identity)) + + + +### Nested Schema for `spec.cluster_config.spec.managed_cluster.properties.security_profile.workload_identity` + +**_Required_** + +- `enabled` - (Boolean) Enable/Disable workload identity. + **_Optional_** - `create` - (String) Sets the timeout duration for creating a resource. The default timeout is 10 minutes. - `delete` - (String) Sets the timeout duration for deleting a resource. The default timeout is 10 minutes. - `update` - (String) Sets the timeout duration for updating a resource. The default timeout is 10 minutes. +### Nested Schema for `spec.cluster_config.spec.subscription_id.properties.oidc_issuer_profile` + + + +**_Required_** + +- `enabled` - (Boolean) Enable/Disable OIDC issuer profile. + +### Nested Schema for `spec.cluster_config.spec.managed_cluster.properties.security_profile` + + + +**_Required_** + +- `workload_identity` - (Block List) Enable/Disable workload identity. (See [below for nested schema](#nestedblock--spec--cluster_config--spec--managed_cluster--properties--security_profile--workload_identity)) + + + +### Nested Schema for `spec.cluster_config.spec.managed_cluster.properties.security_profile.workload_identity` + +**_Required_** + +- `enabled` - (Boolean) Enable/Disable workload identity. 
+ ## Attribute Reference --- @@ -840,110 +1019,3 @@ output "aks_cluster_v3" { value = data.rafay_aks_cluster_v3.cluster } ``` - ---- - -# rafay_aks_cluster_v3 (Azure CNI Overlay) - -## Example Usage - ---- - -```terraform -resource "rafay_aks_cluster" "demo-terraform" { - apiversion = "rafay.io/v1alpha1" - kind = "Cluster" - metadata { - name = "demo-terraform" - project = "terraform" - } - spec { - type = "aks" - blueprint = "default-aks" - cloudprovider = "testuser-azure" - cluster_config { - apiversion = "rafay.io/v1alpha1" - kind = "aksClusterConfig" - metadata { - name = "demo-terraform" - } - spec { - resource_group_name = "testuser-terraform" - managed_cluster { - apiversion = "2023-11-01" - identity { - type = "SystemAssigned" - } - location = "centralindia" - properties { - api_server_access_profile { - enable_private_cluster = true - } - dns_prefix = "testuser-test-dns" - kubernetes_version = "1.28.5" - network_profile { - network_plugin = "azure" - load_balancer_sku = "standard" - network_plugin_mode = "overlay" - pod_cidr = "192.168.0.0/16" - service_cidr = "10.0.0.0/16" - dns_service_ip = "10.0.0.10" - } - power_state { - code = "Running" - } - } - type = "Microsoft.ContainerService/managedClusters" - } - node_pools { - apiversion = "2023-11-01" - name = "primary" - location = "centralindia" - properties { - count = 2 - enable_auto_scaling = true - max_count = 2 - max_pods = 40 - min_count = 1 - mode = "System" - orchestrator_version = "1.28.5" - os_type = "Linux" - type = "VirtualMachineScaleSets" - vm_size = "Standard_DS2_v2" - node_labels = { - app = "infra" - dedicated = "true" - } - node_taints = ["app=infra:PreferNoSchedule"] - } - type = "Microsoft.ContainerService/managedClusters/agentPools" - } - } - } - system_components_placement { - node_selector = { - app = "infra" - dedicated = "true" - } - tolerations { - effect = "PreferNoSchedule" - key = "app" - operator = "Equal" - value = "infra" - } - - daemonset_override { - node_selection_enabled 
= false - tolerations { - key = "app1dedicated" - value = true - effect = "NoSchedule" - operator = "Equal" - } - } - } - } -} -``` - ---- diff --git a/docs/resources/aks_workload_identity.md b/docs/resources/aks_workload_identity.md new file mode 100644 index 00000000..d7897b5b --- /dev/null +++ b/docs/resources/aks_workload_identity.md @@ -0,0 +1,164 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "rafay_aks_workload_identity Resource - terraform-provider-rafay" +subcategory: "" +description: |- +--- + +# rafay_aks_workload_identity (Resource) + +This example is for the AKS Workload Identity. + +## Important + +The `depends_on` constraint is **mandatory** for the `rafay_aks_workload_identity` resource. Ensure that the workload identity resource explicitly depends on the associated cluster resource. This ensures proper resource creation and dependency handling between the workload identity and the cluster it is associated with. + +## Example Usage + +```terraform +resource "rafay_aks_workload_identity" "demo-terraform" { + depends_on = [rafay_aks_cluster.my_cluster] + + metadata { + cluster_name = "aks-tf-wi-1" + project = "defaultproject" + } + + spec { + create_identity = true + + metadata { + name = "aks-tf-wi-1-uai-1" + location = "centralindia" + resource_group = "aks-rg-ci" + tags = { + "owner" = "aks" + "department" = "engg" + } + } + + role_assignments { + name = "Key Vault Secrets User" + scope = "/subscriptions/a2252eb2-7a25-432b-a5ec-e18eba6f26b1/resourceGroups/aks-rg-ci/providers/Microsoft.KeyVault/vaults/aks-keyvault" + } + + service_accounts { + create_account = true + + metadata { + name = "aks-tf-wi-1-sa-10" + namespace = "default" + annotations = { + "role" = "dev" + } + labels = { + "owner" = "aks" + "department" = "engg" + } + } + } + + } +} +``` + +## Example Usage (Soft Creation of AKS Workload Identity and Soft Creation of Service Account) + +```terraform +resource "rafay_aks_workload_identity" 
"demo-terraform" { + depends_on = [rafay_aks_cluster.my_cluster] + + metadata { + cluster_name = "aks-tf-wi-1" + project = "defaultproject" + } + + spec { + create_identity = false + + metadata { + name = "aks-tf-wi-1-uai-1" + location = "centralindia" + resource_group = "aks-rg-ci" + client_id = "00000000-0000-0000-0000-000000000000" + principal_id = "00000000-0000-0000-0000-000000000000" + tags = { + "owner" = "aks" + "department" = "engg" + } + } + + role_assignments { + name = "Key Vault Secrets User" + scope = "/subscriptions/a2252eb2-7a25-432b-a5ec-e18eba6f26b1/resourceGroups/aks-rg-ci/providers/Microsoft.KeyVault/vaults/aks-keyvault" + } + + service_accounts { + create_account = false + + metadata { + name = "aks-tf-wi-1-sa-10" + namespace = "default" + annotations = { + "role" = "dev" + } + labels = { + "owner" = "aks" + "department" = "engg" + } + } + } + + } +} +``` + +## Argument Reference + +The following arguments are supported: + +- depends_on: (Required) The `depends_on` attribute ensures that the `rafay_aks_workload_identity` resource is created after the associated `rafay_aks_cluster` resource. It is mandatory for the workload identity resource to depend on the corresponding cluster resource. + +- `metadata`: (Required) The metadata for the AKS workload identity. It includes the `cluster_name` and `project` attributes. + + - `cluster_name`: (Required) The name of the cluster. + + - `project`: (Required) The project name. + +- `spec`: (Required) The specification for the AKS workload identity. It includes the `create_identity`, `metadata`, `role_assignments`, and `service_accounts` attributes. + + - `create_identity`: (Required) Specifies whether to create the identity or not. + + - `metadata`: (Required) The metadata for the AKS workload identity. It includes the `name`, `location`, `resource_group`, and `tags` attributes. + + - `name`: (Required) The name of the AKS workload identity. 
+ + - `location`: (Required) The location of the AKS workload identity. + + - `resource_group`: (Required) The resource group of the AKS workload identity. + + - `tags`: (Optional) The tags for the AKS workload identity. + + - `client_id`: (Optional) The client ID of the existing AKS workload identity, required when reusing the identity. + + - `principal_id`: (Optional) The principal ID of the existing AKS workload identity, required when reusing the identity. + + - `role_assignments`: (Optional) The role assignments for the AKS workload identity. It includes the `name` and `scope` attributes. + + - `name`: (Required) The name of the role assignment. + + - `scope`: (Required) The scope of the role assignment. + + - `service_accounts`: (Optional) The service accounts for the AKS workload identity. It includes the `create_account`, `metadata`, `labels` and `annotations` attributes. + + - `create_account`: (Required) Specifies whether to create the service account or not. + + - `metadata`: (Required) The metadata for the service account. It includes the `name`, `namespace`, and `labels` attributes. + + - `name`: (Required) The name of the service account. + + - `namespace`: (Required) The namespace of the service account. + + - `annotations`: (Optional) The annotations for the service account. + + - `labels`: (Optional) The labels for the service account. diff --git a/docs/resources/breakglassaccess.md b/docs/resources/breakglassaccess.md new file mode 100644 index 00000000..cdc4f9fa --- /dev/null +++ b/docs/resources/breakglassaccess.md @@ -0,0 +1,104 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "rafay_breakglassaccess Resource - terraform-provider-rafay" +subcategory: "" +description: |- + +--- + +# rafay_breakglassaccess (Resource) + +Breakglass access is a resource that allows admins to create limited time access.
+ +## Example Usage + +```terraform +#Basic example for breakglass access +resource "rafay_breakglassaccess" "test_user" { + metadata { + name = "test@rafay.co" + } + spec { + groups { + group_expiry { + expiry = 12 + name = "grp1" + } + group_expiry { + expiry = 12 + name = "grp2" + start_time = "2024-09-20T08:00:00Z" + } + user_type = "local" + } + groups { + group_expiry { + expiry = 6 + name = "grp3" + } + group_expiry { + expiry = 6 + name = "grp4" + start_time = "2024-09-20T08:00:00Z" + } + user_type = "sso" + } + } +} +``` + + +## Schema + +***Required*** +- `metadata` (Block List, Max: 1) Metadata of the break glass access resource (see [below for nested schema](#nestedblock--metadata)) +- `spec` (Block List, Max: 1) Specification of the break glass access resource (see [below for nested schema](#nestedblock--spec)) + +***Optional*** +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. + + +### Nested Schema for `metadata` + +***Required*** +- `name` (String) name of the resource + + +### Nested Schema for `spec` + + +***Required*** +- `groups` (Block List) List of groups with expiry details to be activated for user with user type(sso/local) (see [below for nested schema](#nestedblock--spec--groups)) + + +### Nested Schema for `spec.groups` + +***Required*** +- `group_expiry` (Block List) (see [below for nested schema](#nestedblock--spec--groups--group_expiry)) +- `user_type` (String) Type of the user(sso/local) for which access will be added + + +### Nested Schema for `spec.groups.group_expiry` + +***Required*** +- `expiry` (Number) Hours after which user access will expire +- `name` (String) Group Name which access will be added + +***Optional*** +- `start_time` (String) Time from when user access will be active + + + +### Nested Schema for `timeouts` + +***Optional*** +- `create` - (String) Sets the timeout duration for creating a resource. 
The default timeout is 10 minutes. +- `delete` - (String) Sets the timeout duration for deleting a resource. The default timeout is 10 minutes. +- `update` - (String) Sets the timeout duration for updating a resource. The default timeout is 10 minutes. + + + diff --git a/docs/resources/cost_profile.md b/docs/resources/cost_profile.md index afc45164..f00ffba9 100644 --- a/docs/resources/cost_profile.md +++ b/docs/resources/cost_profile.md @@ -73,6 +73,27 @@ resource "rafay_cost_profile" "tfdemocostprofile-azure" { } } ``` +Example cost profile for GCP + +```terraform +resource "rafay_cost_profile" "tfdemocostprofile-gcp" { + metadata { + name = "tfdemocostprofile-gcp" + project = "terraform" + } + spec { + version = "v0" + provider_type = "gcp" + installation_params { + gcp { + gcp_credentials { + cloud_credentials_name = "newcred" + } + } + } + } +} +``` Example cost profile for Other providers @@ -123,7 +144,7 @@ resource "rafay_cost_profile" "tfdemocostprofile-other" { - `installation_params` (Block List, Max: 1) The parameters for the profile installation. (See [below for nested schema](#nestedblock--spec--installation_params)) - `version` (String) The version of the profile. -- `provider_type` (String) The cloud provider type. The supported values are: `aws`, `azure`, and `other`. +- `provider_type` (String) The cloud provider type. The supported values are: `aws`, `azure`, `gcp`, and `other`. @@ -136,6 +157,7 @@ resource "rafay_cost_profile" "tfdemocostprofile-other" { - `aws` (Block List, Max: 1) The AWS cost profile parameters. Use with AWS specific parameters. (See [below for nested schema](#nestedblock--spec--installation_params--aws)) - `azure` (Block List, Max: 1) The Azure cost profile parameters. Use with Azure specific parameters. (See [below for nested schema](#nestedblock--spec--installation_params--azure)) +- `gcp` (Block List, Max: 1) The GCP cost profile parameters. Use with GCP specific parameters.
(See [below for nested schema](#nestedblock--spec--installation_params--gcp)) - `other` (Block List, Max: 1) The cost profile parameters for other providers. Use custom parameters for other providers. (See [below for nested schema](#nestedblock--spec--installation_params--other)) @@ -158,6 +180,11 @@ resource "rafay_cost_profile" "tfdemocostprofile-other" { - `custom_pricing` (Block List, Max: 1) The Azure cost profile parameters. Use Azure specific parameters. (See [below for nested schema](#nestedblock--spec--installation_params--azure--custom_pricing)) + +### Nested Schema for `spec.installation_params.gcp` + +- `gcp_credentials` (Block List, Max: 1) The GCP credentials to fetch for cost data. (See [below for nested schema](#nestedblock--spec--installation_params--gcp--gcp_credentials)) + ### Nested Schema for `spec.installation_params.other` @@ -198,6 +225,11 @@ resource "rafay_cost_profile" "tfdemocostprofile-other" { - `billing_account_id` (String) The Azure Billing Account ID. - `offer_id` (String) The Azure Offer ID. + +### Nested Schema for `spec.installation_params.gcp.gcp_credentials` + +- `cloud_credentials_name` (String) The cloud credentials name. 
+ ### Nested Schema for `spec.sharing.projects` diff --git a/docs/resources/driver.md b/docs/resources/driver.md index e64c2487..cd7d0f97 100644 --- a/docs/resources/driver.md +++ b/docs/resources/driver.md @@ -45,13 +45,64 @@ resource "rafay_driver" "driver" { key : "key" operator : "Equal" value : "value" - effect : "NoSchedule" + effect : "NoExecute" toleration_seconds = 300 } ] } } } + inputs { + name = "cc-1" + } + inputs { + name = "inline-cc-1" + data { + envs { + key = "name-modified" + value = "modified-value" + options { + description = "contains the input variables with default values" + sensitive = false + override { + type = "allowed" + } + } + } + envs { + key = "name-new" + value = "new-value" + } + files { + name = "some/variables.tf" + options { + description = "contains the input variables with default values" + sensitive = true + override { + type = "allowed" + } + } + } + variables { + name = "new-variable" + value_type = "text" + value = "new-value" + options { + override { + type = "restricted" + restricted_values = ["new-value", "modified-value"] + } + description = "this is a dummy variable" + sensitive = false + required = true + } + } + } + } + outputs = jsonencode({ + key1 = "value1" + key2 = "value2" + }) } } ``` @@ -86,6 +137,8 @@ resource "rafay_driver" "driver" { ***Required*** - `config` (Block List, Max: 1) Driver configuration (see [below for nested schema](#nestedblock--spec--config)) +- `inputs` (Block List) Inputs for the driver (see [below for nested schema](#nestedblock--spec--inputs)) +- `outputs` (String) Outputs for the driver in JSON string format ***Optional*** @@ -155,27 +208,39 @@ resource "rafay_driver" "driver" { - `labels` (Map of String) Specify the labels - `namespace` (String) Specify the namespace - `node_selector` (Map of String) Specify the node selectors -- `security_context` (Block List, Max: 1) Specify the security context (see [below for nested 
schema](#nestedblock--spec--config--container--working_dir_path--security_context)) +- `security_context` (Block List, Max: 1) Specify the security context (see [below for nested schema](#nestedblock--spec--config--container--kube_options--security_context)) - `service_account_name` (String) Specify the service account name +- `tolerations` (Block List) Specify the tolerations (see [below for nested schema](#nestedblock--spec--config--container--kube_options--tolerations)) + + +### Nested Schema for `spec.config.container.kube_options.tolerations` - -### Nested Schema for `spec.config.container.working_dir_path.security_context` +***Optional*** + +- `key` (String) Specify the key +- `operator` (String) Specify the operator, Accepted values are `Exists`, `Equal`. +- `value` (String) Specify the value +- `effect` (String) Specify the effect, Accepted values are `NoSchedule`, `PreferNoSchedule`, `NoExecute`. +- `toleration_seconds` (Number) Specify the toleration seconds when `NoExecute` effect is given. 
+ + +### Nested Schema for `spec.config.container.kube_options.security_context` ***Optional*** -- `privileged` (Block List, Max: 1) Specify if privileged permissions (see [below for nested schema](#nestedblock--spec--config--container--working_dir_path--security_context--privileged)) -- `read_only_root_file_system` (Block List, Max: 1) Specify if permission is read only root file system (see [below for nested schema](#nestedblock--spec--config--container--working_dir_path--security_context--read_only_root_file_system)) +- `privileged` (Block List, Max: 1) Specify if privileged permissions (see [below for nested schema](#nestedblock--spec--config--container--kube_options--security_context--privileged)) +- `read_only_root_file_system` (Block List, Max: 1) Specify if permission is read only root file system (see [below for nested schema](#nestedblock--spec--config--container--kube_options--security_context--read_only_root_file_system)) - -### Nested Schema for `spec.config.container.working_dir_path.security_context.read_only_root_file_system` + +### Nested Schema for `spec.config.container.kube_options.security_context.privileged` ***Optional*** - `value` (Boolean) - -### Nested Schema for `spec.config.container.working_dir_path.security_context.read_only_root_file_system` + +### Nested Schema for `spec.config.container.kube_options.security_context.read_only_root_file_system` ***Optional*** @@ -256,4 +321,26 @@ resource "rafay_driver" "driver" { - `delete` (String) - `update` (String) + +### Nested Schema for `spec.inputs` + +***Required*** + +- `name` (String) name of the config context + + +### Nested Schema for `compound ref` +***Required*** + +- `name` (string) name of the driver ref +- `data` (Block List, Max: 1) Inline definition for driver (see [below for nested schema](#nestedblock--inline)) + + +### Nested Schema for `inline` + +***Required*** + +- `config` (Block List, Max: 1) Driver configuration (see [below for nested
schema](#nestedblock--spec--config)) +- `inputs` (Block List) Inputs for the driver (see [below for nested schema](#nestedblock--spec--inputs)) +- `outputs` (String) Outputs for the driver in JSON string format diff --git a/docs/resources/eks_cluster.md b/docs/resources/eks_cluster.md index f1457778..ea7cc51b 100644 --- a/docs/resources/eks_cluster.md +++ b/docs/resources/eks_cluster.md @@ -770,6 +770,7 @@ addons { - `service_role_permission_boundary` - (String) - The service role permission bounadary policy ARN of the cluster. - `service_role_arn` - (String) The service role ARN of the cluster. - `with_oidc` - (Boolean) Enables the IAM OpenID connect (OIDC) provider as well as the IAM roles for service accounts (IRSA) for the Amazon CNI plugin. +- `pod_identity_associations` - (Block List) The pod identity associations to create in the cluster. (See [below for nested schema](#nestedblock--cluster_config--iam--pod_identity_associations)) @@ -815,6 +816,25 @@ addons { - `external_dns` - (Boolean) Adds external-dns policies for Amazon Route 53. - `image_builder` - (Boolean) Allow full Elastic Container Registry (ECR) access. + +### Nested Schema for `cluster_config.iam.pod_identity_associations` + +***Required*** + +- `permission_policy` - (String) Holds a policy document to attach to the service account in json string format. +- `permission_policy_arns` - (List of String) The list of ARNs of the IAM policies to attach to the service account. +- `role_arn` - (String) The ARN of the role to attach to the service account. +- `permission_boundary_arn` - (String) ARN of the permissions boundary to associate +- `namespace` - (String) The namespace of the service account. +- `service_account_name` - (String) The name of the service account + **Note**: At least `permission_policy`, `permission_policy_arns`, or `role_arn` is required. + +***Optional*** +- `tags` - (Map of String) The AWS tags for the service account. 
+- `create_service_account` - (Bool) Enable flag to create service account +- `role_name` - (String) User defined name for role +- `well_known_policies` - (Block List) Use to attach common IAM policies. (See [below for nested schema](#nestedblock--cluster_config--iam--service_accounts--well_known_policies)) + ### Nested Schema for `cluster_config.addons` @@ -863,7 +883,7 @@ addons { ***Optional*** -- `encrypt_existing_secrets` - (Boolean) Set to false to disable encrypting existing secrets. Default is true. +- `encrypt_existing_secrets` - (Boolean) Set to false to disable encrypting existing secrets. Default is False. diff --git a/docs/resources/eks_pod_identity.md b/docs/resources/eks_pod_identity.md new file mode 100644 index 00000000..23f3e09c --- /dev/null +++ b/docs/resources/eks_pod_identity.md @@ -0,0 +1,80 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "rafay_eks_pod_identity Resource - terraform-provider-rafay" +subcategory: "" +description: |- + +--- + +# rafay_eks_pod_identity (Resource) + +This example is for EKS Pod Identity + +## Example Usage + +```terraform +resource "rafay_eks_pod_identity" "pod_identity_1" { + metadata { + cluster_name = "eks_cluster_name" + project_name = "defaultproject" + } + spec { + service_account_name = "svc_one" + namespace = "rafay-demo" + create_service_account = true + role_arn = "arn:aws:iam::679196758854:role/rafay-eks-full" + } +} +``` + + +## Schema + +### Required + +- `metadata` (Block List, Min: 1, Max: 1) (see [below for nested schema](#nestedblock--metadata)) +- `spec` (Block List, Min: 1, Max: 1) (see [below for nested schema](#nestedblock--spec)) + + + +### Nested Schema for `metadata` + +Required: + +- `cluster_name` (String) +- `project_name` (String) + + + +### Nested Schema for `spec` + +Required: + +- `namespace` (String) namespace of service account +- `service_account_name` (String) name of service account + +Optional: + +- `create_service_account` (Boolean) 
enable flag to create service account +- `permission_boundary_arn` (String) permission boundary ARN +- `permission_policy` (String) permission policy document +- `permission_policy_arns` (List of String) permission policy ARNs +- `role_arn` (String) role ARN of AWS role to associate with service account +- `role_name` (String) aws role name to associate +- `tags` (Map of String) AWS tags for the service account +- `well_known_policies` (Block List) for attaching common IAM policies (see [below for nested schema](#nestedblock--spec--well_known_policies)) + + +### Nested Schema for `spec.well_known_policies` + +Optional: + +- `auto_scaler` (Boolean) service account annotations +- `aws_load_balancer_controller` (Boolean) adds policies for using the aws-load-balancer-controller. +- `cert_manager` (Boolean) adds cert-manager policies. +- `ebs_csi_controller` (Boolean) adds cert-manager policies. +- `efs_csi_controller` (Boolean) adds policies for using the ebs-csi-controller. +- `external_dns` (Boolean) adds external-dns policies for Amazon Route 53. +- `image_builder` (Boolean) allows for full ECR (Elastic Container Registry) access. 
+ + diff --git a/docs/resources/environment.md b/docs/resources/environment.md index 32002e78..5d7bdcd3 100644 --- a/docs/resources/environment.md +++ b/docs/resources/environment.md @@ -75,6 +75,7 @@ resource "rafay_environment" "eks-rds-env-example" { - `variables` (Block List) Variables data for environment to be created (see [below for nested schema](#nestedblock--spec--variables)) - `env_vars` (Block List) Environment variables data (see [below for nested schema](#nestedblock--spec--envs)) - `files` (Block List) File path information (see [below for nested schema](#nestedblock--spec--files)) +- `schedule_optouts` (Block List) Request to opt of of schedules, this may be honoured or not depending on the template configurations and approvals (see [below for nested schema](#nestedblock--spec--schedule-optouts)) ### Nested Schema for `spec.agents` @@ -174,4 +175,15 @@ resource "rafay_environment" "eks-rds-env-example" { ***Optional*** - `data` (String) Data of the file content ( required if name if not a relative path ) -- `sensitive` (Boolean) Determines whether the value is sensitive or not, accordingly applies encryption on it \ No newline at end of file +- `sensitive` (Boolean) Determines whether the value is sensitive or not, accordingly applies encryption on it + + +### Nested Schema for `spec.schedule_optouts` + +***Required*** + +- `name` (String) Name of the schedule to be opted out from + +***Optional*** + +- `duration` (String) Requested opt out duration \ No newline at end of file diff --git a/docs/resources/environment_template.md b/docs/resources/environment_template.md index 70ec3089..af93eded 100644 --- a/docs/resources/environment_template.md +++ b/docs/resources/environment_template.md @@ -72,6 +72,56 @@ resource "rafay_environment_template" "aws-et-example" { } } timeout_seconds = 1000 + execute_once = true + } + on_init { + name = "infracost" + type = "driver" + driver { + data { + config { + type = "http" + http { + method = "GET" + endpoint = 
"https://jsonplaceholder.typicode.com/todos/1" + } + } + inputs { + name = "some-cc" + data { + variables { + name = "name" + value_type = "text" + value = "aws-elasticache" + options { + description = "this is the resource name to be applied" + sensitive = false + required = true + } + } + } + } + outputs = jsonencode({ + key1 = "value1" + key2 = "value2" + }) + } + } + } + } + schedules { + description = "Destroy tf ec2 instance" + name = "destroy-tf-ec2-instance" + type = "destroy" + cadence { + time_to_live = "5h" + } + opt_out_options { + max_allowed_duration = "24h" + max_allowed_times = 2 + allow_opt_out { + value = true + } } } agents { @@ -128,6 +178,7 @@ resource "rafay_environment_template" "aws-et-example" { - `agent_override` (Block List, Max: 1) Agent override (see [below for nested schema](#nestedblock--spec--agent_override)) - `icon_url` (String) Icon URL for the template - `readme` (String) Readme for the template +- `schedules` (Block List) Reference to schedules associated with environment templates (see [below for nested schema](#nestedblock--spec--schedules)) ### Nested Schema for `spec.agent_override` @@ -178,6 +229,7 @@ resource "rafay_environment_template" "aws-et-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--on_completion--options)) - `timeout_seconds` (Number) Specify the timeout in seconds +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.on_completion.agents` @@ -253,6 +305,7 @@ resource "rafay_environment_template" "aws-et-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--on_failure--options)) - `timeout_seconds` (Number) Specify the timeout in seconds +- `execute_once` (Boolean) Specify if the hook should be 
executed only once ### Nested Schema for `spec.hooks.on_failure.agents` @@ -329,6 +382,7 @@ resource "rafay_environment_template" "aws-et-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--on_init--options)) - `timeout_seconds` (Number) Specify the timeout in seconds +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.on_init.agents` @@ -405,6 +459,7 @@ resource "rafay_environment_template" "aws-et-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--on_success--options)) - `timeout_seconds` (Number) Specify the timeout in seconds +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.on_success.agents` @@ -572,6 +627,7 @@ resource "rafay_environment_template" "aws-et-example" { - `config` (Block List, Max: 1) WorkflowHandler configuration (see [below for nested schema](#nestedblock--spec--hooks--driver--data--config)) - `inputs` (Block List) Specify the input data (see [below for nested schema](#nestedblock--spec--hooks--driver--data--inputs)) +- `outputs` (String) Specify the output data ### Nested Schema for `spec.hooks.driver.data.config` @@ -1174,4 +1230,80 @@ Optional: ***Optional*** - `restricted_values` (List of String) If the override type is restricted, values it is restricted to -- `type` (String) Specify the type of ovverride this variable supports \ No newline at end of file +- `type` (String) Specify the type of ovverride this variable supports + + +### Nested Schema for `spec.provider_options.custom` + +***Required*** + +- `tasks` (Block List) Configure the custom tasks (see [below for nested schema](#nestedblock--spec--provider_options--custom--tasks)) + + +### Nested Schema for 
`spec.provider_options.custom.tasks` + +**Required** + +- `name` (String) name of the task +- `type` (String) Specify the type of task, Available options are `driver`. + +***Optional*** + +- `agents` (Block List) Specify the resource ref agents (see [below for nested schema](#nestedblock--spec--provider_options--custom--tasks--agents)) +- `depends_on` (List of String) specify task dependencies +- `description` (String) description of task +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--hooks--driver)) +- `on_failure` (String) Specify the on failure action +- `timeout_seconds` (Number) Specify the timeout in seconds + + +### Nested Schema for `spec.provider_options.custom.tasks.agents` + +***Required*** + +- `name` (String) name of the agent resource + + +### Nested Schema for `spec.provider_options.system` + +***Required*** + +- `kind` (String) Specify the type of rafay resource, Available options are `credential`, `cluster`. + + +### Nested Schema for `spec.schedules` + +***Required*** + +- `name` (String) name of the schedule +- `type` (String) schedule type, Available options are `deploy`, `destroy`, `workflows`. 
+- `cadence` (Block List, Max: 1) Configure a schedule cadence at which a job would automatically trigger (see [below for nested schema](#nestedblock--spec--schedules--cadence)) + +***Optional*** + +- `description` (String) Description of the schedule +- `context` (Block List, Max: 1) Input data configuration that are needed as part of this schedule run (see [below for nested schema](#nestedblock--spec--contexts)) +- `workflows` (Block List, Max: 1) Name of the custom workflow provider that needs to be executed with this job (see [below for nested schema](#nestedblock--spec--provider_options--custom--tasks)) +- `opt_out_options` (Block List, Max: 1) Opt Out Options configured with this schedule (see [below for nested schema](#nestedblock--spec--schedules--optout)) + + +### Nested Schema for `spec.schedules.cadence` + +### https://en.wikipedia.org/wiki/Cron +- `cron_expression` (String) Cron expression used to configure scheduling jobs + +***Optional*** + +- `cron_timezone` (String) Specify the timezone of cron expression +- `time_to_live` (String) Specify the maximum time to live duration of an environment, time units are 'h', 'd' e.g. 8h, 2d + + +### Nested Schema for `spec.schedules.optout` + +- `allow_opt_out` (Bool) Specify whether users can opt out from this schedule + +***Optional*** + +- `max_allowed_duration` (String) Specify the maximum allowed opt out duration, time units are 'm', 'h', 'd' e.g. 8h, 2d +- `max_allowed_times` (Number) Specify the maximum number of times users can opt out without approval e.g. 
users can max opt out of this schedule thrice +- `approval` (Block List, Max: 1) Details of approval workflow that needs to be execution in case of user opt-out (see [below for nested schema](#nestedblock--spec--provider_options--custom--tasks)) \ No newline at end of file diff --git a/docs/resources/mks_cluster.md b/docs/resources/mks_cluster.md new file mode 100644 index 00000000..299ff93a --- /dev/null +++ b/docs/resources/mks_cluster.md @@ -0,0 +1,515 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "rafay_mks_cluster Resource - rafay" +subcategory: "" +description: |- + +--- + +# rafay_mks_cluster (Resource) + +## Example usage + +### Basic Single node Cluster +```terraform +resource "rafay_mks_cluster" "mks-noha-converged-cluster" { + api_version = "infra.k8smgmt.io/v3" + kind = "Cluster" + metadata = { + name = "mks-noha-converged-cluster" + project = "terraform" + } + spec = { + blueprint = { + name = "minimal" + } + config = { + auto_approve_nodes = true + kubernetes_version = "v1.28.9" + installer_ttl = 365 + network = { + cni = { + name = "Calico" + version = "3.26.1" + } + pod_subnet = "10.244.0.0/16" + service_subnet = "10.96.0.0/12" + } + cluster_ssh = { + username = "ubuntu" + port = "22" + private_key_path = "/path/to/privatekey/" + } + nodes = { + "hostname1" = { + arch = "amd64" + hostname = "hostname1" + operating_system = "Ubuntu22.04" + private_ip = "10.12.25.234" + roles = ["ControlPlane", "Worker"] + ssh = { + ip_address = "129.146.203.73" + } + } + } + } + } +} +``` + + +### Example HA cluster with Converged Control Plane nodes + +In this example, control plane nodes will also be used for workload placement, and since `spec.cloud_credentials` are provided, you can perform Terraform operations from anywhere outside the node's network. 
+ +```terraform +resource "rafay_mks_cluster" "mks-ha-cluster" { + api_version = "infra.k8smgmt.io/v3" + kind = "Cluster" + metadata = { + name = "mks-ha-cluster" + project = "terraform" + } + spec = { + blueprint = { + name = "minimal" + } + cloud_credentials = "mks-ssh-credentials" + config = { + auto_approve_nodes = true + high_availability = true + kubernetes_version = "v1.28.9" + installer_ttl = 365 + kubernetes_upgrade = { + strategy = "sequential" + params = { + worker_concurrency = "50%" + } + } + network = { + cni = { + name = "Calico" + version = "3.26.1" + } + pod_subnet = "10.244.0.0/16" + service_subnet = "10.96.0.0/12" + } + nodes = { + "hostname1" = { + arch = "amd64" + hostname = "hostname1" + operating_system = "Ubuntu22.04" + private_ip = "10.12.25.233" + roles = ["ControlPlane", "Worker"] + labels = { + "app" = "infra" + "infra" = "true" + } + }, + "hostname2" = { + arch = "amd64" + hostname = "hostname2" + operating_system = "Ubuntu22.04" + private_ip = "10.12.25.234" + roles = ["ControlPlane", "Worker"] + }, + "hostname3" = { + arch = "amd64" + hostname = "hostname3" + operating_system = "Ubuntu22.04" + private_ip = "10.12.25.235" + roles = ["ControlPlane", "Worker"] + }, + "hostname4" = { + arch = "amd64" + hostname = "hostname4" + operating_system = "Ubuntu22.04" + private_ip = "10.12.114.236" + roles = ["Worker"] + } + } + } + } +} +``` + + +### Example HA Cluster Having Dedicated Control Plane with System Component Placement + +In this example, the cluster is configured with both `spec.config.dedicated_control_plane` and `spec.system_components_placement`, which means that control plane node are dedicated(no workloads will be placed) and Rafay managed add-ons are placed on nodes with matching `labels` and `taints` + +```terraform +resource "rafay_mks_cluster" "mks-ha-cluster-with-dedicated-cp" { + api_version = "infra.k8smgmt.io/v3" + kind = "Cluster" + metadata = { + name = "mks-ha-cluster-with-dedicated-cp" + project = "terraform" + } + 
spec = { + blueprint = { + name = "minimal" + } + cloud_credentials = "mks-ssh-credentials" + config = { + auto_approve_nodes = true + high_availability = true + dedicated_control_plane = true + kubernetes_version = "v1.28.9" + installer_ttl = 365 + kubernetes_upgrade = { + strategy = "sequential" + params = { + worker_concurrency = "50%" + } + } + network = { + cni = { + name = "Calico" + version = "3.26.1" + } + pod_subnet = "10.244.0.0/16" + service_subnet = "10.96.0.0/12" + } + nodes = { + "hostname1" = { + arch = "amd64" + hostname = "hostname1" + operating_system = "Ubuntu22.04" + private_ip = "10.12.25.233" + roles = ["ControlPlane"] + labels = { + "app" = "infra" + "infra" = "true" + } + }, + "hostname2" = { + arch = "amd64" + hostname = "hostname2" + operating_system = "Ubuntu22.04" + private_ip = "10.12.25.234" + roles = ["ControlPlane"] + }, + "hostname3" = { + arch = "amd64" + hostname = "hostname3" + operating_system = "Ubuntu22.04" + private_ip = "10.12.25.235" + roles = ["ControlPlane"] + }, + "hostname4" = { + arch = "amd64" + hostname = "hostname4" + operating_system = "Ubuntu22.04" + private_ip = "10.12.114.236" + roles = ["Worker"] + labels = { + "app" = "infra" + "infra" = "true" + } + taints = [ + { + effect = "NoSchedule" + key = "infra" + value = "true" + }, + { + effect = "NoSchedule" + key = "app" + value = "infra" + }, + ] + } + } + } + system_components_placement = { + node_selector = { + "app" = "infra" + "infra" = "true" + } + tolerations = [ + { + effect = "NoSchedule" + key = "infra" + operator = "Equal" + value = "true" + }, + { + effect = "NoSchedule" + key = "app" + operator = "Equal" + value = "infra" + }, + { + effect = "NoSchedule" + key = "app" + operator = "Equal" + value = "platform" + }, + ] + } + } +} +``` + + +### To upgrade the cluster + +You can change the current Kubernetes version under `spec.config.kubernetes_version` to target supported version by Rafay and also customise the upgrade behaviour with 
`spec.config.kubernetes_upgrade` + + + +## Schema + +**Required** + +- `metadata` (Attributes) Contains data that helps uniquely identify the cluster (see [below for nested schema](#nestedatt--metadata)) +- `spec` (Attributes) This is the desired state for the cluster and defines its characteristics (see [below for nested schema](#nestedatt--spec)) + +**Optional** + +- `api_version` (String) Api version for the cluster. Defaults to `infra.k8smgmt.io/v3` +- `kind` (String) Kind. Defaults to `Cluster` + + + +### Nested Schema for `metadata` + +**Required** + +- `name` (String) The name of the cluster. +- `project` (String) The name of the Rafay project the cluster will be created in + +**Optional** +- `description` (String) Description for the cluster +- `labels` (Map of String) Key-value pairs containing metadata and are used to identify the cluster +- `annotations` (Map of String) Annotations are extra non-identifying metadata associated with the Cluster + + + +### Nested Schema for `spec` + +**Required** + +- `blueprint` (Attributes) The blueprint to be used for this cluster. (see [below for nested schema](#nestedatt--spec--blueprint)) +- `config` (Attributes) Contains cluster configuration such as Kubernetes version, network configuration, etc. (see [below for nested schema](#nestedatt--spec--config)) + + +**Optional** + +- `cloud_credentials` (String) The SSH credentials to be used to run bootstrap commands for node discovery. It's required if [spec.config.cluster_ssh](#nestedatt--spec--config--cluster_ssh) is not provided. +- `proxy` (Attributes) The proxy to be used for this cluster. (see [below for nested schema](#nestedatt--spec--proxy)) +- `sharing` (Attributes) Sharing spec to be used for sharing the cluster with projects (see [below for nested schema](#nestedatt--spec--sharing)) +- `system_components_placement` (Attributes) Option to place Rafay Managed Add-ons and core components on Nodes with matching taints and labels.
(see [below for nested schema](#nestedatt--spec--system_components_placement)) +- `type` (String) The cluster type. Defaults to `mks` + + + +### Nested Schema for `spec.blueprint` + +**Required** + +- `name` (String) Name of the blueprint + +**Optional** + +- `version` (String) Version of the blueprint + + + +### Nested Schema for `spec.config` + +**Required** + +- `kubernetes_version` (String) Configure Kubernetes version of the Control Plane. +- `network` (Attributes) Contains network specification for the cluster (see [below for nested schema](#nestedatt--spec--config--network)) +- `nodes` (Attributes Map) Contains configuration for each node in the cluster (see [below for nested schema](#nestedatt--spec--config--nodes)) + +**Optional** + +- `installer_ttl` (Integer) Allows ttl configuration for the installer config. If not provided, the ttl defaults to 365 days. +- `auto_approve_nodes` (Boolean) By default, this setting allows incoming nodes to be automatically approved. It is recommended to set this option to true to avoid the need for manual approval for each node. +- `cluster_ssh` (Attributes) The default SSH configuration used to run bootstrap commands for node discovery. It's required if `spec.cloud_credentials` is not provided (see [below for nested schema](#nestedatt--spec--config--cluster_ssh)) +- `dedicated_control_plane` (Boolean) Select this option for preventing scheduling of user workloads on Control Plane nodes +- `high_availability` (Boolean) Select this option for highly available control plane.
Minimum three control plane nodes are required +- `kubernetes_upgrade` (Attributes) Strategize the Kubernetes upgrade behaviour among the worker nodes (see [below for nested schema](#nestedatt--spec--config--kubernetes_upgrade)) +- `location` (String) The data center location where the cluster nodes will be launched + + +### Nested Schema for `spec.config.network` + +**Required** + +- `cni` (Attributes) CNI Specification for the cluster (see [below for nested schema](#nestedatt--spec--config--network--cni)) +- `pod_subnet` (String) Pods will be assigned IPs within this CIDR. For example: 10.244.0.0/16. +- `service_subnet` (String) Kubernetes Services will be assigned IPs within this CIDR. For example: 10.96.0.0/12. + +**Optional** + +- `ipv6` (Attributes) Enable for Dual Stack support (see [below for nested schema](#nestedatt--spec--config--network--ipv6)) + + +### Nested Schema for `spec.config.network.cni` + +**Required** + +- `name` (String) The CNI plugin to be used in the cluster. Supported plugins are `Calico` or `Cilium` +- `version` (String) Version of the CNI Plugin + + + +### Nested Schema for `spec.config.network.ipv6` + +**Optional** + +- `pod_subnet` (String) Pods will be assigned IPs within this IPV6 CIDR. For example: 2001:db8:42:0::/56 +- `service_subnet` (String) Kubernetes Services will be assigned IPs within this CIDR. For example: 2001:db8:42:1::/112 + + + + +### Nested Schema for `spec.config.nodes` + +**Required** + +- `arch` (String) System Architecture of the node +- `hostname` (String) Hostname of the node +- `operating_system` (String) Operating System of the node +- `private_ip` (String) Private ip address of the node +- `roles` (Set of String) Valid roles are: 'ControlPlane', 'Worker', 'Storage' + +**Optional** + +- `interface` (String) Interface to be used on the node +- `labels` (Map of String) Use Kubernetes labels to control how workloads are scheduled to your nodes. +- `ssh` (Attributes) Override SSH Config at the node level.
This is useful when nodes within the cluster come up with different SSH configuration. (see [below for nested schema](#nestedatt--spec--config--nodes--ssh)) +- `taints` (Attributes Set) A node taint lets you mark a node so that the scheduler avoids or prevents using it for certain Pods. Node taints can be used with tolerations to ensure that Pods aren't scheduled onto inappropriate nodes (see [below for nested schema](#nestedatt--spec--config--nodes--taints)) + + +### Nested Schema for `spec.config.nodes.ssh` + +**Optional** + +- `ip_address` (String) IP address to ssh into the node +- `passphrase` (String) SSH Passphrase +- `port` (String) SSH Port +- `private_key_path` (String) Specify Path to SSH private key +- `username` (String) SSH Username + + + +### Nested Schema for `spec.config.nodes.taints` + +**Required** + +- `key` (String) The taint key to be applied to a node. +- `effect` (String) The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+ +**Optional** + +- `value` (String) The taint value corresponding to the taint key + + + + + +### Nested Schema for `spec.config.cluster_ssh` + +**Optional** + +- `passphrase` (String) Provide ssh passphrase +- `port` (String) Provide ssh port +- `private_key_path` (String) Provide local path to the private key +- `username` (String) Provide the ssh username + + + +### Nested Schema for `spec.config.kubernetes_upgrade` + +**Required** + +- `params` (Attributes) (see [below for nested schema](#nestedatt--spec--config--kubernetes_upgrade--params)) +- `strategy` (String) Kubernetes upgrade strategy for worker nodes. Valid options are: concurrent or sequential + + +### Nested Schema for `spec.config.kubernetes_upgrade.params` + +**Required** + +- `worker_concurrency` (String) It can be the number of worker nodes or the percentage of worker nodes to be upgraded at the same time + + + + + +### Nested Schema for `spec.proxy` + +**Required** + +- `enabled` (Boolean) Enable this option if your infrastructure is running behind a proxy +- `http_proxy` (String) Configure proxy information with protocol, host and port information. + +**Optional** +- `allow_insecure_bootstrap` (Boolean) Select this option if the proxy is terminating/inspecting TLS traffic +- `bootstrap_ca` (String) Root CA certificate of the proxy +- `https_proxy` (String) Configure proxy information with protocol, host and port information. +- `no_proxy` (String) Comma separated list of hosts that need connectivity without proxy +- `proxy_auth` (String) + + + +### Nested Schema for `spec.sharing` + +**Required** + +- `enabled` (Boolean) Enable sharing for this resource. +- `projects` (Block List) The list of projects this resource is shared with. (see [below for nested schema](#nestedblock--spec--sharing--projects)) + + +### Nested Schema for `spec.sharing.projects` + +**Required** + +- `name` (String) The name of the project to share the resource.
+ + + + +### Nested Schema for `spec.system_components_placement` + +**Optional** +- `node_selector` (Map of String) Node selectors for pods that match node labels. +- `tolerations` (Attributes Set) Corresponding tolerations to match with Node taints (see [below for nested schema](#nestedatt--spec--system_components_placement--tolerations)) +- `daemon_set_override` (Attributes) Enabling this allows adding additional tolerations for the Rafay daemon sets to match the taints available in the nodes. (see [below for nested schema](#nestedatt--spec--system_components_placement--daemon_set_override)) + + + + + +### Nested Schema for `spec.system_components_placement.tolerations` + +- `key` (String) The taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be `Exists`; this combination means to match all values and all keys. +- `operator` (String) Operator represents a key's relationship to the value. Valid operators are `Exists` and `Equal`. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. +- `value` (String) Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. +- `effect` (String) Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+- `toleration_seconds` (Number) TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute) tolerates the taint + + + +### Nested Schema for `spec.system_components_placement.daemon_set_override` + +**Optional** + +- `daemon_set_tolerations` (Attributes Set) Tolerations for Rafay daemon sets (see [below for nested schema](#nestedatt--spec--system_components_placement--daemon_set_override--daemon_set_tolerations)) +- `node_selection_enabled` (Boolean) Enable to place Rafay daemon sets on nodes with matching labels only. + + +### Nested Schema for `spec.system_components_placement.daemon_set_override.daemon_set_tolerations` + +**Optional** + +- `key` (String) The taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be `Exists`; this combination means to match all values and all keys. +- `operator` (String) Operator represents a key's relationship to the value. Valid operators are `Exists` and `Equal` . Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. +- `value` (String) Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. +- `effect` (String) Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
+- `toleration_seconds` (Number) TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute) tolerates the taint diff --git a/docs/resources/resource_template.md b/docs/resources/resource_template.md index 7ca862b5..8c263703 100644 --- a/docs/resources/resource_template.md +++ b/docs/resources/resource_template.md @@ -36,6 +36,36 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { } lock_timeout_seconds = 1 } + driver { + data { + config { + type = "http" + http { + method = "GET" + endpoint = "https://jsonplaceholder.typicode.com/todos/1" + } + } + inputs { + name = "some-cc" + data { + variables { + name = "name" + value_type = "text" + value = "aws-elasticache" + options { + description = "this is the resource name to be applied" + sensitive = false + required = true + } + } + } + } + outputs = jsonencode({ + key1 = "value1" + key2 = "value2" + }) + } + } } repository_options { name = var.repo_name @@ -79,6 +109,7 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { } } on_failure = "continue" + execute_once = true } after { name = "internal-approval" @@ -204,6 +235,7 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--on_completion--options)) - `timeout_seconds` (Number) Specify the timeout in seconds - `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.on_completion.agents` @@ -280,6 +312,7 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--on_failure--options)) - `timeout_seconds` (Number) Specify the timeout in 
seconds - `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.on_failure.agents` @@ -355,7 +388,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--on_init--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.on_init.agents` @@ -431,7 +465,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--on_success--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.on_success.agents` @@ -541,6 +576,7 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `options` (Block List, Max: 1) Specify the hook options (see 
[below for nested schema](#nestedblock--spec--hooks--provider--terraform--deploy--plan--before--options)) - `timeout_seconds` (Number) Specify the timeout in seconds - `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.provider.terraform.deploy.plan.before.agents` @@ -617,7 +653,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--provider--terraform--deploy--plan--before--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.provider.terraform.deploy.plan.before.agents` @@ -703,7 +740,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--provider--terraform--deploy--plan--before--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- 
`execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.provider.terraform.deploy.plan.before.agents` @@ -782,7 +820,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--provider--terraform--deploy--plan--before--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.provider.terraform.deploy.plan.before.agents` @@ -867,7 +906,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--provider--terraform--deploy--plan--before--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.provider.terraform.deploy.plan.before.agents` @@ -945,7 +985,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 
1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--provider--terraform--deploy--plan--before--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.provider.terraform.deploy.plan.before.agents` @@ -1031,7 +1072,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--provider--terraform--deploy--plan--before--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.provider.terraform.deploy.plan.before.agents` @@ -1107,7 +1149,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--provider--terraform--deploy--plan--before--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested 
schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.provider.terraform.deploy.plan.before.agents` @@ -1200,7 +1243,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--provider--terraform--destroy--plan--before--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.provider.terraform.destroy.plan.before.agents` @@ -1277,7 +1321,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--provider--terraform--destroy--plan--before--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for 
`spec.hooks.provider.terraform.destroy.plan.before.agents` @@ -1364,7 +1409,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--provider--terraform--destroy--plan--before--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.provider.terraform.destroy.plan.before.agents` @@ -1442,7 +1488,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--provider--terraform--destroy--plan--before--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.provider.terraform.destroy.plan.before.agents` @@ -1527,7 +1574,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested 
schema](#nestedblock--spec--hooks--provider--terraform--destroy--plan--before--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.provider.terraform.destroy.plan.before.agents` @@ -1611,7 +1659,8 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `on_failure` (String) Specify the on failure action - `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--provider--terraform--destroy--plan--before--options)) - `timeout_seconds` (Number) Specify the timeout in seconds -- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.hooks.provider.terraform.destroy.plan.before.agents` @@ -1674,6 +1723,13 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `success_condition` (String) Specify the success condition of the request + +### Nested Schema for `spec.artifact_driver` + +***Optional*** +- `name` (String) name of the driver resource +- `data` (Block List, Max: 1) Inline workflow handler definition (see [below for nested schema](#nestedblock--spec--provider_options--driver--data)) + ### Nested Schema for `spec.provider_options` @@ -1704,6 +1760,7 @@ resource "rafay_resource_template" 
"aws-elasticache-rt-example" { - `config` (Block List, Max: 1) WorkflowHandler configuration (see [below for nested schema](#nestedblock--spec--provider_options--driver--data--config)) - `inputs` (Block List) Specify the input data (see [below for nested schema](#nestedblock--spec--provider_options--driver--data--inputs)) +- `outputs` (String) Specify the output data ### Nested Schema for `spec.provider_options.driver.data.config` @@ -1722,6 +1779,7 @@ resource "rafay_resource_template" "aws-elasticache-rt-example" { - `success_condition` (String) Specify the success condition - `timeout_seconds` (Number) Specify the timeout in seconds - `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) +- `execute_once` (Boolean) Specify if the hook should be executed only once ### Nested Schema for `spec.provider_options.driver.data.config.type` @@ -2479,19 +2537,24 @@ Optional: **Required** -- `name` (String) name of the hook -- `type` (String) Specify the type of hook, Available options are `container`, `http`, `driver`. +- `name` (String) name of the task +- `type` (String) Specify the type of task, Available options are `driver`. 
***Optional*** -- `agents` (Block List) Specify the resource ref agents (see [below for nested schema](#nestedblock--spec--hooks--on_completion--agents)) -- `depends_on` (List of String) specify hook dependencies -- `description` (String) description of hook +- `agents` (Block List) Specify the resource ref agents (see [below for nested schema](#nestedblock--spec--provider_options--custom--tasks--agents)) +- `depends_on` (List of String) specify task dependencies +- `description` (String) description of task - `driver` (Block List, Max: 1) Specify the driver responsible for execution (see [below for nested schema](#nestedblock--spec--provider_options--driver)) - `on_failure` (String) Specify the on failure action -- `options` (Block List, Max: 1) Specify the hook options (see [below for nested schema](#nestedblock--spec--hooks--on_completion--options)) - `timeout_seconds` (Number) Specify the timeout in seconds + +### Nested Schema for `spec.provider_options.custom.tasks.agents` + +***Required*** + +- `name` (String) name of the agent resource ### Nested Schema for `spec.provider_options.system` diff --git a/docs/resources/workload.md b/docs/resources/workload.md index 1e3f90ec..0b601036 100644 --- a/docs/resources/workload.md +++ b/docs/resources/workload.md @@ -107,7 +107,7 @@ resource "rafay_workload" "tftestworkload3" { --- -Create a workload of K8s type by uploading from local system. +Create a workload of K8s type by uploading from local system with environment placement. ```terraform resource "rafay_workload" "tftestworkload4" { @@ -119,6 +119,9 @@ resource "rafay_workload" "tftestworkload4" { namespace = "test-workload4" placement { selector = "rafay.dev/clusterName=cluster-1" + environment { + name = "test-env" + } } version = "v0" artifact { @@ -339,13 +342,25 @@ Optional: - `labels` - (Block List; Max: 1) A list of labels for the placement. 
(See [below for nested schema](#nestedblock--spec--placement--labels)) +***Optional*** + +- `environment` - (String) The environment placement for workload. (See [below for nested schema](#nestedblock--spec--placement--environment)) + ### Nested Schema for `spec.placement.labels` ***Required**** - `key` - (String) The key of the placement label. -- `value` - (String) The value of the placement label. +- `value` - (String) The value of the placement label. + + + +### Nested Schema for `spec.placement.environment` + +***Required**** + +- `name` - (String) The name of the environment. diff --git a/examples/resources/rafay_aks_cluster/resource.tf b/examples/resources/rafay_aks_cluster/resource.tf index f4e0c3b2..e81187b1 100644 --- a/examples/resources/rafay_aks_cluster/resource.tf +++ b/examples/resources/rafay_aks_cluster/resource.tf @@ -45,7 +45,7 @@ resource "rafay_aks_cluster" "demo-terraform" { } } auto_upgrade_profile { - upgrade_channel = "rapid" + upgrade_channel = "rapid" node_os_upgrade_channel = "NodeImage" } } @@ -66,12 +66,12 @@ resource "rafay_aks_cluster" "demo-terraform" { type = "VirtualMachineScaleSets" vm_size = "Standard_DS2_v2" } - type = "Microsoft.ContainerService/managedClusters/agentPools" - + type = "Microsoft.ContainerService/managedClusters/agentPools" + } maintenance_configurations { api_version = "2024-01-01" - name = "aksManagedNodeOSUpgradeSchedule" + name = "aksManagedNodeOSUpgradeSchedule" properties { maintenance_window { duration_hours = 4 @@ -89,13 +89,13 @@ resource "rafay_aks_cluster" "demo-terraform" { } maintenance_configurations { api_version = "2024-01-01" - name = "aksManagedAutoUpgradeSchedule" + name = "aksManagedAutoUpgradeSchedule" properties { maintenance_window { duration_hours = 4 schedule { weekly { - day_of_week = "Tuesday" + day_of_week = "Tuesday" interval_weeks = 1 } } @@ -166,8 +166,8 @@ resource "rafay_aks_cluster" "demo-terraform1" { type = "VirtualMachineScaleSets" vm_size = "Standard_DS2_v2" } - type = 
"Microsoft.ContainerService/managedClusters/agentPools" - + type = "Microsoft.ContainerService/managedClusters/agentPools" + } node_pools { apiversion = "2024-01-01" @@ -182,8 +182,8 @@ resource "rafay_aks_cluster" "demo-terraform1" { type = "VirtualMachineScaleSets" vm_size = "Standard_DS2_v2" } - type = "Microsoft.ContainerService/managedClusters/agentPools" - + type = "Microsoft.ContainerService/managedClusters/agentPools" + } } } @@ -345,8 +345,8 @@ resource "rafay_aks_cluster" "demo-terraform-existing-vnet" { "key" = "value" } } - type = "Microsoft.ContainerService/managedClusters/agentPools" - + type = "Microsoft.ContainerService/managedClusters/agentPools" + } node_pools { apiversion = "2024-01-01" @@ -371,8 +371,8 @@ resource "rafay_aks_cluster" "demo-terraform-existing-vnet" { "key" = "value" } } - type = "Microsoft.ContainerService/managedClusters/agentPools" - + type = "Microsoft.ContainerService/managedClusters/agentPools" + } } } @@ -445,8 +445,8 @@ resource "rafay_aks_cluster" "demo-terraform-scp" { } node_taints = ["app=infra:PreferNoSchedule"] } - type = "Microsoft.ContainerService/managedClusters/agentPools" - + type = "Microsoft.ContainerService/managedClusters/agentPools" + } } } @@ -536,8 +536,8 @@ resource "rafay_aks_cluster" "demo-terraform-authType-localAccounts-k8sRBAC" { type = "VirtualMachineScaleSets" vm_size = "Standard_B4ms" } - type = "Microsoft.ContainerService/managedClusters/agentPools" - + type = "Microsoft.ContainerService/managedClusters/agentPools" + } } } @@ -610,8 +610,8 @@ resource "rafay_aks_cluster" "demo-terraform-authType-azureAuthentication-k8sRBA type = "VirtualMachineScaleSets" vm_size = "Standard_B4ms" } - type = "Microsoft.ContainerService/managedClusters/agentPools" - + type = "Microsoft.ContainerService/managedClusters/agentPools" + } } } @@ -684,8 +684,8 @@ resource "rafay_aks_cluster" "demo-terraform-authType-azureAuthentication-azureR type = "VirtualMachineScaleSets" vm_size = "Standard_B4ms" } - type = 
"Microsoft.ContainerService/managedClusters/agentPools" - + type = "Microsoft.ContainerService/managedClusters/agentPools" + } } } @@ -769,8 +769,8 @@ resource "rafay_aks_cluster" "demo-terraform-multiple-ACR" { type = "VirtualMachineScaleSets" vm_size = "Standard_B4ms" } - type = "Microsoft.ContainerService/managedClusters/agentPools" - + type = "Microsoft.ContainerService/managedClusters/agentPools" + } } } @@ -859,10 +859,217 @@ resource "rafay_aks_cluster" "aks_cluster_azure_cni_overlay" { type = "VirtualMachineScaleSets" vm_size = "Standard_DS2_v2" } + type = "Microsoft.ContainerService/managedClusters/agentPools" + + } + } + } + } +} + +resource "rafay_aks_cluster" "demo-terraform-wi" { + apiversion = "rafay.io/v1alpha1" + kind = "Cluster" + metadata { + name = "gautham-aks-wi-4" + project = "defaultproject" + } + spec { + type = "aks" + blueprint = "minimal" + cloudprovider = "gautham-azure-creds" + cluster_config { + apiversion = "rafay.io/v1alpha1" + kind = "aksClusterConfig" + metadata { + name = "gautham-aks-wi-4" + } + spec { + resource_group_name = "gautham-rg-ci" + managed_cluster { + apiversion = "2024-01-01" + identity { + type = "SystemAssigned" + } + location = "centralindia" + properties { + api_server_access_profile { + enable_private_cluster = true + } + dns_prefix = "gautham-test-dns" + enable_rbac = true + kubernetes_version = "1.28.9" + network_profile { + network_plugin = "kubenet" + } + power_state { + code = "Running" + } + oidc_issuer_profile { + enabled = true + } + security_profile { + workload_identity { + enabled = true + } + } + } + type = "Microsoft.ContainerService/managedClusters" + } + node_pools { + apiversion = "2023-11-01" + name = "primary" + properties { + count = 2 + enable_auto_scaling = true + max_count = 2 + max_pods = 40 + min_count = 1 + mode = "System" + orchestrator_version = "1.28.9" + os_type = "Linux" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + } type = 
"Microsoft.ContainerService/managedClusters/agentPools" - + location = "centralindia" } } } } -} \ No newline at end of file +} + +resource "rafay_aks_cluster" "demo_terraform_wi_cluster" { + apiversion = "rafay.io/v1alpha1" + kind = "Cluster" + metadata { + name = "gautham-aks-wi-tf" + project = "defaultproject" + } + spec { + type = "aks" + blueprint = "minimal" + cloudprovider = "gautham-azure-creds" + cluster_config { + apiversion = "rafay.io/v1alpha1" + kind = "aksClusterConfig" + metadata { + name = "gautham-aks-wi-tf" + } + spec { + resource_group_name = "gautham-rg-ci" + managed_cluster { + apiversion = "2024-01-01" + identity { + type = "SystemAssigned" + } + location = "centralindia" + properties { + api_server_access_profile { + enable_private_cluster = true + } + dns_prefix = "gautham-test-dns" + enable_rbac = true + kubernetes_version = "1.29.2" + network_profile { + network_plugin = "kubenet" + } + power_state { + code = "Running" + } + oidc_issuer_profile { + enabled = true + } + security_profile { + workload_identity { + enabled = true + } + } + } + type = "Microsoft.ContainerService/managedClusters" + } + node_pools { + apiversion = "2023-11-01" + name = "primary" + properties { + count = 2 + enable_auto_scaling = true + max_count = 2 + max_pods = 40 + min_count = 1 + mode = "System" + orchestrator_version = "1.29.2" + os_type = "Linux" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + } + type = "Microsoft.ContainerService/managedClusters/agentPools" + location = "centralindia" + } + node_pools { + apiversion = "2023-11-01" + name = "secondary" + properties { + count = 2 + enable_auto_scaling = true + max_count = 2 + max_pods = 40 + min_count = 1 + mode = "System" + orchestrator_version = "1.29.2" + os_type = "Linux" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + } + type = "Microsoft.ContainerService/managedClusters/agentPools" + location = "centralindia" + } + } + } + } +} + +resource 
"rafay_aks_workload_identity" "demo-terraform-wi" { + metadata { + cluster_name = "gautham-aks-wi-tf" + project = "defaultproject" + } + + spec { + create_identity = true + + metadata { + name = "gautham-aks-wi-tf-uai-1" + location = "centralindia" + resource_group = "gautham-rg-ci" + tags = { + "owner" = "gautham" + "department" = "gautham" + } + } + + role_assignments { + name = "Key Vault Secrets User" + scope = "/subscriptions/a2252eb2-7a25-432b-a5ec-e18eba6f26b1/resourceGroups/qa-automation/providers/Microsoft.KeyVault/vaults/gautham-rauto-kv-1" + } + + service_accounts { + create_account = true + + metadata { + name = "gautham-tf-wi-1-sa-11" + namespace = "aks-wi-ns" + annotations = { + "role" = "dev" + } + labels = { + "owner" = "gautham" + "department" = "gautham" + } + } + } + } + + depends_on = [rafay_aks_cluster.demo_terraform_wi_cluster] +} + diff --git a/examples/resources/rafay_aks_cluster_v3/resource.tf b/examples/resources/rafay_aks_cluster_v3/resource.tf index f4ad4bee..1ad42082 100644 --- a/examples/resources/rafay_aks_cluster_v3/resource.tf +++ b/examples/resources/rafay_aks_cluster_v3/resource.tf @@ -55,7 +55,7 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { } } auto_upgrade_profile { - upgrade_channel = "rapid" + upgrade_channel = "rapid" node_os_upgrade_channel = "NodeImage" } } @@ -79,7 +79,7 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { type = "Microsoft.ContainerService/managedClusters/agentPools" } maintenance_configurations { - name = "aksManagedAutoUpgradeSchedule" + name = "aksManagedAutoUpgradeSchedule" api_version = "2024-01-01" properties { maintenance_window { @@ -87,7 +87,7 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { schedule { weekly { interval_weeks = 1 - day_of_week = "Friday" + day_of_week = "Friday" } } start_date = "2024-07-19" @@ -98,7 +98,7 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { type = "Microsoft.ContainerService/managedClusters/maintenanceConfigurations" } maintenance_configurations { 
- name = "aksManagedNodeOSUpgradeSchedule" + name = "aksManagedNodeOSUpgradeSchedule" api_version = "2024-01-01" properties { maintenance_window { @@ -106,7 +106,7 @@ resource "rafay_aks_cluster_v3" "demo-terraform" { schedule { weekly { interval_weeks = 1 - day_of_week = "Friday" + day_of_week = "Friday" } } start_date = "2024-07-19" @@ -445,3 +445,355 @@ resource "rafay_aks_cluster_v3" "demo-terraform-tf" { } } } + +resource "rafay_aks_cluster_v3" "demo-terraform" { + metadata { + name = "gautham-aks-v3-tf-1" + project = "defaultproject" + } + spec { + type = "aks" + blueprint_config { + name = "default-aks" + } + cloud_credentials = "gautham-azure-creds" + config { + kind = "aksClusterConfig" + metadata { + name = "gautham-aks-v3-tf-1" + } + spec { + resource_group_name = "gautham-rg-ci" + managed_cluster { + api_version = "2023-11-01" + sku { + name = "Base" + tier = "Free" + } + identity { + type = "SystemAssigned" + } + location = "centralindia" + tags = { + "email" = "mvgautham@rafay.co" + "env" = "dev" + } + properties { + api_server_access_profile { + enable_private_cluster = true + } + dns_prefix = "gautham-aks-v3-tf-2401202303-dns" + kubernetes_version = "1.28.9" + network_profile { + network_plugin = "kubenet" + load_balancer_sku = "standard" + } + power_state { + code = "Running" + } + + oidc_issuer_profile { + enabled = true + } + security_profile { + workload_identity { + enabled = false + } + } + } + type = "Microsoft.ContainerService/managedClusters" + } + node_pools { + api_version = "2023-11-01" + name = "primary" + location = "centralindia" + properties { + count = 1 + enable_auto_scaling = true + max_count = 1 + max_pods = 40 + min_count = 1 + mode = "System" + orchestrator_version = "1.28.9" + os_type = "Linux" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + } + type = "Microsoft.ContainerService/managedClusters/agentPools" + } + } + } + } +} + +resource "rafay_aks_cluster_v3" "demo-terraform" { + metadata { + name = 
"gautham-tf-wi-1" + project = "defaultproject" + } + spec { + type = "aks" + blueprint_config { + name = "minimal" + } + cloud_credentials = "gautham-azure-creds" + system_components_placement { + node_selector = { + app = "infra" + dedicated = "true" + } + tolerations { + effect = "PreferNoSchedule" + key = "app" + operator = "Equal" + value = "infra" + } + daemon_set_override { + node_selection_enabled = false + tolerations { + key = "app1dedicated" + value = true + effect = "NoSchedule" + operator = "Equal" + } + } + } + config { + kind = "aksClusterConfig" + metadata { + name = "gautham-tf-wi-1" + } + spec { + resource_group_name = "gautham-rg-ci" + managed_cluster { + api_version = "2023-11-01" + sku { + name = "Base" + tier = "Free" + } + identity { + type = "SystemAssigned" + } + location = "centralindia" + tags = { + "email" = "gautham@rafay.co" + "env" = "dev" + } + properties { + api_server_access_profile { + enable_private_cluster = true + } + dns_prefix = "aks-v3-tf-2401202303-dns" + kubernetes_version = "1.28.9" + network_profile { + network_plugin = "kubenet" + load_balancer_sku = "standard" + } + power_state { + code = "Running" + } + addon_profiles { + http_application_routing { + enabled = true + } + azure_policy { + enabled = true + } + azure_keyvault_secrets_provider { + enabled = true + config { + enable_secret_rotation = false + rotation_poll_interval = "2m" + } + } + } + oidc_issuer_profile { + enabled = true + } + security_profile { + workload_identity { + enabled = true + } + } + } + type = "Microsoft.ContainerService/managedClusters" + } + node_pools { + api_version = "2023-11-01" + name = "primary" + location = "centralindia" + properties { + count = 1 + enable_auto_scaling = true + max_count = 1 + max_pods = 40 + min_count = 1 + mode = "System" + orchestrator_version = "1.28.9" + os_type = "Linux" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + } + type = "Microsoft.ContainerService/managedClusters/agentPools" + } + } + 
} + } +} + +resource "rafay_aks_cluster_v3" "demo-terraform-wi-cluster" { + metadata { + name = "gautham-tf-wi-v3" + project = "defaultproject" + } + spec { + type = "aks" + blueprint_config { + name = "minimal" + } + cloud_credentials = "gautham-azure-creds" + + config { + kind = "aksClusterConfig" + metadata { + name = "gautham-tf-wi-v3" + } + spec { + resource_group_name = "gautham-rg-ci" + managed_cluster { + api_version = "2023-11-01" + sku { + name = "Base" + tier = "Free" + } + identity { + type = "SystemAssigned" + } + location = "centralindia" + tags = { + "email" = "gautham@rafay.co" + "env" = "dev" + } + properties { + api_server_access_profile { + enable_private_cluster = true + } + dns_prefix = "aks-v3-tf-2401202303-dns" + kubernetes_version = "1.29.2" + network_profile { + network_plugin = "kubenet" + load_balancer_sku = "standard" + } + power_state { + code = "Running" + } + addon_profiles { + http_application_routing { + enabled = true + } + azure_policy { + enabled = true + } + azure_keyvault_secrets_provider { + enabled = true + config { + enable_secret_rotation = false + rotation_poll_interval = "2m" + } + } + } + oidc_issuer_profile { + enabled = true + } + security_profile { + workload_identity { + enabled = true + } + } + } + type = "Microsoft.ContainerService/managedClusters" + } + node_pools { + api_version = "2023-11-01" + name = "primary" + location = "centralindia" + properties { + count = 2 + enable_auto_scaling = true + max_count = 2 + max_pods = 40 + min_count = 2 + mode = "System" + orchestrator_version = "1.29.2" + os_type = "Linux" + type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + } + type = "Microsoft.ContainerService/managedClusters/agentPools" + } + node_pools { + api_version = "2023-11-01" + name = "secondary" + location = "centralindia" + properties { + count = 2 + enable_auto_scaling = true + max_count = 2 + max_pods = 40 + min_count = 2 + mode = "System" + orchestrator_version = "1.29.2" + os_type = "Linux" + 
type = "VirtualMachineScaleSets" + vm_size = "Standard_DS2_v2" + } + type = "Microsoft.ContainerService/managedClusters/agentPools" + } + } + } + } +} +resource "rafay_aks_workload_identity" "demo-terraform-wi" { + metadata { + cluster_name = "gautham-tf-wi-v3" + project = "defaultproject" + } + + spec { + create_identity = true + + metadata { + name = "gautham-tf-wi-v3-uai-1" + location = "centralindia" + resource_group = "shobhit-rg" + tags = { + "owner" = "gautham" + "department" = "gautham" + "app" = "gautham" + } + } + + role_assignments { + name = "Key Vault Secrets User" + scope = "/subscriptions/a2252eb2-7a25-432b-a5ec-e18eba6f26b1/resourceGroups/qa-automation/providers/Microsoft.KeyVault/vaults/gautham-rauto-kv-1" + } + + service_accounts { + create_account = true + + metadata { + name = "gautham-tf-wi-v3-sa-11" + namespace = "aks-wi-ns" + annotations = { + "role" = "dev" + } + labels = { + "owner" = "gautham" + "department" = "gautham" + } + } + } + } + + depends_on = [rafay_aks_cluster_v3.demo-terraform-wi-cluster] +} diff --git a/examples/resources/rafay_aks_workload_identity/providers.tf b/examples/resources/rafay_aks_workload_identity/providers.tf new file mode 100644 index 00000000..02394ea7 --- /dev/null +++ b/examples/resources/rafay_aks_workload_identity/providers.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + rafay = { + version = ">= 1.1.4" + "source" : "registry.terraform.io/RafaySystems/rafay", + } + } +} + +provider "rafay" { + provider_config_file = var.rafay_config_file +} diff --git a/examples/resources/rafay_aks_workload_identity/resource.tf b/examples/resources/rafay_aks_workload_identity/resource.tf new file mode 100644 index 00000000..92cabde7 --- /dev/null +++ b/examples/resources/rafay_aks_workload_identity/resource.tf @@ -0,0 +1,143 @@ +# Workload Identity Resource with v1 cluster resource +resource "rafay_aks_workload_identity" "demo-terraform" { + metadata { + cluster_name = "gautham-tf-wi-1" + project = 
"defaultproject" + } + + spec { + create_identity = true + + metadata { + name = "gautham-tf-wi-1-uai-1" + location = "centralindia" + resource_group = "gautham-rg-ci" + tags = { + "owner" = "gautham" + "department" = "gautham" + } + } + + role_assignments { + name = "Key Vault Secrets User" + scope = "/subscriptions/a2252eb2-7a25-432b-a5ec-e18eba6f26b1/resourceGroups/gautham-rg-ci/providers/Microsoft.KeyVault/vaults/gautham-keyvault" + } + + service_accounts { + create_account = true + + metadata { + name = "gautham-tf-wi-1-sa-10" + namespace = "default" + annotations = { + "role" = "dev" + } + labels = { + "owner" = "gautham" + "department" = "gautham" + } + } + } + + } + + # Depends On is mandatory for the Workload Identity Resource to be created after the cluster resource is created. + depends_on = [rafay_aks_cluster.demo-terraform-wi-cluster] +} + +# Workload Identity resource for soft creation with pre created managed identity and pre created service account +resource "rafay_aks_workload_identity" "demo-terraform" { + metadata { + cluster_name = "gautham-tf-wi-1" + project = "defaultproject" + } + + spec { + create_identity = false + + metadata { + name = "gautham-tf-wi-1-uai-1" + location = "centralindia" + resource_group = "gautham-rg-ci" + client_id = "a2252eb2-7a25-432b-a5ec-e18eba6f26b1" + principal_id = "a2252eb2-7a25-432b-a5ec-e18eba6f26b1" + tags = { + "owner" = "gautham" + "department" = "gautham" + } + } + + role_assignments { + name = "Key Vault Secrets User" + scope = "/subscriptions/a2252eb2-7a25-432b-a5ec-e18eba6f26b1/resourceGroups/gautham-rg-ci/providers/Microsoft.KeyVault/vaults/gautham-keyvault" + } + + service_accounts { + create_account = false + + metadata { + name = "gautham-tf-wi-1-sa-10" + namespace = "default" + annotations = { + "role" = "dev" + } + labels = { + "owner" = "gautham" + "department" = "gautham" + } + } + } + + } + + # Depends On is mandatory for the Workload Identity Resource to be created after the cluster resource is 
created. + depends_on = [rafay_aks_cluster.demo-terraform-wi-cluster] +} + +# Workload Identity Resource with v3 cluster resource +resource "rafay_aks_workload_identity" "demo-terraform" { + metadata { + cluster_name = "gautham-tf-wi-1" + project = "defaultproject" + } + + spec { + create_identity = true + + metadata { + name = "gautham-tf-wi-1-uai-1" + location = "centralindia" + resource_group = "gautham-rg-ci" + tags = { + "owner" = "gautham" + "department" = "gautham" + } + } + + role_assignments { + name = "Key Vault Secrets User" + scope = "/subscriptions/a2252eb2-7a25-432b-a5ec-e18eba6f26b1/resourceGroups/gautham-rg-ci/providers/Microsoft.KeyVault/vaults/gautham-keyvault" + } + + service_accounts { + create_account = true + + metadata { + name = "gautham-tf-wi-1-sa-10" + namespace = "default" + annotations = { + "role" = "dev" + } + labels = { + "owner" = "gautham" + "department" = "gautham" + } + } + } + } + + # Depends On is mandatory for the Workload Identity Resource to be created after the cluster resource is created. 
+ depends_on = [rafay_aks_cluster_v3.demo-terraform-wi-cluster] +} + + diff --git a/examples/resources/rafay_aks_workload_identity/variables.tf b/examples/resources/rafay_aks_workload_identity/variables.tf new file mode 100644 index 00000000..264d4aa7 --- /dev/null +++ b/examples/resources/rafay_aks_workload_identity/variables.tf @@ -0,0 +1,5 @@ +variable "rafay_config_file" { + description = "rafay provider config file for authentication" + sensitive = true + default = "/Users/user1/.rafay/cli/config.json" +} diff --git a/examples/resources/rafay_break_glass_access/providers.tf b/examples/resources/rafay_break_glass_access/providers.tf new file mode 100644 index 00000000..7a06a6e4 --- /dev/null +++ b/examples/resources/rafay_break_glass_access/providers.tf @@ -0,0 +1,12 @@ +terraform { + required_providers { + rafay = { + version = ">= 0.1" + source = "registry.terraform.io/RafaySystems/rafay" + } + } +} + +provider "rafay" { + provider_config_file = var.rafay_config_file +} diff --git a/examples/resources/rafay_break_glass_access/resource.tf b/examples/resources/rafay_break_glass_access/resource.tf new file mode 100644 index 00000000..aa404125 --- /dev/null +++ b/examples/resources/rafay_break_glass_access/resource.tf @@ -0,0 +1,19 @@ +resource "rafay_breakglassaccess" "test_user" { + metadata { + name = "test@rafay.co" + } + spec { + groups { + group_expiry { + expiry = 7 + name = "grp3" + } + group_expiry { + expiry = 8 + name = "grp1" + start_time = "2024-09-20T08:00:00Z" + } + user_type = "local" + } + } +} diff --git a/examples/resources/rafay_break_glass_access/terraform.template.tfvars b/examples/resources/rafay_break_glass_access/terraform.template.tfvars new file mode 100644 index 00000000..3e52a819 --- /dev/null +++ b/examples/resources/rafay_break_glass_access/terraform.template.tfvars @@ -0,0 +1 @@ +provider_config_file = "" diff --git a/examples/resources/rafay_break_glass_access/variables.tf b/examples/resources/rafay_break_glass_access/variables.tf 
new file mode 100644 index 00000000..264d4aa7 --- /dev/null +++ b/examples/resources/rafay_break_glass_access/variables.tf @@ -0,0 +1,5 @@ +variable "rafay_config_file" { + description = "rafay provider config file for authentication" + sensitive = true + default = "/Users/user1/.rafay/cli/config.json" +} diff --git a/examples/resources/rafay_cost_profile/resource.tf b/examples/resources/rafay_cost_profile/resource.tf index bf634144..ad05cecc 100644 --- a/examples/resources/rafay_cost_profile/resource.tf +++ b/examples/resources/rafay_cost_profile/resource.tf @@ -56,6 +56,24 @@ resource "rafay_cost_profile" "tfdemocostprofile-azure" { } } } +#Example cost profile for Gcp +resource "rafay_cost_profile" "tfdemocostprofile-gcp" { + metadata { + name = "tfdemocostprofile-gcp" + project = "terraform" + } + spec { + version = "v0" + provider_type = "gcp" + installation_params { + gcp { + gcp_credentials { + cloud_credentials_name = "sample" + } + } + } + } +} #Example cost profile for Other providers resource "rafay_cost_profile" "tfdemocostprofile-other" { metadata { diff --git a/examples/resources/rafay_eks_pod_identity/provider.tf b/examples/resources/rafay_eks_pod_identity/provider.tf new file mode 100644 index 00000000..60654faf --- /dev/null +++ b/examples/resources/rafay_eks_pod_identity/provider.tf @@ -0,0 +1,24 @@ +terraform { + required_providers { + rafay = { + version = ">= 0.1" + source = "registry.terraform.io/RafaySystems/rafay" + } + } +} + +variable "rafay_config_file" { + description = "rafay provider config file for authentication" + default = "/Users/gopim/Downloads/gopi_org-gopi@rafay.co.json" + sensitive = true +} + +# variable "rafay_config_file" { +# description = "rafay provider config file for authentication" +# sensitive = true +# default = "/Users/user1/.rafay/cli/config.json" +# } + +provider "rafay" { + provider_config_file = var.rafay_config_file +} diff --git a/examples/resources/rafay_eks_pod_identity/resource.tf 
b/examples/resources/rafay_eks_pod_identity/resource.tf new file mode 100644 index 00000000..038bf235 --- /dev/null +++ b/examples/resources/rafay_eks_pod_identity/resource.tf @@ -0,0 +1,12 @@ +resource "rafay_eks_pod_identity" "pod_identity_1" { + metadata { + cluster_name = "eks_cluster_name" + project_name = "defaultproject" + } + spec { + service_account_name = "svc_one" + namespace = "rafay-demo" + create_service_account = true + role_arn = "arn:aws:iam::679196758854:role/rafay-eks-full" + } +} \ No newline at end of file diff --git a/examples/resources/rafay_environment_template/resource.tf b/examples/resources/rafay_environment_template/resource.tf index 1810fee7..ac27ec0b 100644 --- a/examples/resources/rafay_environment_template/resource.tf +++ b/examples/resources/rafay_environment_template/resource.tf @@ -57,6 +57,7 @@ resource "rafay_environment_template" "aws-et" { } } timeout_seconds = 1000 + execute_once = true } } agents { diff --git a/examples/resources/rafay_mks_cluster/main.tf b/examples/resources/rafay_mks_cluster/main.tf new file mode 100644 index 00000000..e688b5d1 --- /dev/null +++ b/examples/resources/rafay_mks_cluster/main.tf @@ -0,0 +1,136 @@ +terraform { + required_providers { + rafay = { + version = "1.1.28" + source = "rafay/rafay" + } + } +} + +provider "rafay" { + provider_config_file = "/Users/vihari/Downloads/rafay-org-vihari@rafay.co.json" +} + + +resource "rafay_mks_cluster" "mks-sample-cluster" { + api_version = "infra.k8smgmt.io/v3" + kind = "Cluster" + metadata = { + name = "vihari-mks-tf-cluster" + project = "defaultproject" + } + spec = { + blueprint = { + name = "minimal" + } + config = { + auto_approve_nodes = true + dedicated_control_plane = false + kubernetes_version = "v1.30.4" + installer_ttl = 365 + kubelet_extra_args = { + "max-pods" = "900" + } + kubernetes_upgrade = { + strategy = "sequential" + params = { + worker_concurrency = "50%" + } + } + network = { + cni = { + name = "Calico" + version = "3.26.1" + } + 
pod_subnet = "10.244.0.0/16" + service_subnet = "10.96.0.0/12" + } + nodes = { + "vih-a4" = { + arch = "amd64" + hostname = "vih-a4" + operating_system = "Ubuntu22.04" + private_ip = "10.0.0.136" + roles = ["ControlPlane", "Worker", "Storage"] + ssh = { + ip_address = "129.146.58.186" + port = "22" + private_key_path = "/Users/vihari/.ssh/vihari_oci_ssh" + username = "ubuntu" + } + }, + "vih-a5" = { + arch = "amd64" + hostname = "vih-a5" + operating_system = "Ubuntu22.04" + kubelet_extra_args = { + "max-pods" = "600" + } + private_ip = "10.0.0.183" + roles = ["Worker", "Storage"] + ssh = { + ip_address = "129.146.6.223" + port = "22" + private_key_path = "/Users/vihari/.ssh/vihari_oci_ssh" + username = "ubuntu" + } + } + } + cluster_ssh = { + port = "22" + private_key_path = "/Users/vihari/.ssh/vihari_oci_ssh" + username = "ubuntu" + } + # "hostname2" = { + # arch = "amd64" + # hostname = "hostname2" + # operating_system = "Ubuntu22.04" + # private_ip = "10.12.114.59" + # roles = ["Worker"] + # labels = { + # "app" = "infra" + # "infra" = "true" + # } + # taints = [ + # { + # effect = "NoSchedule" + # key = "infra" + # value = "true" + # }, + # { + # effect = "NoSchedule" + # key = "app" + # value = "infra" + # }, + # ] + # } + + # system_components_placement = { + # node_selector = { + # "app" = "infra" + # "infra" = "true" + # } + # tolerations = [ + # { + # effect = "NoSchedule" + # key = "infra" + # operator = "Equal" + # value = "true" + # }, + # { + # effect = "NoSchedule" + # key = "app" + # operator = "Equal" + # value = "infra" + # }, + # { + # effect = "NoSchedule" + # key = "app" + # operator = "Equal" + # value = "platform" + # }, + # ] + # } + } + } +} diff --git a/examples/resources/rafay_resource_template/resource.tf b/examples/resources/rafay_resource_template/resource.tf index 48ef2b93..8731e85d 100644 --- a/examples/resources/rafay_resource_template/resource.tf +++ b/examples/resources/rafay_resource_template/resource.tf @@ -61,6 +61,7 @@ resource 
"rafay_resource_template" "aws-elasticache" { } } on_failure = "continue" + execute_once = true } after { name = "internal-approval" diff --git a/go.mod b/go.mod index 9e59d74d..230e4487 100644 --- a/go.mod +++ b/go.mod @@ -1,19 +1,21 @@ module github.com/RafaySystems/terraform-provider-rafay -go 1.22.3 - -toolchain go1.22.4 +go 1.22.5 require ( - github.com/RafaySystems/edge-common v1.24.1-0.20240813083948-ba69bed0bc1b - github.com/RafaySystems/rafay-common v1.29.1-rc2.0.20240819105128-51987ba404a9 - github.com/RafaySystems/rctl v1.29.1-0.20240813175545-9dc352b7affb + github.com/RafaySystems/edge-common v1.24.1-0.20240905053610-494a83a439f8 + github.com/RafaySystems/rafay-common v1.29.1-rc2.0.20241018052314-d9046569f4e0 + github.com/RafaySystems/rctl v1.29.1-0.20240930095428-b93a8a4cfea9 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/go-yaml/yaml v2.1.0+incompatible github.com/goccy/go-yaml v1.9.5 github.com/google/go-cmp v0.6.0 - github.com/hashicorp/terraform-plugin-docs v0.9.0 + github.com/hashicorp/terraform-plugin-docs v0.19.4 + github.com/hashicorp/terraform-plugin-framework v1.11.0 + github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 + github.com/hashicorp/terraform-plugin-mux v0.16.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 + github.com/hashicorp/terraform-plugin-testing v1.10.0 github.com/julienschmidt/httprouter v1.3.0 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 @@ -23,13 +25,18 @@ require ( ) require ( + github.com/BurntSushi/toml v1.4.0 // indirect github.com/IBM/sarama v1.43.2 // indirect + github.com/Kunde21/markdownfmt/v3 v3.1.0 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect + github.com/RafaySystems/eaas-playground/proto v0.0.0-20240927124737-3b8ed415ca8a // indirect + github.com/RafaySystems/paas-common v0.0.0-20241008170310-7609b27cde4e // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/beorn7/perks v1.0.1 // 
indirect github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/blang/semver/v4 v4.0.0 // indirect + github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect @@ -37,6 +44,8 @@ require ( github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gomatic/clock v1.0.0 // indirect github.com/google/gnostic v0.6.9 // indirect + github.com/hashicorp/cli v1.1.6 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/invopop/yaml v0.3.1 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect @@ -52,8 +61,14 @@ require ( github.com/prometheus/common v0.54.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/segmentio/asm v1.2.0 // indirect - golang.org/x/mod v0.18.0 // indirect + github.com/yuin/goldmark v1.7.1 // indirect + github.com/yuin/goldmark-meta v1.1.0 // indirect + go.abhg.dev/goldmark/frontmatter v0.2.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 // indirect + golang.org/x/mod v0.19.0 // indirect golang.org/x/tools v0.22.0 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect k8s.io/apiextensions-apiserver v0.30.1 // indirect k8s.io/cluster-bootstrap v0.30.1 // indirect @@ -68,7 +83,6 @@ require ( github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect - github.com/RafaySystems/eaas-playground v0.0.0-20240819062521-b1b317f32232 // indirect github.com/RoaringBitmap/roaring v1.9.4 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/armon/go-radix v1.0.0 // indirect @@ -105,7 +119,7 @@ 
require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -115,13 +129,13 @@ require ( github.com/hashicorp/go-plugin v1.6.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.7.0 // indirect - github.com/hashicorp/hc-install v0.6.4 // indirect - github.com/hashicorp/hcl/v2 v2.20.1 // indirect + github.com/hashicorp/hc-install v0.8.0 // indirect + github.com/hashicorp/hcl/v2 v2.21.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.21.0 // indirect github.com/hashicorp/terraform-json v0.22.1 // indirect - github.com/hashicorp/terraform-plugin-go v0.23.0 // indirect - github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect + github.com/hashicorp/terraform-plugin-go v0.23.0 + github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect @@ -142,7 +156,6 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect - github.com/mitchellh/cli v1.1.5 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect @@ -160,8 +173,7 @@ require ( github.com/processout/grpc-go-pool v1.2.2-0.20200228131710-c0fcf3af0014 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rivo/uniseg v0.4.2 
// indirect - github.com/rs/xid v1.5.0 // indirect - github.com/russross/blackfriday v1.6.0 // indirect + github.com/rs/xid v1.6.0 // indirect github.com/segmentio/encoding v0.4.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/spacemonkeygo/httpsig v0.0.0-20181218213338-2605ae379e47 // indirect @@ -181,35 +193,33 @@ require ( github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - github.com/zclconf/go-cty v1.14.4 // indirect + github.com/zclconf/go-cty v1.15.0 // indirect go.mongodb.org/mongo-driver v1.15.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect - go.opentelemetry.io/otel v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.27.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/net v0.26.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.21.0 // indirect - golang.org/x/term v0.21.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/crypto v0.26.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/term v0.23.0 // indirect + golang.org/x/text v0.17.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240610135401-a8a62080eff3 // indirect - google.golang.org/genproto/googleapis/api 
v0.0.0-20240610135401-a8a62080eff3 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240610135401-a8a62080eff3 // indirect - google.golang.org/grpc v1.64.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect + google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.34.2 gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/api v1.16.4 k8s.io/client-go v12.0.0+incompatible // indirect - k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a // indirect - k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 sigs.k8s.io/controller-runtime v0.18.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect @@ -220,11 +230,11 @@ replace ( cloud.google.com/go => cloud.google.com/go v0.100.2 // github.com/RafaySystems/eaas-playground => ../eaas-playground // github.com/RafaySystems/rafay-common => ../rafay-common - // github.com/RafaySystems/rctl => github.com/RafaySystems/rctl v1.5.14 + // github.com/RafaySystems/rctl => ../rctl // github.com/RafaySystems/terraform-provider-rafay/rafay => ../rafay // github.com/RafaySystems/rctl => ../rctl github.com/RafaySystems/terraform-provider-rafay/ipnet => ../ipnet - github.com/getkin/kin-openapi => github.com/getkin/kin-openapi v0.120.0 + //github.com/getkin/kin-openapi => github.com/getkin/kin-openapi v0.120.0 github.com/go-pg/pg => github.com/go-pg/pg v6.15.1+incompatible k8s.io/api => k8s.io/api v0.26.2 k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.2 diff --git a/go.sum b/go.sum index a830e8e0..78683247 100644 --- a/go.sum +++ b/go.sum @@ -3,19 +3,21 @@ cloud.google.com/go/compute v0.1.0/go.mod 
h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTB dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw= github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ= +github.com/Kunde21/markdownfmt/v3 v3.1.0 h1:KiZu9LKs+wFFBQKhrZJrFZwtLnCCWJahL+S+E/3VnM0= +github.com/Kunde21/markdownfmt/v3 v3.1.0/go.mod h1:tPXN1RTyOzJwhfHoon9wUr4HGYmWgVxSQN6VBJDkrVc= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod 
h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= @@ -23,14 +25,16 @@ github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg= github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= -github.com/RafaySystems/eaas-playground v0.0.0-20240819062521-b1b317f32232 h1:OyvJD5nwBHjM7UUS9nX9DNLKeq2BZfWb/Tcsz3K3DHY= -github.com/RafaySystems/eaas-playground v0.0.0-20240819062521-b1b317f32232/go.mod h1:YN2q7XActj/AHM4F+6Czsip5++l7asf/hxK8YZjrmhU= -github.com/RafaySystems/edge-common v1.24.1-0.20240813083948-ba69bed0bc1b h1:byYrgKmgq+JNPm9u/6/DJQSu8AW/TdgPhgrnzpmHBBs= -github.com/RafaySystems/edge-common v1.24.1-0.20240813083948-ba69bed0bc1b/go.mod h1:Lp/hUW/sjTyEgvhmRsnpDQ0c5ccRHHd3DQC/nbJ/jb4= -github.com/RafaySystems/rafay-common v1.29.1-rc2.0.20240819105128-51987ba404a9 h1:r5kpECouDiIcNvKqBPOe5Ly1v4oF6vcnhtDU6anlYZA= -github.com/RafaySystems/rafay-common v1.29.1-rc2.0.20240819105128-51987ba404a9/go.mod h1:s28/gkN7ii/bWnsfbnS5uxxYH1aaA/rm4JZu61rLdkM= -github.com/RafaySystems/rctl v1.29.1-0.20240813175545-9dc352b7affb h1:bozmqi6NqEYngix6hO1TnO6bMTn4dRRyJPaiOubIwO8= -github.com/RafaySystems/rctl v1.29.1-0.20240813175545-9dc352b7affb/go.mod h1:lsuKgPhI39u2kWzSOUoFI5jyWnldUif7HTrs4sVzXSc= +github.com/RafaySystems/eaas-playground/proto v0.0.0-20240927124737-3b8ed415ca8a h1:1QRFP1L8Is7nM/FTu+hKfWyJglts92rAv9mY3k0EZIU= +github.com/RafaySystems/eaas-playground/proto v0.0.0-20240927124737-3b8ed415ca8a/go.mod h1:lAle7/tiVlakfiXdJKnsf6qNdLkoWS712T4pAXytSOQ= +github.com/RafaySystems/edge-common v1.24.1-0.20240905053610-494a83a439f8 h1:Pce1iLS0oXLGmNh0VSXBfiH/u8hHd6xRJkibmL3Aan4= +github.com/RafaySystems/edge-common 
v1.24.1-0.20240905053610-494a83a439f8/go.mod h1:5mRn2xN25Y8mpObHyOwDB3OLudeuglzHTQX9IiNHxhM= +github.com/RafaySystems/paas-common v0.0.0-20241008170310-7609b27cde4e h1:imstxcBUw0lj9B6mwfVTF6s6wasV2XdXf4D1SJ9ilNs= +github.com/RafaySystems/paas-common v0.0.0-20241008170310-7609b27cde4e/go.mod h1:cbWiDnN8m1Jpgti3yL39uYW4LTQHHMt5KkTTSRxMcQs= +github.com/RafaySystems/rafay-common v1.29.1-rc2.0.20241018052314-d9046569f4e0 h1:Hu04hKKLP62YPgBRShh5eiPGAAYlDk7kt87H3UuZl58= +github.com/RafaySystems/rafay-common v1.29.1-rc2.0.20241018052314-d9046569f4e0/go.mod h1:eWesWjUlPDtTQ/nB/vo+NL3kkyFvlTZiqr2tNuoc+xM= +github.com/RafaySystems/rctl v1.29.1-0.20240930095428-b93a8a4cfea9 h1:7im+HSvv3QyYOxxo7ovPaeJIzWbBIQskg0T/CfkuyAw= +github.com/RafaySystems/rctl v1.29.1-0.20240930095428-b93a8a4cfea9/go.mod h1:v8nXVykruedNqmaYF5STEQJoqbvA5rC/378Of+p8XrI= github.com/RoaringBitmap/roaring v1.9.4 h1:yhEIoH4YezLYT04s1nHehNO64EKFTop/wBhxv2QzDdQ= github.com/RoaringBitmap/roaring v1.9.4/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= github.com/abhay-krishna/cluster-api v1.4.2-eksa.1 h1:mO+idOdh9Bpumgo41WJcrASPtJGSgmRxHNiwtUdUa+E= @@ -41,7 +45,6 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= @@ -59,6 +62,8 @@ github.com/blang/semver v3.5.1+incompatible 
h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdn github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= +github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bufbuild/protocompile v0.10.0 h1:+jW/wnLMLxaCEG8AX9lD0bQ5v9h1RUiMKOBOT5ll9dM= github.com/bufbuild/protocompile v0.10.0/go.mod h1:G9qQIQo0xZ6Uyj6CMNz0saGmx2so+KONo8/KrELABiY= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= @@ -101,7 +106,6 @@ github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lSh github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= @@ -115,8 +119,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/getkin/kin-openapi v0.120.0 h1:MqJcNJFrMDFNc07iwE8iFC5eT2k/NPUFDIpNeiZv8Jg= -github.com/getkin/kin-openapi v0.120.0/go.mod 
h1:PCWw/lfBrJY4HcdqE3jj+QFkaFK8ABoqo7PvqVhXXqw= +github.com/getkin/kin-openapi v0.123.0 h1:zIik0mRwFNLyvtXK274Q6ut+dPh6nlxBp0x7mNrPhs8= +github.com/getkin/kin-openapi v0.123.0/go.mod h1:wb1aSZA/iWmorQP9KTAS/phLj/t17B5jT7+fS8ed9NM= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -238,8 +242,10 @@ github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+ github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/hashicorp/cli v1.1.6 h1:CMOV+/LJfL1tXCOKrgAX0uRKnzjj/mpmqNXloRSy2K8= +github.com/hashicorp/cli v1.1.6/go.mod h1:MPon5QYlgjjo0BSoAiN0ESeT5fRzDjVRp+uioJ0piz4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -257,30 +263,40 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.6.0 
h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9Km8e0= -github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= -github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc= -github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4= +github.com/hashicorp/hc-install v0.8.0 h1:LdpZeXkZYMQhoKPCecJHlKvUkQFixN/nvyR1CdfOLjI= +github.com/hashicorp/hc-install v0.8.0/go.mod h1:+MwJYjDfCruSD/udvBmRB22Nlkwwkwf5sAB6uTIhSaU= +github.com/hashicorp/hcl/v2 v2.21.0 h1:lve4q/o/2rqwYOgUg3y3V2YPyD1/zkCLGjIV74Jit14= +github.com/hashicorp/hcl/v2 v2.21.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= github.com/hashicorp/terraform-json 
v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= -github.com/hashicorp/terraform-plugin-docs v0.9.0 h1:CEu7NToNWRR2os6DfT/Du2s+8qzXHyIcZQ10oiMdbJs= -github.com/hashicorp/terraform-plugin-docs v0.9.0/go.mod h1:47ZcsxMUJxAjGzHf+dZ9q78oYf4PeJxO1N+i5XDtXBc= +github.com/hashicorp/terraform-plugin-docs v0.19.4 h1:G3Bgo7J22OMtegIgn8Cd/CaSeyEljqjH3G39w28JK4c= +github.com/hashicorp/terraform-plugin-docs v0.19.4/go.mod h1:4pLASsatTmRynVzsjEhbXZ6s7xBlUw/2Kt0zfrq8HxA= +github.com/hashicorp/terraform-plugin-framework v1.11.0 h1:M7+9zBArexHFXDx/pKTxjE6n/2UCXY6b8FIq9ZYhwfE= +github.com/hashicorp/terraform-plugin-framework v1.11.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= +github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 h1:bxZfGo9DIUoLLtHMElsu+zwqI4IsMZQBRRy4iLzZJ8E= +github.com/hashicorp/terraform-plugin-framework-validators v0.13.0/go.mod h1:wGeI02gEhj9nPANU62F2jCaHjXulejm/X+af4PdZaNo= github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= +github.com/hashicorp/terraform-plugin-mux v0.16.0 h1:RCzXHGDYwUwwqfYYWJKBFaS3fQsWn/ZECEiW7p2023I= +github.com/hashicorp/terraform-plugin-mux v0.16.0/go.mod h1:PF79mAsPc8CpusXPfEVa4X8PtkB+ngWoiUClMrNZlYo= github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 h1:kJiWGx2kiQVo97Y5IOGR4EMcZ8DtMswHhUuFibsCQQE= github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0/go.mod h1:sl/UoabMc37HA6ICVMmGO+/0wofkVIRxf+BMb/dnoIg= +github.com/hashicorp/terraform-plugin-testing v1.10.0 h1:2+tmRNhvnfE4Bs8rB6v58S/VpqzGC6RCh9Y8ujdn+aw= +github.com/hashicorp/terraform-plugin-testing 
v1.10.0/go.mod h1:iWRW3+loP33WMch2P/TEyCxxct/ZEcCGMquSLSCVsrc= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= @@ -288,8 +304,6 @@ github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv2 github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= @@ -356,13 +370,11 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 
h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -372,8 +384,6 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mitchellh/cli v1.1.5 h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng= -github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -434,7 +444,6 @@ github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdL github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/posener/complete v1.2.3/go.mod 
h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/processout/grpc-go-pool v1.2.2-0.20200228131710-c0fcf3af0014 h1:ZBlVnM3IzLIMpdsq8aZNB9f6jPe61avIHNlfc4+HqJI= @@ -454,12 +463,10 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8= github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= -github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= @@ -542,23 +549,29 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= -github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= +github.com/yuin/goldmark v1.7.1 h1:3bajkSilaCbjdKVsKdZjZCLBNPL9pYzrCakKaf4U49U= +github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= +github.com/yuin/goldmark-meta v1.1.0 h1:pWw+JLHGZe8Rk0EGsMVssiNb/AaPMHfSRszZeUeiOUc= +github.com/yuin/goldmark-meta v1.1.0/go.mod h1:U4spWENafuA7Zyg+Lj5RqK/MF+ovMYtBvXi1lBb2VP0= +github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ= +github.com/zclconf/go-cty v1.15.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= +go.abhg.dev/goldmark/frontmatter v0.2.0 h1:P8kPG0YkL12+aYk2yU3xHv4tcXzeVnN+gU0tJ5JnxRw= +go.abhg.dev/goldmark/frontmatter v0.2.0/go.mod h1:XqrEkZuM57djk7zrlRUB02x8I5J0px76YjkOzhB4YlU= go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc= go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/otel v1.27.0 
h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -569,17 +582,17 @@ go.uber.org/zap v1.27.0/go.mod 
h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 h1:LoYXNGAShUG3m/ehNk4iFctuhGX/+R1ZpfJ4/ia80JM= +golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -589,8 +602,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -619,15 +632,15 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -636,8 +649,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -669,15 +682,15 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -687,8 +700,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -734,10 +747,10 @@ google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20240610135401-a8a62080eff3 h1:8RTI1cmuvdY9J7q/jpJWEj5UfgWjhV5MCoXaYmwLBYQ= google.golang.org/genproto v0.0.0-20240610135401-a8a62080eff3/go.mod h1:qb66gsewNb7Ghv1enkhJiRfYGWUklv3n6G8UvprOhzA= -google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 h1:QW9+G6Fir4VcRXVH8x3LilNAb6cxBGLa6+GM4hRwexE= -google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3/go.mod h1:kdrSS/OiLkPrNUpzD4aHgCq2rVuC/YRxok32HXZ4vRE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240610135401-a8a62080eff3 h1:9Xyg6I9IWQZhRVfCWjKK+l6kI0jHcPesVlMnT//aHNo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240610135401-a8a62080eff3/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo= +google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240823204242-4ba0660f739c h1:Kqjm4WpoWvwhMPcrAczoTyMySQmYa9Wy2iL6Con4zn8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -750,8 +763,8 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -805,12 +818,12 @@ k8s.io/cluster-bootstrap v0.26.2 h1:CePewHLcXZ+HXaAUstQz/rG7QsEn444UbiOePiphOzs= k8s.io/cluster-bootstrap v0.26.2/go.mod h1:8a7gP7UJjfrrSeRE1XVI/gadaN21tMfCcATuRXYBvDk= k8s.io/component-base v0.26.2 h1:IfWgCGUDzrD6wLLgXEstJKYZKAFS2kO+rBRi0p3LqcI= k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod 
h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= -k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= -k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= mellium.im/sasl v0.3.1 h1:wE0LW6g7U83vhvxjC1IY8DnXM+EU095yeo8XClvCdfo= mellium.im/sasl v0.3.1/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw= sigs.k8s.io/controller-runtime v0.14.5 h1:6xaWFqzT5KuAQ9ufgUaj1G/+C4Y1GRkhrxl+BJ9i+5s= diff --git a/internal/provider/mks_cluster_data_source.go b/internal/provider/mks_cluster_data_source.go new file mode 100644 index 00000000..2a7a8ded --- /dev/null +++ b/internal/provider/mks_cluster_data_source.go @@ -0,0 +1,549 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/RafaySystems/rafay-common/pkg/hub/client/options" + typed "github.com/RafaySystems/rafay-common/pkg/hub/client/typed" + fw "github.com/RafaySystems/terraform-provider-rafay/internal/resource_mks_cluster" +) + +// Ensure provider defined types fully satisfy framework interfaces. 
+var _ datasource.DataSource = &MksClusterDataSource{} + +func NewMksClusterDataSource() datasource.DataSource { + return &MksClusterDataSource{} +} + +func MksClusterDataSourceSchema(ctx context.Context) schema.Schema { + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "api_version": schema.StringAttribute{ + Computed: true, + Description: "api version", + }, + "kind": schema.StringAttribute{ + Computed: true, + Description: "kind", + }, + "metadata": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "annotations": schema.MapAttribute{ + ElementType: types.StringType, + Computed: true, + Description: "annotations of the resource", + }, + "description": schema.StringAttribute{ + Computed: true, + Description: "description of the resource", + }, + "labels": schema.MapAttribute{ + ElementType: types.StringType, + Computed: true, + Description: "labels of the resource", + }, + "name": schema.StringAttribute{ + Required: true, + Description: "name of the resource", + }, + "project": schema.StringAttribute{ + Required: true, + Description: "Project of the resource", + }, + }, + CustomType: fw.MetadataType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.MetadataValue{}.AttributeTypes(ctx), + }, + }, + Required: true, + Description: "metadata of the resource", + }, + "spec": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "blueprint": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + }, + "version": schema.StringAttribute{ + Computed: true, + Description: "Version of the blueprint", + }, + }, + CustomType: fw.BlueprintType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.BlueprintValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + }, + "cloud_credentials": schema.StringAttribute{ + Computed: true, + Description: "The credentials to be used to ssh into the Clusster Nodes", + }, + "config": schema.SingleNestedAttribute{ 
+ Attributes: map[string]schema.Attribute{ + "auto_approve_nodes": schema.BoolAttribute{ + Computed: true, + Description: "Auto approves incoming nodes by default", + }, + "cluster_ssh": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "passphrase": schema.StringAttribute{ + Computed: true, + Description: "Provide ssh passphrase", + }, + "port": schema.StringAttribute{ + Computed: true, + Description: "Provide ssh port", + }, + "private_key_path": schema.StringAttribute{ + Computed: true, + Description: "Provide local path to the private key", + }, + "username": schema.StringAttribute{ + Computed: true, + Description: "Provide the ssh username", + }, + }, + CustomType: fw.ClusterSshType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.ClusterSshValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "SSH config for all the nodes within the cluster", + }, + "dedicated_control_plane": schema.BoolAttribute{ + Computed: true, + Description: "Select this option for preventing scheduling of user workloads on Control Plane nodes", + }, + "high_availability": schema.BoolAttribute{ + Computed: true, + Description: "Select this option for highly available control plane. 
Minimum three control plane nodes are required", + }, + "kubernetes_upgrade": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "params": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "worker_concurrency": schema.StringAttribute{ + Computed: true, + Description: "It can be number or percentage", + }, + }, + CustomType: fw.ParamsType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.ParamsValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + }, + "strategy": schema.StringAttribute{ + Computed: true, + Description: "Kubernetes upgrade strategy for worker nodes and Valid options are: concurrent/sequential", + }, + }, + CustomType: fw.KubernetesUpgradeType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.KubernetesUpgradeValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + }, + "kubernetes_version": schema.StringAttribute{ + Computed: true, + Description: "Kubernetes version of the Control Plane", + }, + "installer_ttl": schema.Int64Attribute{ + Computed: true, + Description: "Installer TTL Configuration", + }, + "location": schema.StringAttribute{ + Computed: true, + Description: "The data center location where the cluster nodes will be launched", + }, + "network": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "cni": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + Description: "Provide the CNI name, e.g., Calico or Cilium", + }, + "version": schema.StringAttribute{ + Computed: true, + Description: "Provide the CNI version, e.g., 3.26.1", + }, + }, + CustomType: fw.CniType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.CniValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "MKS Cluster CNI Specification", + }, + "ipv6": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "pod_subnet": schema.StringAttribute{ + Computed: true, + Description: "Kubernetes pod 
subnet", + }, + "service_subnet": schema.StringAttribute{ + Computed: true, + Description: "Kubernetes service subnet", + }, + }, + CustomType: fw.Ipv6Type{ + ObjectType: types.ObjectType{ + AttrTypes: fw.Ipv6Value{}.AttributeTypes(ctx), + }, + }, + Computed: true, + }, + "pod_subnet": schema.StringAttribute{ + Computed: true, + Description: "Kubernetes pod subnet", + }, + "service_subnet": schema.StringAttribute{ + Computed: true, + Description: "Kubernetes service subnet", + }, + }, + CustomType: fw.NetworkType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.NetworkValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "MKS Cluster Network Specification", + }, + "nodes": schema.MapNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "arch": schema.StringAttribute{ + Computed: true, + Description: "System Architecture of the node", + }, + "hostname": schema.StringAttribute{ + Computed: true, + Description: "Hostname of the node", + }, + "interface": schema.StringAttribute{ + Computed: true, + Description: "Interface to be used on the node", + }, + "labels": schema.MapAttribute{ + ElementType: types.StringType, + Computed: true, + Description: "labels to be added to the node", + }, + "operating_system": schema.StringAttribute{ + Computed: true, + Description: "OS of the node", + }, + "private_ip": schema.StringAttribute{ + Computed: true, + Description: "Private ip address of the node", + }, + "roles": schema.SetAttribute{ + ElementType: types.StringType, + Computed: true, + Description: "Valid roles are: 'ControlPlane', 'Worker', 'Storage'", + }, + "ssh": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "ip_address": schema.StringAttribute{ + Computed: true, + Description: "Use this to override node level ssh details", + }, + "passphrase": schema.StringAttribute{ + Computed: true, + Description: "SSH Passphrase", + }, + "port": schema.StringAttribute{ + Computed: 
true, + Description: "SSH Port", + }, + "private_key_path": schema.StringAttribute{ + Computed: true, + Description: "Specify Path to SSH private key", + }, + "username": schema.StringAttribute{ + Computed: true, + Description: "SSH Username", + }, + }, + CustomType: fw.SshType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.SshValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "MKS Node SSH definition", + }, + "taints": schema.SetNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "effect": schema.StringAttribute{ + Computed: true, + }, + "key": schema.StringAttribute{ + Computed: true, + }, + "value": schema.StringAttribute{ + Computed: true, + }, + }, + CustomType: fw.TaintsType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.TaintsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "taints to be added to the node", + }, + }, + CustomType: fw.NodesType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.NodesValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "holds node configuration for the cluster", + }, + }, + CustomType: fw.ConfigType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.ConfigValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "MKS V3 cluster specification", + }, + "proxy": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "allow_insecure_bootstrap": schema.BoolAttribute{ + Computed: true, + }, + "bootstrap_ca": schema.StringAttribute{ + Computed: true, + }, + "enabled": schema.BoolAttribute{ + Computed: true, + }, + "http_proxy": schema.StringAttribute{ + Computed: true, + }, + "https_proxy": schema.StringAttribute{ + Computed: true, + }, + "no_proxy": schema.StringAttribute{ + Computed: true, + }, + "proxy_auth": schema.StringAttribute{ + Computed: true, + }, + }, + CustomType: fw.ProxyType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.ProxyValue{}.AttributeTypes(ctx), + 
}, + }, + Computed: true, + }, + "sharing": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Computed: true, + }, + "projects": schema.SetNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + }, + }, + CustomType: fw.ProjectsType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.ProjectsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + }, + }, + CustomType: fw.SharingType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.SharingValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + }, + "system_components_placement": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "daemon_set_override": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "daemon_set_tolerations": schema.SetNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "effect": schema.StringAttribute{ + Computed: true, + }, + "key": schema.StringAttribute{ + Computed: true, + }, + "operator": schema.StringAttribute{ + Computed: true, + }, + "toleration_seconds": schema.Int64Attribute{ + Computed: true, + }, + "value": schema.StringAttribute{ + Computed: true, + }, + }, + CustomType: fw.DaemonSetTolerationsType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.DaemonSetTolerationsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + }, + "node_selection_enabled": schema.BoolAttribute{ + Computed: true, + }, + }, + CustomType: fw.DaemonSetOverrideType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.DaemonSetOverrideValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + }, + "node_selector": schema.MapAttribute{ + ElementType: types.StringType, + Computed: true, + }, + "tolerations": schema.SetNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "effect": 
schema.StringAttribute{ + Computed: true, + }, + "key": schema.StringAttribute{ + Computed: true, + }, + "operator": schema.StringAttribute{ + Computed: true, + }, + "toleration_seconds": schema.Int64Attribute{ + Computed: true, + }, + "value": schema.StringAttribute{ + Computed: true, + }, + }, + CustomType: fw.TolerationsType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.TolerationsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + }, + }, + CustomType: fw.SystemComponentsPlacementType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.SystemComponentsPlacementValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + Description: "The type of the cluster this spec corresponds to", + }, + }, + CustomType: fw.SpecType{ + ObjectType: types.ObjectType{ + AttrTypes: fw.SpecValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "cluster specification", + }, + }, + } +} + +// MksClusterDataSource defines the data source implementation of MksClusterResource. +type MksClusterDataSource struct { + client typed.Client +} + +func (d *MksClusterDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_mks_cluster" +} + +func (d *MksClusterDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = MksClusterDataSourceSchema(ctx) +} + +func (d *MksClusterDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(typed.Client) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *http.Client, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.client = client +} + +func (d *MksClusterDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data fw.MksClusterModel + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + + if resp.Diagnostics.HasError() { + return + } + + // Fetch the cluster from the Hub + hub, err := d.client.InfraV3().Cluster().Get(ctx, options.GetOptions{ + Name: data.Metadata.Name.ValueString(), + Project: data.Metadata.Project.ValueString(), + }) + if err != nil { + resp.Diagnostics.AddError("Failed to fetch data", err.Error()) + return + } + // convert the hub respo into the TF model + resp.Diagnostics.Append(fw.ConvertMksClusterFromHub(ctx, hub, &data)...) + if resp.Diagnostics.HasError() { + return + } + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} diff --git a/internal/provider/mks_cluster_data_source_test.go b/internal/provider/mks_cluster_data_source_test.go new file mode 100644 index 00000000..7e5c4dd4 --- /dev/null +++ b/internal/provider/mks_cluster_data_source_test.go @@ -0,0 +1,37 @@ +package provider + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" +) + +// TestAccMksClusterDataSource tests the data source for the Rafay MKS cluster + +func TestAccMksClusterDataSource(t *testing.T) { + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testFwProviderFactories, + Steps: []resource.TestStep{ + // Read testing + { + Config: testProviderConfig + testMksClusterDataSource(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.rafay_mks_cluster.mks-example-cluster", "metadata.name", "mks-example-cluster"), + resource.TestCheckResourceAttr("data.rafay_mks_cluster.mks-example-cluster", "metadata.project", "defaultproject"), + ), + }, + }, + }) +} + 
+// Helper function to return the initial configuration +func testMksClusterDataSource() string { + return ` +data "rafay_mks_cluster" "mks-example-cluster" { + metadata = { + name = "mks-example-cluster" + project = "defaultproject" + } +} +` +} diff --git a/internal/provider/mks_cluster_resource.go b/internal/provider/mks_cluster_resource.go new file mode 100644 index 00000000..efb16d4c --- /dev/null +++ b/internal/provider/mks_cluster_resource.go @@ -0,0 +1,224 @@ +package provider + +import ( + "context" + "fmt" + "strings" + "time" + + typed "github.com/RafaySystems/rafay-common/pkg/hub/client/typed" + "github.com/RafaySystems/rctl/pkg/cluster" + + "github.com/RafaySystems/rafay-common/pkg/hub/client/options" + "github.com/hashicorp/terraform-plugin-framework-validators/resourcevalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + + fw "github.com/RafaySystems/terraform-provider-rafay/internal/resource_mks_cluster" +) + +// Ensure provider defined types fully satisfy framework interfaces. +var ( + _ resource.Resource = &MksClusterResource{} + _ resource.ResourceWithConfigure = &MksClusterResource{} + _ resource.ResourceWithImportState = &MksClusterResource{} + _ resource.ResourceWithConfigValidators = &MksClusterResource{} +) + +func NewMksClusterResource() resource.Resource { + return &MksClusterResource{} +} + +// MksClusterResource defines the resource implemSharentation. 
+type MksClusterResource struct { + client typed.Client +} + +func (r *MksClusterResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_mks_cluster" +} + +func (r *MksClusterResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = fw.MksClusterResourceSchema(ctx) +} + +func (r *MksClusterResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + client, ok := req.ProviderData.(typed.Client) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *typed.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + + // Save the client for use in CRUD operations + r.client = client +} + +func (r *MksClusterResource) ConfigValidators(_ context.Context) []resource.ConfigValidator { + return []resource.ConfigValidator{ + resourcevalidator.AtLeastOneOf( + path.MatchRoot("spec").AtName("config").AtName("cluster_ssh"), + path.MatchRoot("spec").AtName("cloud_credentials"), + ), + } +} + +func (r *MksClusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Read Terraform plan data into the model + var data fw.MksClusterModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Convert the Terraform model to a Hub model + hub, daig := fw.ConvertMksClusterToHub(ctx, data) + if daig.HasError() { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to create cluster, got error: %s", daig)) + return + } + + // Create the cluster + err := cluster.ApplyMksV3Cluster(ctx, r.client, hub) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to create cluster, got error: %s", err)) + return + } + + // Wait for the cluster operation to complete + ticker := time.NewTicker(time.Duration(60) * time.Second) + defer ticker.Stop() + timeout := time.After(time.Duration(90) * time.Minute) + daig = fw.WaitForClusterApplyOperation(ctx, r.client, hub, timeout, ticker) + + if daig.HasError() { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to create cluster, got error: %s", daig)) + return + } + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +func (r *MksClusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Read Terraform prior state data into the model + var state fw.MksClusterModel + + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + // Read the cluster from the Hub + c, err := r.client.InfraV3().Cluster().Get(ctx, options.GetOptions{ + Name: state.Metadata.Name.ValueString(), + Project: state.Metadata.Project.ValueString(), + }) + + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to read the cluster, got error: %s", err)) + return + } + + // Convert the Hub model to a Terraform model + daigs := fw.ConvertMksClusterFromHub(ctx, c, &state) + if daigs.HasError() { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to convert the cluster, got error: %s", daigs)) + return + } + // Save the refreshed state into Terraform + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + +} + +func (r *MksClusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Read Terraform plan data into the model + var plan fw.MksClusterModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Convert the Terraform model to a Hub model + hub, daigs := fw.ConvertMksClusterToHub(ctx, plan) + if daigs.HasError() { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to update the cluster, got error: %s", daigs)) + return + } + + // Call the Hub to Apply the cluster + err := cluster.ApplyMksV3Cluster(ctx, r.client, hub) + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to update cluster, got error: %s", err)) + return + } + + // Wait for the cluster operation to complete + ticker := time.NewTicker(time.Duration(60) * time.Second) + defer ticker.Stop() + timeout := time.After(time.Duration(90) * time.Minute) + daigs = fw.WaitForClusterApplyOperation(ctx, r.client, hub, timeout, ticker) + + if daigs.HasError() { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to update cluster, got error: %s", daigs)) + return + } + + // Save updated data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *MksClusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Read Terraform prior state data into the model + var data fw.MksClusterModel + resp.Diagnostics.Append(req.State.Get(ctx, &data)...) 
+ + err := r.client.InfraV3().Cluster().Delete(ctx, options.DeleteOptions{ + Name: data.Metadata.Name.ValueString(), + Project: data.Metadata.Project.ValueString(), + }) + + if err != nil { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to delete cluster, got error: %s", err)) + return + } + + // Wait for the cluster deletion to be completed + ticker := time.NewTicker(time.Duration(60) * time.Second) + defer ticker.Stop() + + timeout := time.After(time.Duration(30) * time.Minute) + daigs := fw.WaitForClusterDeleteOperation(ctx, r.client, data.Metadata.Name.ValueString(), data.Metadata.Project.ValueString(), timeout, ticker) + + if daigs.HasError() { + resp.Diagnostics.AddError("Client Error", fmt.Sprintf("Unable to delete the cluster, got error: %s", daigs)) + return + } +} + +func (r *MksClusterResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, "/") + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: name/project. Got: %q", req.ID), + ) + return + } + + name := idParts[0] + project := idParts[1] + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("metadata").AtName("name"), name)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("metadata").AtName("project"), project)...) 
+} diff --git a/internal/provider/mks_cluster_resource_test.go b/internal/provider/mks_cluster_resource_test.go new file mode 100644 index 00000000..0138b1d5 --- /dev/null +++ b/internal/provider/mks_cluster_resource_test.go @@ -0,0 +1,288 @@ +package provider + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/tfversion" +) + +// Todo: Figure out way to automate bringing up oci instance for testing + +func TestAccMksClusterResource(t *testing.T) { + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: testFwProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + // https://developer.hashicorp.com/terraform/plugin/framework/provider-servers#protocol-version + tfversion.SkipBelow(tfversion.Version1_1_0), + }, + Steps: []resource.TestStep{ + { // Create and Read testing + Config: testMksClusterResourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("rafay_mks_cluster.mks-example-cluster", "metadata.name", "mks-example-cluster"), + resource.TestCheckResourceAttr("rafay_mks_cluster.mks-example-cluster", "metadata.project", "defaultproject"), + resource.TestCheckResourceAttr("rafay_mks_cluster.mks-example-cluster", "spec.config.dedicated_control_plane", "true"), + resource.TestCheckResourceAttr("rafay_mks_cluster.mks-example-cluster", "spec.config.high_availability", "false"), + ), + }, + // Update and Read testing + { + Config: testMksClusterResourceConfigUpdated(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("rafay_mks_cluster.mks-example-cluster", "metadata.name", "mks-example-cluster"), + resource.TestCheckResourceAttr("rafay_mks_cluster.mks-example-cluster", "metadata.project", "defaultproject"), + resource.TestCheckResourceAttr("rafay_mks_cluster.mks-example-cluster", "spec.config.dedicated_control_plane", "true"), + 
resource.TestCheckResourceAttr("rafay_mks_cluster.mks-example-cluster", "spec.config.high_availability", "false"), + ), + }, + + // Delete testing automatically occurs in TestCase + }, + }) +} + +// Helper function to return the initial configuration +// Cluster Configuration: No Ha Dedeicated Control Plane with one worker node +// Bring up OCI instances and provide the node details +func testMksClusterResourceConfig() string { + return ` +resource "rafay_mks_cluster" "mks-example-cluster" { + api_version = "infra.k8smgmt.io/v3" + kind = "Cluster" + + metadata = { + annotations = { + "key2" = "value2" + } + description = "This is a sample MKS cluster." + display_name = "mks-example-cluster" + labels = { + "env" = "development" + } + name = "mks-example-cluster" + project = "defaultproject" + } + + spec = { + blueprint = { + name = "minimal" + } + cloud_credentials = "mks-ssh-creds" + config = { + auto_approve_nodes = true + dedicated_control_plane = true + high_availability = false + kubernetes_version = "v1.28.9" + kubernetes_upgrade = { + strategy = "sequential" + params = { + worker_concurrency = "50%" + } + } + network = { + cni = { + name = "Calico" + version = "3.26.1" + } + pod_subnet = "10.244.0.0/16" + service_subnet = "10.96.0.0/12" + } + + nodes = { + "mks-sample-cp-node" = { + arch = "amd64" + hostname = "mks-sample-cp-node" + operating_system = "Ubuntu22.04" + private_ip = "10.12.1.148" + roles = ["ControlPlane"] + }, + "mks-sample-w-node" = { + arch = "amd64" + hostname = "mks-sample-w-node" + operating_system = "Ubuntu22.04" + private_ip = "10.12.50.133" + roles = ["Worker"] + labels = { + "app" = "infra" + "infra" = "true" + } + taints = [ + { + effect = "NoSchedule" + key = "app" + value = "infra" + }, + { + effect = "NoSchedule" + key = "infra" + value = "true" + } + ] + } + } + } + system_components_placement = { + daemon_set_override = { + daemon_set_tolerations = [ + { + effect = "NoSchedule" + key = "app" + operator = "Equal" + value = "infra" 
+ }, + { + effect = "NoSchedule" + key = "infra" + operator = "Equal" + value = "true" + }, + ] + node_selection_enabled = true + } + node_selector = { + "app" = "infra" + "infra" = "true" + } + tolerations = [ + { + effect = "NoSchedule" + key = "app" + operator = "Equal" + value = "infra" + }, + { + effect = "NoSchedule" + key = "infra" + operator = "Equal" + value = "true" + }, + ] + } + type = "mks" + } +} +` +} + +// Helper function to return the updated configuration +// Update the kubernetes_version to v1.29.4 +func testMksClusterResourceConfigUpdated() string { + return ` +resource "rafay_mks_cluster" "mks-example-cluster" { + api_version = "infra.k8smgmt.io/v3" + kind = "Cluster" + + metadata = { + annotations = { + "key2" = "value2" + } + description = "This is a sample MKS cluster." + display_name = "mks-example-cluster" + labels = { + "env" = "development" + } + name = "mks-example-cluster" + project = "defaultproject" + } + + spec = { + blueprint = { + name = "minimal" + } + cloud_credentials = "vasu-mks-ssh-010" + config = { + auto_approve_nodes = true + dedicated_control_plane = true + high_availability = false + kubernetes_version = "v1.29.4" + kubernetes_upgrade = { + strategy = "sequential" + params = { + worker_concurrency = "50%" + } + } + network = { + cni = { + name = "Calico" + version = "3.26.1" + } + pod_subnet = "10.244.0.0/16" + service_subnet = "10.96.0.0/12" + } + + nodes = { + "mks-sample-cp-node" = { + arch = "amd64" + hostname = "mks-sample-cp-node" + operating_system = "Ubuntu22.04" + private_ip = "10.12.1.148" + roles = ["ControlPlane"] + }, + "mks-sample-w-node" = { + arch = "amd64" + hostname = "mks-sample-w-node" + operating_system = "Ubuntu22.04" + private_ip = "10.12.50.133" + roles = ["Worker"] + labels = { + "app" = "infra" + "infra" = "true" + } + taints = [ + { + effect = "NoSchedule" + key = "app" + value = "infra" + }, + { + effect = "NoSchedule" + key = "infra" + value = "true" + } + ] + } + } + } + 
system_components_placement = { + daemon_set_override = { + daemon_set_tolerations = [ + { + effect = "NoSchedule" + key = "app" + operator = "Equal" + value = "infra" + }, + { + effect = "NoSchedule" + key = "infra" + operator = "Equal" + value = "true" + }, + ] + node_selection_enabled = true + } + node_selector = { + "app" = "infra" + "infra" = "true" + } + tolerations = [ + { + effect = "NoSchedule" + key = "app" + operator = "Equal" + value = "infra" + }, + { + effect = "NoSchedule" + key = "infra" + operator = "Equal" + value = "true" + }, + ] + } + type = "mks" + } +} +` +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go new file mode 100644 index 00000000..b14133e3 --- /dev/null +++ b/internal/provider/provider.go @@ -0,0 +1,170 @@ +package provider + +import ( + "context" + "crypto/tls" + "net/http" + "os/user" + "path/filepath" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/provider/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" + + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + + "github.com/RafaySystems/rafay-common/pkg/hub/client/typed" + config "github.com/RafaySystems/rctl/pkg/config" + rctlcontext "github.com/RafaySystems/rctl/pkg/context" + + "github.com/RafaySystems/rctl/pkg/versioninfo" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Ensure RafayFwProvider satisfies terraform framework provider interfaces. +var _ provider.Provider = &RafayFwProvider{} + +const TF_USER_AGENT = "terraform" + +// RafayFwProvider defines the provider implementation using framework. 
+type RafayFwProvider struct { + version string +} + +func New(version string) func() provider.Provider { + return func() provider.Provider { + return &RafayFwProvider{ + version: version, + } + } +} + +type RafayFwProviderModel struct { + ProviderConfigFile types.String `tfsdk:"provider_config_file"` + IgnoreInsecureTlsError basetypes.BoolValue `tfsdk:"ignore_insecure_tls_error"` +} + +func (p *RafayFwProvider) Schema(ctx context.Context, req provider.SchemaRequest, resp *provider.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "provider_config_file": schema.StringAttribute{ + Optional: true, + }, + "ignore_insecure_tls_error": schema.BoolAttribute{ + Optional: true, + }, + }, + } + +} + +func (p *RafayFwProvider) Metadata(ctx context.Context, req provider.MetadataRequest, resp *provider.MetadataResponse) { + resp.TypeName = "rafay" + resp.Version = p.version +} + +func (p *RafayFwProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { + + var data RafayFwProviderModel + + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ + if resp.Diagnostics.HasError() { + return + } + + configFile := data.ProviderConfigFile.ValueString() + ignoreTlsError := data.IgnoreInsecureTlsError + + tflog.Info(ctx, "rafay provider config file", map[string]interface{}{ + "config_file": configFile, + }) + + cliCtx := rctlcontext.GetContext() + + if configFile != "" { + var err error + + configFile = strings.TrimSpace(configFile) + if configFile[0] == '~' { + configFile, err = expandHomeDir(configFile) + } else { + configFile, err = filepath.Abs(configFile) + } + + if err == nil { + tflog.Info(ctx, "rafay provider config file absolute path", map[string]interface{}{ + "config_file": configFile, + }) + configPath := filepath.Dir(configFile) + fileName := filepath.Base(configFile) + cliCtx.ConfigFile = fileName + cliCtx.ConfigDir = configPath + } else { + tflog.Error(ctx, "failed to get rafay provider config absolute path", map[string]interface{}{ + "error": err, + }) + tflog.Info(ctx, "provider will use default config file ~/.rafay/cli/config.json") + } + } else { + tflog.Info(ctx, "provider will use default config file ~/.rafay/cli/config.json") + } + + err := config.InitConfig(cliCtx) + + if err != nil { + tflog.Error(ctx, "rafay provider config init error", map[string]interface{}{ + "error": err.Error(), + }) + resp.Diagnostics.AddError( + "Unable to create rafay provider", + "Unable to init config for authenticated rafay provider: "+err.Error(), + ) + return + } + + if ignoreTlsError.ValueBool() { + http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + } + + auth := config.GetConfig().GetAppAuthProfile() + client, err := typed.NewClientWithUserAgent(auth.URL, auth.Key, versioninfo.GetUserAgent()) + + if err != nil { + resp.Diagnostics.AddError("Unable to initialise the Client, Error", err.Error()) + return + } + // Save the client in the provider data + resp.ResourceData = client + resp.DataSourceData = client + +} + +func (p *RafayFwProvider) DataSources(ctx 
context.Context) []func() datasource.DataSource { + return []func() datasource.DataSource{ + NewMksClusterDataSource, + } +} + +func (p *RafayFwProvider) Resources(ctx context.Context) []func() resource.Resource { + return []func() resource.Resource{ + // Register new resource here + NewMksClusterResource, + } +} + +func expandHomeDir(path string) (string, error) { + if len(path) == 0 || path[0] != '~' { + return path, nil + } + + usr, err := user.Current() + if err != nil { + return "", err + } + return filepath.Join(usr.HomeDir, path[1:]), nil +} diff --git a/internal/provider/provider_test.go b/internal/provider/provider_test.go new file mode 100644 index 00000000..99a4c9f6 --- /dev/null +++ b/internal/provider/provider_test.go @@ -0,0 +1,31 @@ +package provider + +import ( + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" +) + +const ( + // testProviderConfig is a shared configuration to combine with the actual + // test configuration so the Rafay client is properly configured. + // It is also possible to use the RCTL_ environment variables instead, + // such as updating the Makefile and running the testing through that tool. + + testProviderConfig = ` +provider "rafay" { + provider_config_file = "~/.rafay/cli/config.json" + ignore_insecure_tls_error = true +} +` +) + +var ( + // testFwProviderFactories are used to instantiate a provider during + // acceptance testing. The factory function will be invoked for every Terraform + // CLI command executed to create a provider server to which the CLI can + // reattach. 
+ + testFwProviderFactories = map[string]func() (tfprotov6.ProviderServer, error){ + "rafay": providerserver.NewProtocol6WithError(New("test")()), + } +) diff --git a/internal/resource_mks_cluster/mks_cluster_resource_ext.go b/internal/resource_mks_cluster/mks_cluster_resource_ext.go new file mode 100644 index 00000000..3c8bd69c --- /dev/null +++ b/internal/resource_mks_cluster/mks_cluster_resource_ext.go @@ -0,0 +1,1052 @@ +// Contains the conversion methods to convert the Terraform types to the Hub types and vice versa +// For each Tf type, we're extending it with ToHub and FromHub signature +// The state of each type is set to known after the conversion + +package resource_mks_cluster + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/RafaySystems/rafay-common/proto/types/hub/commonpb" + "github.com/RafaySystems/rafay-common/proto/types/hub/infrapb" + v1 "k8s.io/api/core/v1" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + + dynamic "github.com/RafaySystems/rafay-common/pkg/hub/client/dynamic" + typed "github.com/RafaySystems/rafay-common/pkg/hub/client/typed" + + "github.com/RafaySystems/rafay-common/pkg/hub/client/options" +) + +// Utility functions to handle conversion of Terraform types to native types +func getStringValue(tfString types.String) string { + if tfString.IsNull() || tfString.IsUnknown() { + return "" + } + return tfString.ValueString() +} + +func getBoolValue(tfBool types.Bool) bool { + if tfBool.IsNull() || tfBool.IsUnknown() { + return false + } + return tfBool.ValueBool() +} + +func getInt64Value(tfInt types.Int64) int64 { + if tfInt.IsNull() || tfInt.IsUnknown() { + return 0 + } + return tfInt.ValueInt64() +} + +func convertFromTfMap(tfMap types.Map) map[string]string { + 
result := make(map[string]string) + + if tfMap.IsNull() || tfMap.IsUnknown() { + return result + } + for k, v := range tfMap.Elements() { + result[k] = getStringValue(v.(types.String)) + } + return result +} + +func convertToTfMap(goMap map[string]string) types.Map { + elements := make(map[string]attr.Value) + + for k, v := range goMap { + elements[k] = basetypes.NewStringValue(v) + } + tfMap, _ := basetypes.NewMapValue(types.StringType, elements) + return tfMap + +} + +func (v MetadataValue) ToHub(ctx context.Context) (*commonpb.Metadata, diag.Diagnostics) { + var diags diag.Diagnostics + + var hub commonpb.Metadata + + hub.Name = getStringValue(v.Name) + hub.Project = getStringValue(v.Project) + + if !v.Annotations.IsNull() && !v.Annotations.IsUnknown() { + hub.Annotations = convertFromTfMap(v.Annotations) + } + + if !v.Description.IsNull() && !v.Description.IsUnknown() { + hub.Description = getStringValue(v.Description) + } + + if !v.Labels.IsNull() && !v.Labels.IsUnknown() { + hub.Labels = convertFromTfMap(v.Labels) + } + + return &hub, diags +} + +func (v MetadataValue) FromHub(ctx context.Context, hub *commonpb.Metadata) (MetadataValue, diag.Diagnostics) { + + var diags diag.Diagnostics + + v.Name = types.StringValue(hub.Name) + v.Project = types.StringValue(hub.Project) + + if hub.Description != "" { + v.Description = types.StringValue(hub.Description) + } + + if hub.Annotations != nil { + v.Annotations = convertToTfMap(hub.Annotations) + } + if hub.Labels != nil { + v.Labels = convertToTfMap(hub.Labels) + } + + v.state = attr.ValueStateKnown + return v, diags +} + +func (v CniValue) ToHub(ctx context.Context) (*infrapb.Cni, diag.Diagnostics) { + hub := &infrapb.Cni{} + + hub.Name = getStringValue(v.Name) + hub.Version = getStringValue(v.Version) + return hub, nil +} + +func (v CniValue) FromHub(ctx context.Context, hub *infrapb.Cni) (basetypes.ObjectValue, diag.Diagnostics) { + // Convert the hub object to terraform object + v.Name = 
types.StringValue(hub.Name) + v.Version = types.StringValue(hub.Version) + + v.state = attr.ValueStateKnown + return v.ToObjectValue(ctx) +} + +func (v Ipv6Value) ToHub(ctx context.Context) (*infrapb.MksSubnet, diag.Diagnostics) { + var hub infrapb.MksSubnet + + hub.PodSubnet = getStringValue(v.PodSubnet) + hub.ServiceSubnet = getStringValue(v.ServiceSubnet) + + return &hub, nil +} + +func (v Ipv6Value) FromHub(ctx context.Context, hub *infrapb.MksSubnet) (basetypes.ObjectValue, diag.Diagnostics) { + // Convert the hub object to terraform object + if hub.PodSubnet != "" { + v.PodSubnet = types.StringValue(hub.PodSubnet) + } + if hub.ServiceSubnet != "" { + v.ServiceSubnet = types.StringValue(hub.ServiceSubnet) + } + + v.state = attr.ValueStateKnown + return v.ToObjectValue(ctx) +} + +func (v NetworkValue) ToHub(ctx context.Context) (*infrapb.MksClusterNetworking, diag.Diagnostics) { + var cniType CniType + + var diags, d diag.Diagnostics + + // Get the value from the object + tfCni, d := cniType.ValueFromObject(ctx, v.Cni) + if diags.HasError() { + diags = append(diags, d...) + return nil, diags + } + + hub := &infrapb.MksClusterNetworking{} + hub.Cni, d = tfCni.(CniValue).ToHub(ctx) + diags = append(diags, d...) + + hub.PodSubnet = getStringValue(v.PodSubnet) + hub.ServiceSubnet = getStringValue(v.ServiceSubnet) + + if !v.Ipv6.IsNull() && !v.Ipv6.IsUnknown() { + // Handle IPv6 + var ipv6Type Ipv6Type + tfIpv6Value, d := ipv6Type.ValueFromObject(ctx, v.Ipv6) + if diags.HasError() { + diags = append(diags, d...) + return hub, diags + } + hub.Ipv6, d = tfIpv6Value.(Ipv6Value).ToHub(ctx) + diags = append(diags, d...) 
+ + } + + return hub, diags +} + +func (v NetworkValue) FromHub(ctx context.Context, hub *infrapb.MksClusterNetworking) (basetypes.ObjectValue, diag.Diagnostics) { + var diags, d diag.Diagnostics + + tfCni, d := NewCniValue(v.Cni.AttributeTypes(ctx), v.Cni.Attributes()) + if d.HasError() { + tfCni = NewCniValueNull() + } + v.Cni, d = tfCni.FromHub(ctx, hub.Cni) + diags = append(diags, d...) + + v.PodSubnet = types.StringValue(hub.PodSubnet) + v.ServiceSubnet = types.StringValue(hub.ServiceSubnet) + + // Handle IPv6 + if hub.Ipv6 != nil { + tfIpv6, d := NewIpv6Value(v.Ipv6.AttributeTypes(ctx), v.Ipv6.Attributes()) + if d.HasError() { + tfIpv6 = NewIpv6ValueNull() + } + v.Ipv6, diags = tfIpv6.FromHub(ctx, hub.Ipv6) + diags = append(diags, d...) + } else { + v.Ipv6, d = NewIpv6ValueNull().ToObjectValue(ctx) + diags = append(diags, d...) + } + + v.state = attr.ValueStateKnown + + obj, d := v.ToObjectValue(ctx) + diags = append(diags, d...) + + return obj, diags + +} + +func (v TaintsValue) ToHub(ctx context.Context) (*v1.Taint, diag.Diagnostics) { + var hub v1.Taint + + hub.Effect = v1.TaintEffect(getStringValue(v.Effect)) + hub.Key = getStringValue(v.Key) + hub.Value = getStringValue(v.Value) + return &hub, nil +} + +func (v TaintsValue) FromHub(ctx context.Context, hub *v1.Taint) (TaintsValue, diag.Diagnostics) { + // Convert the hub object to terraform object + if hub.Effect != "" { + v.Effect = types.StringValue(string(hub.Effect)) + } + if hub.Key != "" { + v.Key = types.StringValue(hub.Key) + } + if hub.Value != "" { + v.Value = types.StringValue(hub.Value) + } + + v.state = attr.ValueStateKnown + return v, nil +} + +func (v TolerationsValue) ToHub(ctx context.Context) (*v1.Toleration, diag.Diagnostics) { + var hub v1.Toleration + + hub.Effect = v1.TaintEffect(getStringValue(v.Effect)) + hub.Key = getStringValue(v.Key) + hub.Value = getStringValue(v.Value) + hub.Operator = v1.TolerationOperator(getStringValue(v.Operator)) + hub.TolerationSeconds = 
v.TolerationSeconds.ValueInt64Pointer()

	return &hub, nil
}

// FromHub copies the non-empty fields of a hub (core/v1) Toleration into this
// terraform TolerationsValue and marks the value state as known. Empty hub
// fields leave the corresponding terraform fields untouched rather than
// overwriting them with empty values.
func (v TolerationsValue) FromHub(ctx context.Context, hub *v1.Toleration) (TolerationsValue, diag.Diagnostics) {
	// Convert the hub object to terraform object
	if hub.Effect != "" {
		v.Effect = types.StringValue(string(hub.Effect))
	}
	if hub.Key != "" {
		v.Key = types.StringValue(hub.Key)
	}
	if hub.Value != "" {
		v.Value = types.StringValue(hub.Value)
	}
	if hub.Operator != "" {
		v.Operator = types.StringValue(string(hub.Operator))
	}
	if hub.TolerationSeconds != nil {
		// TolerationSeconds is a *int64 on both sides; nil means "unset".
		v.TolerationSeconds = types.Int64PointerValue(hub.TolerationSeconds)
	}

	v.state = attr.ValueStateKnown
	return v, nil
}

// ToHub converts this terraform DaemonSetTolerationsValue into a hub (core/v1)
// Toleration. Null/unknown terraform strings become empty strings via
// getStringValue; a null TolerationSeconds becomes a nil pointer.
func (v DaemonSetTolerationsValue) ToHub(ctx context.Context) (*v1.Toleration, diag.Diagnostics) {
	var hub v1.Toleration

	hub.Effect = v1.TaintEffect(getStringValue(v.Effect))
	hub.Key = getStringValue(v.Key)
	hub.Value = getStringValue(v.Value)
	hub.Operator = v1.TolerationOperator(getStringValue(v.Operator))
	hub.TolerationSeconds = v.TolerationSeconds.ValueInt64Pointer()

	return &hub, nil
}

// FromHub copies the non-empty fields of a hub Toleration into this terraform
// DaemonSetTolerationsValue and marks the value state as known. Mirrors
// TolerationsValue.FromHub.
func (v DaemonSetTolerationsValue) FromHub(ctx context.Context, hub *v1.Toleration) (DaemonSetTolerationsValue, diag.Diagnostics) {
	// Convert the hub object to terraform object
	if hub.Effect != "" {
		v.Effect = types.StringValue(string(hub.Effect))
	}
	if hub.Key != "" {
		v.Key = types.StringValue(hub.Key)
	}
	if hub.Value != "" {
		v.Value = types.StringValue(hub.Value)
	}
	if hub.Operator != "" {
		v.Operator = types.StringValue(string(hub.Operator))
	}
	if hub.TolerationSeconds != nil {
		v.TolerationSeconds = types.Int64PointerValue(hub.TolerationSeconds)
	}

	v.state = attr.ValueStateKnown
	return v, nil
}

// ToHub converts this terraform DaemonSetOverrideValue into a hub
// DaemonSetOverride, converting each element of the DaemonSetTolerations set
// and accumulating any diagnostics each conversion produces.
func (v DaemonSetOverrideValue) ToHub(ctx context.Context) (*infrapb.DaemonSetOverride, diag.Diagnostics) {
	var diags diag.Diagnostics
	hub := &infrapb.DaemonSetOverride{}

	hub.NodeSelectionEnabled = getBoolValue(v.NodeSelectionEnabled)

	// Each set element is expected to be a DaemonSetTolerationsValue; the
	// assertion panics on any other element type.
	for _, toleration := range v.DaemonSetTolerations.Elements() {
		h, d := toleration.(DaemonSetTolerationsValue).ToHub(ctx)
		diags = append(diags, d...)
		hub.Tolerations = append(hub.Tolerations, h)
	}

	return hub, diags
}

// FromHub converts a hub DaemonSetOverride into a terraform object value.
// A nil hub.Tolerations slice maps to a null set (not an empty one).
func (v DaemonSetOverrideValue) FromHub(ctx context.Context, hub *infrapb.DaemonSetOverride) (basetypes.ObjectValue, diag.Diagnostics) {
	// Convert the hub object to terraform object
	var diags, d diag.Diagnostics

	// Only overwrite when true, so an unset terraform value is preserved.
	if hub.NodeSelectionEnabled {
		v.NodeSelectionEnabled = types.BoolValue(hub.NodeSelectionEnabled)
	}

	var tfDaemonSetTolerations []attr.Value

	tfDaemonSetTolerationsType := DaemonSetTolerationsType{
		ObjectType: types.ObjectType{
			AttrTypes: DaemonSetTolerationsValue{}.AttributeTypes(ctx),
		},
	}

	if hub.Tolerations != nil {
		// loop through the hub tolerations and convert them to terraform tolerations
		// NOTE(review): the loop variable `hub` shadows the function parameter
		// of the same name inside this block.
		for _, hub := range hub.Tolerations {
			tfDsTol := &DaemonSetTolerationsValue{}
			h, d := tfDsTol.FromHub(ctx, hub)
			diags = append(diags, d...)
			tfDaemonSetTolerations = append(tfDaemonSetTolerations, h)
		}

		v.DaemonSetTolerations, d = types.SetValue(tfDaemonSetTolerationsType, tfDaemonSetTolerations)
		diags = append(diags, d...)
	} else {
		v.DaemonSetTolerations = types.SetNull(tfDaemonSetTolerationsType)
	}

	v.state = attr.ValueStateKnown
	obj, d := v.ToObjectValue(ctx)
	diags = append(diags, d...)
+ + return obj, diags +} + +func (v ProxyValue) ToHub(ctx context.Context) (*infrapb.ClusterProxy, diag.Diagnostics) { + + hub := &infrapb.ClusterProxy{} + hub.AllowInsecureBootstrap = getBoolValue(v.AllowInsecureBootstrap) + hub.BootstrapCA = getStringValue(v.BootstrapCa) + hub.Enabled = getBoolValue(v.Enabled) + hub.HttpProxy = getStringValue(v.HttpProxy) + hub.HttpsProxy = getStringValue(v.HttpsProxy) + hub.NoProxy = getStringValue(v.NoProxy) + hub.ProxyAuth = getStringValue(v.ProxyAuth) + + return hub, nil +} + +func (v ProxyValue) FromHub(ctx context.Context, hub *infrapb.ClusterProxy) (basetypes.ObjectValue, diag.Diagnostics) { + // Convert the hub object to terraform object + if hub.AllowInsecureBootstrap { + + v.AllowInsecureBootstrap = types.BoolValue(hub.AllowInsecureBootstrap) + } + if hub.BootstrapCA != "" { + v.BootstrapCa = types.StringValue(hub.BootstrapCA) + } + if hub.Enabled { + v.Enabled = types.BoolValue(hub.Enabled) + } + if hub.HttpProxy != "" { + v.HttpProxy = types.StringValue(hub.HttpProxy) + } + if hub.HttpsProxy != "" { + v.HttpsProxy = types.StringValue(hub.HttpsProxy) + } + if hub.NoProxy != "" { + v.NoProxy = types.StringValue(hub.NoProxy) + } + if hub.ProxyAuth != "" { + v.ProxyAuth = types.StringValue(hub.ProxyAuth) + } + + v.state = attr.ValueStateKnown + return v.ToObjectValue(ctx) +} + +func (v SharingValue) ToHub(ctx context.Context) (*infrapb.Sharing, diag.Diagnostics) { + + hub := &infrapb.Sharing{} + hub.Enabled = getBoolValue(v.Enabled) + + for _, project := range v.Projects.Elements() { + hub.Projects = append(hub.Projects, &infrapb.Projects{ + Name: getStringValue(project.(types.String)), + }) + } + + return hub, nil +} + +func (v SharingValue) FromHub(ctx context.Context, hub *infrapb.Sharing) (basetypes.ObjectValue, diag.Diagnostics) { + // Convert the hub object to terraform object + + v.Enabled = types.BoolValue(hub.Enabled) + + var tfProjects []attr.Value + for _, project := range hub.Projects { + tfProjects = 
append(tfProjects, types.StringValue(project.Name)) + + } + v.Projects, _ = types.SetValue(types.StringType, tfProjects) + + v.state = attr.ValueStateKnown + return v.ToObjectValue(ctx) +} + +func (v SystemComponentsPlacementValue) ToHub(ctx context.Context) (*infrapb.SystemComponentsPlacement, diag.Diagnostics) { + var diags diag.Diagnostics + hub := &infrapb.SystemComponentsPlacement{} + + hub.NodeSelector = convertFromTfMap(v.NodeSelector) + + for _, toleration := range v.Tolerations.Elements() { + h, d := toleration.(TolerationsValue).ToHub(ctx) + diags = append(diags, d...) + hub.Tolerations = append(hub.Tolerations, h) + } + + if !v.DaemonSetOverride.IsNull() && !v.DaemonSetOverride.IsUnknown() { + var daemonSetType DaemonSetOverrideType + tfDaemonSetValue, d := daemonSetType.ValueFromObject(ctx, v.DaemonSetOverride) + if d.HasError() { + diags = append(diags, d...) + return hub, diags + } + hub.DaemonSetOverride, diags = tfDaemonSetValue.(DaemonSetOverrideValue).ToHub(ctx) + } + + return hub, diags +} + +func (v SystemComponentsPlacementValue) FromHub(ctx context.Context, hub *infrapb.SystemComponentsPlacement) (basetypes.ObjectValue, diag.Diagnostics) { + + var diags, d diag.Diagnostics + if hub.NodeSelector != nil { + v.NodeSelector = convertToTfMap(hub.NodeSelector) + } + var tfTolerations []attr.Value + + tfTolerationsType := TolerationsType{ + ObjectType: types.ObjectType{ + AttrTypes: TolerationsValue{}.AttributeTypes(ctx), + }, + } + + if hub.Tolerations != nil { + // loop through the hub tolerations and convert them to terraform tolerations + for _, hub := range hub.Tolerations { + tfTol := &TolerationsValue{} + h, d := tfTol.FromHub(ctx, hub) + diags = append(diags, d...) + tfTolerations = append(tfTolerations, h) + } + + v.Tolerations, d = types.SetValue(tfTolerationsType, tfTolerations) + diags = append(diags, d...) 
+ } else { + v.Tolerations = types.SetNull(tfTolerationsType) + } + + if hub.DaemonSetOverride != nil { + tfDaemonSetOverride, d := NewDaemonSetOverrideValue(v.DaemonSetOverride.AttributeTypes(ctx), v.DaemonSetOverride.Attributes()) + if d.HasError() { + tfDaemonSetOverride = NewDaemonSetOverrideValueNull() + } + v.DaemonSetOverride, d = tfDaemonSetOverride.FromHub(ctx, hub.DaemonSetOverride) + diags = append(diags, d...) + } else { + v.DaemonSetOverride, d = NewDaemonSetOverrideValueNull().ToObjectValue(ctx) + diags = append(diags, d...) + } + + v.state = attr.ValueStateKnown + + obj, d := v.ToObjectValue(ctx) + diags = append(diags, d...) + + return obj, diags +} + +func (v ClusterSshValue) ToHub(ctx context.Context) (*infrapb.MksClusterSshConfig, diag.Diagnostics) { + hub := &infrapb.MksClusterSshConfig{} + + hub.PrivateKeyPath = getStringValue(v.PrivateKeyPath) + hub.Username = getStringValue(v.Username) + hub.Port = getStringValue(v.Port) + hub.Passphrase = getStringValue(v.Passphrase) + + return hub, nil +} + +func (v ClusterSshValue) FromHub(ctx context.Context, hub *infrapb.MksClusterSshConfig) (basetypes.ObjectValue, diag.Diagnostics) { + + if hub.PrivateKeyPath != "" { + v.PrivateKeyPath = types.StringValue(hub.PrivateKeyPath) + } + if hub.Username != "" { + v.Username = types.StringValue(hub.Username) + } + if hub.Port != "" { + v.Port = types.StringValue(hub.Port) + } + if hub.Passphrase != "" { + v.Passphrase = types.StringValue(hub.Passphrase) + } + + v.state = attr.ValueStateKnown + return v.ToObjectValue(ctx) +} + +func (v SshValue) ToHub(ctx context.Context) (*infrapb.MksNodeSshConfig, diag.Diagnostics) { + hub := &infrapb.MksNodeSshConfig{} + + hub.IpAddress = getStringValue(v.IpAddress) + hub.Passphrase = getStringValue(v.Passphrase) + hub.Port = getStringValue(v.Port) + hub.PrivateKeyPath = getStringValue(v.PrivateKeyPath) + hub.Username = getStringValue(v.Username) + + return hub, nil +} + +func (v SshValue) FromHub(ctx context.Context, hub 
*infrapb.MksNodeSshConfig) (basetypes.ObjectValue, diag.Diagnostics) {
	// Copy only the non-empty hub fields so an unset hub field does not
	// clobber an existing terraform value.
	if hub.IpAddress != "" {
		v.IpAddress = types.StringValue(hub.IpAddress)
	}
	if hub.Passphrase != "" {
		v.Passphrase = types.StringValue(hub.Passphrase)
	}
	if hub.Port != "" {
		v.Port = types.StringValue(hub.Port)
	}
	if hub.PrivateKeyPath != "" {
		v.PrivateKeyPath = types.StringValue(hub.PrivateKeyPath)
	}
	if hub.Username != "" {
		v.Username = types.StringValue(hub.Username)
	}

	v.state = attr.ValueStateKnown
	return v.ToObjectValue(ctx)
}

// ToHub converts this terraform NodesValue into a hub MksNode: scalar fields,
// the roles set, the labels map, the taints set, and the optional per-node
// ssh override. Diagnostics from each nested conversion are accumulated.
func (v NodesValue) ToHub(ctx context.Context) (*infrapb.MksNode, diag.Diagnostics) {
	var diags diag.Diagnostics

	hub := &infrapb.MksNode{}

	hub.Arch = getStringValue(v.Arch)
	hub.Hostname = getStringValue(v.Hostname)
	hub.OperatingSystem = getStringValue(v.OperatingSystem)
	hub.PrivateIP = getStringValue(v.PrivateIp)

	// NOTE(review): uses ValueString() directly rather than getStringValue
	// like the fields above — presumably equivalent for null values; confirm.
	hub.Interface = v.Interface.ValueString()

	for _, role := range v.Roles.Elements() {
		hub.Roles = append(hub.Roles, getStringValue(role.(types.String)))
	}

	hub.Labels = convertFromTfMap(v.Labels)

	for _, taint := range v.Taints.Elements() {
		h, d := taint.(TaintsValue).ToHub(ctx)
		diags = append(diags, d...)
		hub.Taints = append(hub.Taints, h)
	}

	if !v.Ssh.IsNull() && !v.Ssh.IsUnknown() {
		var sshType SshType
		tfSshValue, d := sshType.ValueFromObject(ctx, v.Ssh)
		if d.HasError() {
			// Abort early: the ssh object could not be read at all.
			diags = append(diags, d...)
			return hub, diags
		}
		hub.Ssh, d = tfSshValue.(SshValue).ToHub(ctx)
		diags = append(diags, d...)
	}

	return hub, diags
}

// FromHub converts a hub MksNode into this terraform NodesValue. Scalar
// fields are always overwritten (except Interface, which is only set when
// non-empty); nil taints map to a null set and a nil ssh config maps to a
// null ssh object.
func (v NodesValue) FromHub(ctx context.Context, hub *infrapb.MksNode) (NodesValue, diag.Diagnostics) {
	var diags, d diag.Diagnostics

	v.Arch = types.StringValue(hub.Arch)
	v.Hostname = types.StringValue(hub.Hostname)
	v.OperatingSystem = types.StringValue(hub.OperatingSystem)
	v.PrivateIp = types.StringValue(hub.PrivateIP)

	if hub.Interface != "" {
		v.Interface = types.StringValue(hub.Interface)
	}

	var tfRoles []attr.Value
	for _, role := range hub.Roles {
		tfRoles = append(tfRoles, types.StringValue(role))
	}

	v.Roles, d = types.SetValue(types.StringType, tfRoles)
	diags = append(diags, d...)

	v.Labels = convertToTfMap(hub.Labels)

	var tfTaints []attr.Value

	tfTaintsType := TaintsType{
		ObjectType: types.ObjectType{
			AttrTypes: TaintsValue{}.AttributeTypes(ctx),
		},
	}

	if hub.Taints != nil {
		// loop through the hub taints and convert them to terraform taints
		for _, hub := range hub.Taints {
			tfTaint := &TaintsValue{}
			h, d := tfTaint.FromHub(ctx, hub)
			diags = append(diags, d...)
			tfTaints = append(tfTaints, h)
		}

		v.Taints, d = types.SetValue(tfTaintsType, tfTaints)
		diags = append(diags, d...)
	} else {
		v.Taints = types.SetNull(tfTaintsType)
	}

	if hub.Ssh != nil {
		// Seed from existing terraform attributes; fall back to null on error.
		tfSsh, d := NewSshValue(v.Ssh.AttributeTypes(ctx), v.Ssh.Attributes())
		if d.HasError() {
			tfSsh = NewSshValueNull()
		}
		v.Ssh, d = tfSsh.FromHub(ctx, hub.Ssh)
		diags = append(diags, d...)
	} else {
		v.Ssh, d = NewSshValueNull().ToObjectValue(ctx)
		diags = append(diags, d...)
	}

	v.state = attr.ValueStateKnown

	return v, diags
}

// ToHub converts this terraform ConfigValue into a hub MksV3ConfigObject:
// scalar settings, the required network block, the nodes map, and the
// optional cluster-wide ssh config.
func (v ConfigValue) ToHub(ctx context.Context) (*infrapb.MksV3ConfigObject, diag.Diagnostics) {
	var diags, d diag.Diagnostics

	hub := &infrapb.MksV3ConfigObject{}

	hub.Location = getStringValue(v.Location)
	hub.AutoApproveNodes = getBoolValue(v.AutoApproveNodes)
	hub.DedicatedControlPlane = getBoolValue(v.DedicatedControlPlane)
	hub.HighAvailability = getBoolValue(v.HighAvailability)
	hub.KubernetesVersion = getStringValue(v.KubernetesVersion)
	hub.InstallerTtl = getInt64Value(v.InstallerTtl)

	var networkType NetworkType

	tfNetworkValue, d := networkType.ValueFromObject(ctx, v.Network)
	if d.HasError() {
		// Network is required; abort if it cannot be read.
		diags = append(diags, d...)
		return hub, diags
	}

	hub.Network, d = tfNetworkValue.(NetworkValue).ToHub(ctx)
	diags = append(diags, d...)

	for tfHostName, node := range v.Nodes.Elements() {
		h, d := node.(NodesValue).ToHub(ctx)
		diags = append(diags, d...)
		// The nodes map key must equal the node's hostname attribute; emit an
		// attribute-scoped error (not a panic) on mismatch.
		if tfHostName != h.Hostname {
			diags.AddAttributeError(path.Root(fmt.Sprintf("spec.config.nodes.%s", tfHostName)),
				"Mismatch in the node configuration",
				"We strongly enforce using same hostname as key in node configuration")
		}
		hub.Nodes = append(hub.Nodes, h)
	}

	if !v.ClusterSsh.IsNull() && !v.ClusterSsh.IsUnknown() {
		var sshType ClusterSshType
		tfSshValue, d := sshType.ValueFromObject(ctx, v.ClusterSsh)
		if d.HasError() {
			diags = append(diags, d...)
			return hub, diags
		}
		hub.Ssh, d = tfSshValue.(ClusterSshValue).ToHub(ctx)
		diags = append(diags, d...)
	}

	return hub, diags

}

// FromHub converts a hub MksV3ConfigObject into a terraform object value.
// Hub nodes are matched to existing terraform nodes by hostname so that
// terraform-only attributes are preserved; hub nodes with no terraform
// counterpart start from a null NodesValue.
func (v ConfigValue) FromHub(ctx context.Context, hub *infrapb.MksV3ConfigObject) (basetypes.ObjectValue, diag.Diagnostics) {
	var diags, d diag.Diagnostics

	// Booleans and Location are only overwritten when set on the hub side;
	// InstallerTtl and KubernetesVersion are overwritten unconditionally.
	if hub.Location != "" {
		v.Location = types.StringValue(hub.Location)
	}
	if hub.AutoApproveNodes {
		v.AutoApproveNodes = types.BoolValue(hub.AutoApproveNodes)
	}
	if hub.DedicatedControlPlane {
		v.DedicatedControlPlane = types.BoolValue(hub.DedicatedControlPlane)
	}
	if hub.HighAvailability {
		v.HighAvailability = types.BoolValue(hub.HighAvailability)
	}

	v.InstallerTtl = types.Int64Value(hub.InstallerTtl)
	v.KubernetesVersion = types.StringValue(hub.KubernetesVersion)

	network, d := NewNetworkValue(v.Network.AttributeTypes(ctx), v.Network.Attributes())
	if d.HasError() {
		network = NewNetworkValueNull()
	}
	v.Network, d = network.FromHub(ctx, hub.Network)
	diags = append(diags, d...)

	// Index the current terraform nodes by hostname for the merge below.
	tfNodeMap := make(map[string]NodesValue)
	for _, node := range v.Nodes.Elements() {
		tfNode := node.(NodesValue)
		tfNodeMap[getStringValue(tfNode.Hostname)] = tfNode
	}

	newTfNodes := make(map[string]attr.Value)
	// Compare the nodes in the hub and terraform
	for _, hubNode := range hub.Nodes {
		tfNode, ok := tfNodeMap[hubNode.Hostname]
		if !ok {
			tfNode = NewNodesValueNull()
		}
		h, d := tfNode.FromHub(ctx, hubNode)
		diags = append(diags, d...)
		newTfNodes[hubNode.Hostname] = h
	}

	tfNodeType := NodesType{
		ObjectType: types.ObjectType{
			AttrTypes: NodesValue{}.AttributeTypes(ctx),
		},
	}

	v.Nodes, d = types.MapValue(tfNodeType, newTfNodes)
	diags = append(diags, d...)

	if hub.Ssh != nil {
		tfSsh, d := NewClusterSshValue(v.ClusterSsh.AttributeTypes(ctx), v.ClusterSsh.Attributes())
		if d.HasError() {
			tfSsh = NewClusterSshValueNull()
		}
		v.ClusterSsh, d = tfSsh.FromHub(ctx, hub.Ssh)
		diags = append(diags, d...)
	} else {
		v.ClusterSsh, d = NewClusterSshValueNull().ToObjectValue(ctx)
		diags = append(diags, d...)
	}

	v.state = attr.ValueStateKnown

	obj, d := v.ToObjectValue(ctx)
	diags = append(diags, d...)

	return obj, diags
}

// ToHub converts this terraform BlueprintValue into a hub ClusterBlueprint.
// No diagnostics are produced.
func (v BlueprintValue) ToHub(ctx context.Context) (*infrapb.ClusterBlueprint, diag.Diagnostics) {
	hub := &infrapb.ClusterBlueprint{}

	hub.Name = getStringValue(v.Name)
	hub.Version = getStringValue(v.Version)

	return hub, nil
}

// FromHub converts a hub ClusterBlueprint into a terraform object value,
// overwriting both fields unconditionally.
func (v BlueprintValue) FromHub(ctx context.Context, hub *infrapb.ClusterBlueprint) (basetypes.ObjectValue, diag.Diagnostics) {
	v.Name = types.StringValue(hub.Name)
	v.Version = types.StringValue(hub.Version)

	v.state = attr.ValueStateKnown
	return v.ToObjectValue(ctx)
}

// ConvertMksClusterToHub converts a full terraform MksClusterModel into a hub
// infrapb.Cluster. Required blocks (metadata, blueprint, config) are always
// converted; optional blocks (cloud credentials, sharing, system components
// placement, proxy) are converted only when present. Conversion stops early
// when a nested object cannot be read at all.
func ConvertMksClusterToHub(ctx context.Context, v MksClusterModel) (*infrapb.Cluster, diag.Diagnostics) {
	var diags, d diag.Diagnostics

	hub := &infrapb.Cluster{}

	hub.Kind = getStringValue(v.Kind)
	hub.ApiVersion = getStringValue(v.ApiVersion)

	hub.Metadata, d = v.Metadata.ToHub(ctx)
	diags = append(diags, d...)

	hub.Spec = &infrapb.ClusterSpec{}
	hub.Spec.Type = getStringValue(v.Spec.SpecType)

	var blueprintType BlueprintType

	tfBlueprintValue, d := blueprintType.ValueFromObject(ctx, v.Spec.Blueprint)
	if d.HasError() {
		diags = append(diags, d...)
		return hub, diags
	}

	hub.Spec.Blueprint, d = tfBlueprintValue.(BlueprintValue).ToHub(ctx)
	diags = append(diags, d...)

	if !v.Spec.CloudCredentials.IsNull() && !v.Spec.CloudCredentials.IsUnknown() {
		hub.Spec.CloudCredentials = getStringValue(v.Spec.CloudCredentials)
	}

	if !v.Spec.Sharing.IsNull() && !v.Spec.Sharing.IsUnknown() {
		var sharingType SharingType
		tfSharingValue, d := sharingType.ValueFromObject(ctx, v.Spec.Sharing)
		if d.HasError() {
			diags = append(diags, d...)
			return hub, diags
		}
		hub.Spec.Sharing, d = tfSharingValue.(SharingValue).ToHub(ctx)
		diags = append(diags, d...)
	}

	if !v.Spec.SystemComponentsPlacement.IsNull() && !v.Spec.SystemComponentsPlacement.IsUnknown() {
		var scpType SystemComponentsPlacementType
		scp, d := scpType.ValueFromObject(ctx, v.Spec.SystemComponentsPlacement)
		if d.HasError() {
			diags = append(diags, d...)
			return hub, diags
		}
		hub.Spec.SystemComponentsPlacement, d = scp.(SystemComponentsPlacementValue).ToHub(ctx)
		diags = append(diags, d...)
	}

	if !v.Spec.Proxy.IsNull() && !v.Spec.Proxy.IsUnknown() {
		var proxyType ProxyType
		tfProxyValue, d := proxyType.ValueFromObject(ctx, v.Spec.Proxy)
		if d.HasError() {
			diags = append(diags, d...)
			return hub, diags
		}
		hub.Spec.Proxy, d = tfProxyValue.(ProxyValue).ToHub(ctx)
		diags = append(diags, d...)
	}

	var configType ConfigType

	tfConfigValue, d := configType.ValueFromObject(ctx, v.Spec.Config)
	if d.HasError() {
		diags = append(diags, d...)
		return hub, diags
	}

	mksConfig, d := tfConfigValue.(ConfigValue).ToHub(ctx)
	diags = append(diags, d...)

	// The cluster spec config is the MKS variant of the ClusterSpec oneof.
	hub.Spec.Config = &infrapb.ClusterSpec_Mks{
		Mks: mksConfig,
	}

	return hub, diags
}

// ConvertMksClusterFromHub converts a hub infrapb.Cluster back into the
// terraform MksClusterModel in place. Optional hub blocks that are nil are
// written back as null terraform objects; present blocks are merged over the
// existing terraform attributes via the per-type FromHub methods.
func ConvertMksClusterFromHub(ctx context.Context, hub *infrapb.Cluster, tf *MksClusterModel) diag.Diagnostics {
	// Convert the hub object to terraform object
	var diags, d diag.Diagnostics

	tf.Kind = types.StringValue(hub.Kind)
	tf.ApiVersion = types.StringValue(hub.ApiVersion)

	tf.Metadata, d = tf.Metadata.FromHub(ctx, hub.Metadata)
	diags = append(diags, d...)

	tf.Spec.SpecType = types.StringValue(hub.Spec.Type)

	if hub.Spec.Blueprint != nil {
		tfBlueprint, d := NewBlueprintValue(tf.Spec.Blueprint.AttributeTypes(ctx), tf.Spec.Blueprint.Attributes())
		if d.HasError() {
			tfBlueprint = NewBlueprintValueNull()
		}
		tf.Spec.Blueprint, d = tfBlueprint.FromHub(ctx, hub.Spec.Blueprint)
		diags = append(diags, d...)
	}

	if hub.Spec.CloudCredentials != "" {
		tf.Spec.CloudCredentials = types.StringValue(hub.Spec.CloudCredentials)
	}

	if hub.Spec.Sharing != nil {
		tfSharing, d := NewSharingValue(tf.Spec.Sharing.AttributeTypes(ctx), tf.Spec.Sharing.Attributes())
		if d.HasError() {
			tfSharing = NewSharingValueNull()
		}
		tf.Spec.Sharing, d = tfSharing.FromHub(ctx, hub.Spec.Sharing)
		diags = append(diags, d...)
	} else {
		tf.Spec.Sharing, d = NewSharingValueNull().ToObjectValue(ctx)
		diags = append(diags, d...)
	}

	if hub.Spec.SystemComponentsPlacement != nil {
		tfSystemComponentsPlacement, d := NewSystemComponentsPlacementValue(
			tf.Spec.SystemComponentsPlacement.AttributeTypes(ctx),
			tf.Spec.SystemComponentsPlacement.Attributes(),
		)
		if d.HasError() {
			tfSystemComponentsPlacement = NewSystemComponentsPlacementValueNull()
		}
		tf.Spec.SystemComponentsPlacement, d = tfSystemComponentsPlacement.FromHub(ctx, hub.Spec.SystemComponentsPlacement)

		diags = append(diags, d...)
	} else {
		tf.Spec.SystemComponentsPlacement, d = NewSystemComponentsPlacementValueNull().ToObjectValue(ctx)
		diags = append(diags, d...)
	}

	if hub.Spec.Proxy != nil {
		tfProxy, d := NewProxyValue(tf.Spec.Proxy.AttributeTypes(ctx), tf.Spec.Proxy.Attributes())
		if d.HasError() {
			tfProxy = NewProxyValueNull()
		}
		tf.Spec.Proxy, d = tfProxy.FromHub(ctx, hub.Spec.Proxy)
		diags = append(diags, d...)
	} else {
		tf.Spec.Proxy, d = NewProxyValueNull().ToObjectValue(ctx)
		diags = append(diags, d...)
	}

	if hub.Spec.Config != nil {
		// NOTE(review): unconditional assertion to the MKS oneof variant —
		// panics if the hub ever carries a different config type; confirm
		// callers only pass MKS clusters here.
		hubConfig := hub.Spec.Config.(*infrapb.ClusterSpec_Mks).Mks
		tfConfig, d := NewConfigValue(tf.Spec.Config.AttributeTypes(ctx), tf.Spec.Config.Attributes())
		if d.HasError() {
			tfConfig = NewConfigValueNull()
		}
		tf.Spec.Config, d = tfConfig.FromHub(ctx, hubConfig)
		diags = append(diags, d...)
	}

	tf.Spec.state = attr.ValueStateKnown

	return diags
}

// WaitForClusterApplyOperation polls the cluster status on every ticker tick
// until the status reaches OK (success), Failed (error diagnostic with the
// reported reason), or the timeout channel fires (timeout diagnostic).
// Submitted status and nil/partial status payloads keep the loop polling.
func WaitForClusterApplyOperation(ctx context.Context, client typed.Client, cluster *infrapb.Cluster, timeout <-chan time.Time, ticker *time.Ticker) diag.Diagnostics {
	var diags diag.Diagnostics
	for {
		select {
		case <-timeout:
			// Timeout reached
			diags.AddError("Timeout reached while waiting for cluster operation to complete", "")
			return diags

		case <-ticker.C:
			uCluster, err := client.InfraV3().Cluster().Status(ctx, options.StatusOptions{
				Name:    cluster.Metadata.Name,
				Project: cluster.Metadata.Project,
			})
			if err != nil {
				// Error occurred while fetching cluster status
				diags.AddError("Error occurred while fetching cluster status", err.Error())
				return diags
			}

			if uCluster == nil {
				continue
			}

			if uCluster.Status != nil && uCluster.Status.Mks != nil {
				uClusterCommonStatus := uCluster.Status.CommonStatus
				switch uClusterCommonStatus.ConditionStatus {
				case commonpb.ConditionStatus_StatusSubmitted:
					// Submitted: operation still in progress, keep polling.
					continue
				case commonpb.ConditionStatus_StatusOK:
					// Completed
					return diags
				case commonpb.ConditionStatus_StatusFailed:
					failureReason := uClusterCommonStatus.Reason
					diags.AddError("Cluster operation failed", failureReason)
					return diags
				}
			}
		}
	}
}

// WaitForClusterDeleteOperation polls Get on every ticker tick until the
// cluster is gone (a DynamicClientGetError with HTTP 404), some other get
// error occurs (error diagnostic), or the timeout channel fires.
func WaitForClusterDeleteOperation(ctx context.Context, client typed.Client, name string, project string, timeout <-chan time.Time, ticker *time.Ticker) diag.Diagnostics {
	var diags diag.Diagnostics
	for {
		select {
		case <-timeout:
			// Timeout reached
			diags.AddError("Timeout reached while deleting the cluster resource", "")
			return diags

		case <-ticker.C:
			_, err := client.InfraV3().Cluster().Get(ctx, options.GetOptions{
				Name:    name,
				Project: project,
			})
			// Only a typed DynamicClientGetError is inspected; 404 means the
			// delete finished successfully.
			if err, ok := err.(*dynamic.DynamicClientGetError); ok && err != nil {
				switch err.StatusCode {
				case http.StatusNotFound:
					return diags
				default:
					diags.AddError("Cluster Deletion failed", err.Error())
return diags + } + } + } + } +} diff --git a/internal/resource_mks_cluster/mks_cluster_resource_gen.go b/internal/resource_mks_cluster/mks_cluster_resource_gen.go new file mode 100644 index 00000000..dd492ba5 --- /dev/null +++ b/internal/resource_mks_cluster/mks_cluster_resource_gen.go @@ -0,0 +1,11090 @@ +// Code generated by terraform-plugin-framework-generator DO NOT EDIT. + +package resource_mks_cluster + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/resource/schema" +) + +func MksClusterResourceSchema(ctx context.Context) schema.Schema { + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "api_version": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "api version", + MarkdownDescription: "api version", + Default: stringdefault.StaticString("infra.k8smgmt.io/v3"), + }, + "kind": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "kind", + MarkdownDescription: "kind", + Default: stringdefault.StaticString("Cluster"), + }, + "metadata": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "annotations": schema.MapAttribute{ + ElementType: types.StringType, + Optional: true, + Computed: true, + Description: "annotations of the resource", + MarkdownDescription: "annotations of the resource", + }, + "description": schema.StringAttribute{ + Optional: true, + Description: "description of the resource", + MarkdownDescription: "description of the resource", + }, + "labels": 
schema.MapAttribute{ + ElementType: types.StringType, + Optional: true, + Computed: true, + Description: "labels of the resource", + MarkdownDescription: "labels of the resource", + }, + "name": schema.StringAttribute{ + Required: true, + Description: "name of the resource", + MarkdownDescription: "name of the resource", + }, + "project": schema.StringAttribute{ + Required: true, + Description: "Project of the resource", + MarkdownDescription: "Project of the resource", + }, + }, + CustomType: MetadataType{ + ObjectType: types.ObjectType{ + AttrTypes: MetadataValue{}.AttributeTypes(ctx), + }, + }, + Required: true, + Description: "metadata of the resource", + MarkdownDescription: "metadata of the resource", + }, + "spec": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "blueprint": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + }, + "version": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Version of the blueprint", + MarkdownDescription: "Version of the blueprint", + Default: stringdefault.StaticString("latest"), + }, + }, + CustomType: BlueprintType{ + ObjectType: types.ObjectType{ + AttrTypes: BlueprintValue{}.AttributeTypes(ctx), + }, + }, + Required: true, + }, + "cloud_credentials": schema.StringAttribute{ + Optional: true, + Description: "The credentials to be used to ssh into the Clusster Nodes", + MarkdownDescription: "The credentials to be used to ssh into the Clusster Nodes", + }, + "config": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "auto_approve_nodes": schema.BoolAttribute{ + Optional: true, + Description: "Auto approves incoming nodes by default", + MarkdownDescription: "Auto approves incoming nodes by default", + }, + "cluster_ssh": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "passphrase": schema.StringAttribute{ + Optional: true, + Description: 
"Provide ssh passphrase", + MarkdownDescription: "Provide ssh passphrase", + }, + "port": schema.StringAttribute{ + Optional: true, + Description: "Provide ssh port", + MarkdownDescription: "Provide ssh port", + }, + "private_key_path": schema.StringAttribute{ + Optional: true, + Description: "Provide local path to the private key", + MarkdownDescription: "Provide local path to the private key", + }, + "username": schema.StringAttribute{ + Optional: true, + Description: "Provide the ssh username", + MarkdownDescription: "Provide the ssh username", + }, + }, + CustomType: ClusterSshType{ + ObjectType: types.ObjectType{ + AttrTypes: ClusterSshValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + Description: "SSH config for all the nodes within the cluster", + MarkdownDescription: "SSH config for all the nodes within the cluster", + }, + "dedicated_control_plane": schema.BoolAttribute{ + Optional: true, + Description: "Select this option for preventing scheduling of user workloads on Control Plane nodes", + MarkdownDescription: "Select this option for preventing scheduling of user workloads on Control Plane nodes", + }, + "high_availability": schema.BoolAttribute{ + Optional: true, + Description: "Select this option for highly available control plane. Minimum three control plane nodes are required", + MarkdownDescription: "Select this option for highly available control plane. 
Minimum three control plane nodes are required", + }, + "installer_ttl": schema.Int64Attribute{ + Optional: true, + Description: "Installer TTL Configuration", + MarkdownDescription: "Installer TTL Configuration", + Default: int64default.StaticInt64(365), + }, + "kubernetes_upgrade": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "params": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "worker_concurrency": schema.StringAttribute{ + Required: true, + Description: "It can be number or percentage", + MarkdownDescription: "It can be number or percentage", + }, + }, + CustomType: ParamsType{ + ObjectType: types.ObjectType{ + AttrTypes: ParamsValue{}.AttributeTypes(ctx), + }, + }, + Required: true, + }, + "strategy": schema.StringAttribute{ + Required: true, + Description: "Kubernetes upgrade strategy for worker nodes and Valid options are: concurrent/sequential", + MarkdownDescription: "Kubernetes upgrade strategy for worker nodes and Valid options are: concurrent/sequential", + }, + }, + CustomType: KubernetesUpgradeType{ + ObjectType: types.ObjectType{ + AttrTypes: KubernetesUpgradeValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + }, + "kubernetes_version": schema.StringAttribute{ + Required: true, + Description: "Kubernetes version of the Control Plane", + MarkdownDescription: "Kubernetes version of the Control Plane", + }, + "location": schema.StringAttribute{ + Optional: true, + Description: "The data center location where the cluster nodes will be launched", + MarkdownDescription: "The data center location where the cluster nodes will be launched", + }, + "network": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "cni": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + Description: "Provide the CNI name, e.g., Calico or Cilium", + MarkdownDescription: "Provide the CNI name, e.g., Calico or 
Cilium", + }, + "version": schema.StringAttribute{ + Required: true, + Description: "Provide the CNI version, e.g., 3.26.1", + MarkdownDescription: "Provide the CNI version, e.g., 3.26.1", + }, + }, + CustomType: CniType{ + ObjectType: types.ObjectType{ + AttrTypes: CniValue{}.AttributeTypes(ctx), + }, + }, + Required: true, + Description: "MKS Cluster CNI Specification", + MarkdownDescription: "MKS Cluster CNI Specification", + }, + "ipv6": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "pod_subnet": schema.StringAttribute{ + Optional: true, + Description: "Kubernetes pod subnet", + MarkdownDescription: "Kubernetes pod subnet", + }, + "service_subnet": schema.StringAttribute{ + Optional: true, + Description: "Kubernetes service subnet", + MarkdownDescription: "Kubernetes service subnet", + }, + }, + CustomType: Ipv6Type{ + ObjectType: types.ObjectType{ + AttrTypes: Ipv6Value{}.AttributeTypes(ctx), + }, + }, + Optional: true, + }, + "pod_subnet": schema.StringAttribute{ + Required: true, + Description: "Kubernetes pod subnet", + MarkdownDescription: "Kubernetes pod subnet", + }, + "service_subnet": schema.StringAttribute{ + Required: true, + Description: "Kubernetes service subnet", + MarkdownDescription: "Kubernetes service subnet", + }, + }, + CustomType: NetworkType{ + ObjectType: types.ObjectType{ + AttrTypes: NetworkValue{}.AttributeTypes(ctx), + }, + }, + Required: true, + Description: "MKS Cluster Network Specification", + MarkdownDescription: "MKS Cluster Network Specification", + }, + "nodes": schema.MapNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "arch": schema.StringAttribute{ + Required: true, + Description: "System Architecture of the node", + MarkdownDescription: "System Architecture of the node", + }, + "hostname": schema.StringAttribute{ + Required: true, + Description: "Hostname of the node", + MarkdownDescription: "Hostname of the node", + }, + "interface": 
schema.StringAttribute{ + Optional: true, + Description: "Interface to be used on the node", + MarkdownDescription: "Interface to be used on the node", + }, + "labels": schema.MapAttribute{ + ElementType: types.StringType, + Optional: true, + Computed: true, + Description: "labels to be added to the node", + MarkdownDescription: "labels to be added to the node", + }, + "operating_system": schema.StringAttribute{ + Required: true, + Description: "OS of the node", + MarkdownDescription: "OS of the node", + }, + "private_ip": schema.StringAttribute{ + Required: true, + Description: "Private ip address of the node", + MarkdownDescription: "Private ip address of the node", + }, + "roles": schema.SetAttribute{ + ElementType: types.StringType, + Required: true, + Description: "Valid roles are: 'ControlPlane', 'Worker', 'Storage'", + MarkdownDescription: "Valid roles are: 'ControlPlane', 'Worker', 'Storage'", + }, + "ssh": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "ip_address": schema.StringAttribute{ + Optional: true, + Description: "Use this to override node level ssh details", + MarkdownDescription: "Use this to override node level ssh details", + }, + "passphrase": schema.StringAttribute{ + Optional: true, + Description: "SSH Passphrase", + MarkdownDescription: "SSH Passphrase", + }, + "port": schema.StringAttribute{ + Optional: true, + Description: "SSH Port", + MarkdownDescription: "SSH Port", + }, + "private_key_path": schema.StringAttribute{ + Optional: true, + Description: "Specify Path to SSH private key", + MarkdownDescription: "Specify Path to SSH private key", + }, + "username": schema.StringAttribute{ + Optional: true, + Description: "SSH Username", + MarkdownDescription: "SSH Username", + }, + }, + CustomType: SshType{ + ObjectType: types.ObjectType{ + AttrTypes: SshValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + Description: "MKS Node SSH definition", + MarkdownDescription: "MKS Node SSH definition", + }, + 
"taints": schema.SetNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "effect": schema.StringAttribute{ + Required: true, + }, + "key": schema.StringAttribute{ + Required: true, + }, + "value": schema.StringAttribute{ + Optional: true, + }, + }, + CustomType: TaintsType{ + ObjectType: types.ObjectType{ + AttrTypes: TaintsValue{}.AttributeTypes(ctx), + }, + }, + }, + Optional: true, + Description: "taints to be added to the node", + MarkdownDescription: "taints to be added to the node", + }, + }, + CustomType: NodesType{ + ObjectType: types.ObjectType{ + AttrTypes: NodesValue{}.AttributeTypes(ctx), + }, + }, + }, + Required: true, + Description: "holds node configuration for the cluster", + MarkdownDescription: "holds node configuration for the cluster", + }, + }, + CustomType: ConfigType{ + ObjectType: types.ObjectType{ + AttrTypes: ConfigValue{}.AttributeTypes(ctx), + }, + }, + Required: true, + Description: "MKS V3 cluster specification", + MarkdownDescription: "MKS V3 cluster specification", + }, + "proxy": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "allow_insecure_bootstrap": schema.BoolAttribute{ + Optional: true, + }, + "bootstrap_ca": schema.StringAttribute{ + Optional: true, + }, + "enabled": schema.BoolAttribute{ + Required: true, + }, + "http_proxy": schema.StringAttribute{ + Required: true, + }, + "https_proxy": schema.StringAttribute{ + Optional: true, + }, + "no_proxy": schema.StringAttribute{ + Optional: true, + }, + "proxy_auth": schema.StringAttribute{ + Optional: true, + }, + }, + CustomType: ProxyType{ + ObjectType: types.ObjectType{ + AttrTypes: ProxyValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + }, + "sharing": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Required: true, + }, + "projects": schema.SetNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: 
map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + }, + }, + CustomType: ProjectsType{ + ObjectType: types.ObjectType{ + AttrTypes: ProjectsValue{}.AttributeTypes(ctx), + }, + }, + }, + Required: true, + }, + }, + CustomType: SharingType{ + ObjectType: types.ObjectType{ + AttrTypes: SharingValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + }, + "system_components_placement": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "daemon_set_override": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "daemon_set_tolerations": schema.SetNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "effect": schema.StringAttribute{ + Optional: true, + }, + "key": schema.StringAttribute{ + Optional: true, + }, + "operator": schema.StringAttribute{ + Optional: true, + }, + "toleration_seconds": schema.Int64Attribute{ + Optional: true, + }, + "value": schema.StringAttribute{ + Optional: true, + }, + }, + CustomType: DaemonSetTolerationsType{ + ObjectType: types.ObjectType{ + AttrTypes: DaemonSetTolerationsValue{}.AttributeTypes(ctx), + }, + }, + }, + Optional: true, + }, + "node_selection_enabled": schema.BoolAttribute{ + Optional: true, + }, + }, + CustomType: DaemonSetOverrideType{ + ObjectType: types.ObjectType{ + AttrTypes: DaemonSetOverrideValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + }, + "node_selector": schema.MapAttribute{ + ElementType: types.StringType, + Optional: true, + Computed: true, + }, + "tolerations": schema.SetNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "effect": schema.StringAttribute{ + Optional: true, + }, + "key": schema.StringAttribute{ + Optional: true, + }, + "operator": schema.StringAttribute{ + Optional: true, + }, + "toleration_seconds": schema.Int64Attribute{ + Optional: true, + }, + "value": schema.StringAttribute{ + Optional: true, 
+ }, + }, + CustomType: TolerationsType{ + ObjectType: types.ObjectType{ + AttrTypes: TolerationsValue{}.AttributeTypes(ctx), + }, + }, + }, + Optional: true, + }, + }, + CustomType: SystemComponentsPlacementType{ + ObjectType: types.ObjectType{ + AttrTypes: SystemComponentsPlacementValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + }, + "type": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "The type of the cluster this spec corresponds to", + MarkdownDescription: "The type of the cluster this spec corresponds to", + Default: stringdefault.StaticString("mks"), + }, + }, + CustomType: SpecType{ + ObjectType: types.ObjectType{ + AttrTypes: SpecValue{}.AttributeTypes(ctx), + }, + }, + Required: true, + Description: "cluster specification", + MarkdownDescription: "cluster specification", + }, + }, + } +} + +type MksClusterModel struct { + ApiVersion types.String `tfsdk:"api_version"` + Kind types.String `tfsdk:"kind"` + Metadata MetadataValue `tfsdk:"metadata"` + Spec SpecValue `tfsdk:"spec"` +} + +var _ basetypes.ObjectTypable = MetadataType{} + +type MetadataType struct { + basetypes.ObjectType +} + +func (t MetadataType) Equal(o attr.Type) bool { + other, ok := o.(MetadataType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t MetadataType) String() string { + return "MetadataType" +} + +func (t MetadataType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + annotationsAttribute, ok := attributes["annotations"] + + if !ok { + diags.AddError( + "Attribute Missing", + `annotations is missing from object`) + + return nil, diags + } + + annotationsVal, ok := annotationsAttribute.(basetypes.MapValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`annotations expected to be basetypes.MapValue, was: %T`, annotationsAttribute)) + } + + 
descriptionAttribute, ok := attributes["description"] + + if !ok { + diags.AddError( + "Attribute Missing", + `description is missing from object`) + + return nil, diags + } + + descriptionVal, ok := descriptionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`description expected to be basetypes.StringValue, was: %T`, descriptionAttribute)) + } + + labelsAttribute, ok := attributes["labels"] + + if !ok { + diags.AddError( + "Attribute Missing", + `labels is missing from object`) + + return nil, diags + } + + labelsVal, ok := labelsAttribute.(basetypes.MapValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`labels expected to be basetypes.MapValue, was: %T`, labelsAttribute)) + } + + nameAttribute, ok := attributes["name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `name is missing from object`) + + return nil, diags + } + + nameVal, ok := nameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`name expected to be basetypes.StringValue, was: %T`, nameAttribute)) + } + + projectAttribute, ok := attributes["project"] + + if !ok { + diags.AddError( + "Attribute Missing", + `project is missing from object`) + + return nil, diags + } + + projectVal, ok := projectAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`project expected to be basetypes.StringValue, was: %T`, projectAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return MetadataValue{ + Annotations: annotationsVal, + Description: descriptionVal, + Labels: labelsVal, + Name: nameVal, + Project: projectVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewMetadataValueNull() MetadataValue { + return MetadataValue{ + state: attr.ValueStateNull, + } +} + +func NewMetadataValueUnknown() MetadataValue { + return MetadataValue{ + state: attr.ValueStateUnknown, + } +} + +func 
NewMetadataValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (MetadataValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing MetadataValue Attribute Value", + "While creating a MetadataValue value, a missing attribute value was detected. "+ + "A MetadataValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("MetadataValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid MetadataValue Attribute Type", + "While creating a MetadataValue value, an invalid attribute value was detected. "+ + "A MetadataValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("MetadataValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("MetadataValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra MetadataValue Attribute Value", + "While creating a MetadataValue value, an extra attribute value was detected. "+ + "A MetadataValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra MetadataValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewMetadataValueUnknown(), diags + } + + annotationsAttribute, ok := attributes["annotations"] + + if !ok { + diags.AddError( + "Attribute Missing", + `annotations is missing from object`) + + return NewMetadataValueUnknown(), diags + } + + annotationsVal, ok := annotationsAttribute.(basetypes.MapValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`annotations expected to be basetypes.MapValue, was: %T`, annotationsAttribute)) + } + + descriptionAttribute, ok := attributes["description"] + + if !ok { + diags.AddError( + "Attribute Missing", + `description is missing from object`) + + return NewMetadataValueUnknown(), diags + } + + descriptionVal, ok := descriptionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`description expected to be basetypes.StringValue, was: %T`, descriptionAttribute)) + } + + labelsAttribute, ok := attributes["labels"] + + if !ok { + diags.AddError( + "Attribute Missing", + `labels is missing from object`) + + return NewMetadataValueUnknown(), diags + } + + labelsVal, ok := labelsAttribute.(basetypes.MapValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`labels expected to be basetypes.MapValue, was: %T`, labelsAttribute)) + } + + nameAttribute, ok := attributes["name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `name is missing from object`) + + return NewMetadataValueUnknown(), diags + } + + nameVal, ok := nameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`name expected to be basetypes.StringValue, was: %T`, nameAttribute)) + } + + projectAttribute, ok := attributes["project"] + + if !ok { + diags.AddError( + "Attribute Missing", + `project is 
missing from object`) + + return NewMetadataValueUnknown(), diags + } + + projectVal, ok := projectAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`project expected to be basetypes.StringValue, was: %T`, projectAttribute)) + } + + if diags.HasError() { + return NewMetadataValueUnknown(), diags + } + + return MetadataValue{ + Annotations: annotationsVal, + Description: descriptionVal, + Labels: labelsVal, + Name: nameVal, + Project: projectVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewMetadataValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) MetadataValue { + object, diags := NewMetadataValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewMetadataValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t MetadataType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewMetadataValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewMetadataValueUnknown(), nil + } + + if in.IsNull() { + return NewMetadataValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewMetadataValueMust(MetadataValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t 
MetadataType) ValueType(ctx context.Context) attr.Value { + return MetadataValue{} +} + +var _ basetypes.ObjectValuable = MetadataValue{} + +type MetadataValue struct { + Annotations basetypes.MapValue `tfsdk:"annotations"` + Description basetypes.StringValue `tfsdk:"description"` + Labels basetypes.MapValue `tfsdk:"labels"` + Name basetypes.StringValue `tfsdk:"name"` + Project basetypes.StringValue `tfsdk:"project"` + state attr.ValueState +} + +func (v MetadataValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 5) + + var val tftypes.Value + var err error + + attrTypes["annotations"] = basetypes.MapType{ + ElemType: types.StringType, + }.TerraformType(ctx) + attrTypes["description"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["labels"] = basetypes.MapType{ + ElemType: types.StringType, + }.TerraformType(ctx) + attrTypes["name"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["project"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 5) + + val, err = v.Annotations.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["annotations"] = val + + val, err = v.Description.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["description"] = val + + val, err = v.Labels.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["labels"] = val + + val, err = v.Name.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["name"] = val + + val, err = v.Project.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["project"] = val 
+ + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v MetadataValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v MetadataValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v MetadataValue) String() string { + return "MetadataValue" +} + +func (v MetadataValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + var annotationsVal basetypes.MapValue + switch { + case v.Annotations.IsUnknown(): + annotationsVal = types.MapUnknown(types.StringType) + case v.Annotations.IsNull(): + annotationsVal = types.MapNull(types.StringType) + default: + var d diag.Diagnostics + annotationsVal, d = types.MapValue(types.StringType, v.Annotations.Elements()) + diags.Append(d...) + } + + if diags.HasError() { + return types.ObjectUnknown(map[string]attr.Type{ + "annotations": basetypes.MapType{ + ElemType: types.StringType, + }, + "description": basetypes.StringType{}, + "labels": basetypes.MapType{ + ElemType: types.StringType, + }, + "name": basetypes.StringType{}, + "project": basetypes.StringType{}, + }), diags + } + + var labelsVal basetypes.MapValue + switch { + case v.Labels.IsUnknown(): + labelsVal = types.MapUnknown(types.StringType) + case v.Labels.IsNull(): + labelsVal = types.MapNull(types.StringType) + default: + var d diag.Diagnostics + labelsVal, d = types.MapValue(types.StringType, v.Labels.Elements()) + diags.Append(d...) 
+ } + + if diags.HasError() { + return types.ObjectUnknown(map[string]attr.Type{ + "annotations": basetypes.MapType{ + ElemType: types.StringType, + }, + "description": basetypes.StringType{}, + "labels": basetypes.MapType{ + ElemType: types.StringType, + }, + "name": basetypes.StringType{}, + "project": basetypes.StringType{}, + }), diags + } + + attributeTypes := map[string]attr.Type{ + "annotations": basetypes.MapType{ + ElemType: types.StringType, + }, + "description": basetypes.StringType{}, + "labels": basetypes.MapType{ + ElemType: types.StringType, + }, + "name": basetypes.StringType{}, + "project": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "annotations": annotationsVal, + "description": v.Description, + "labels": labelsVal, + "name": v.Name, + "project": v.Project, + }) + + return objVal, diags +} + +func (v MetadataValue) Equal(o attr.Value) bool { + other, ok := o.(MetadataValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Annotations.Equal(other.Annotations) { + return false + } + + if !v.Description.Equal(other.Description) { + return false + } + + if !v.Labels.Equal(other.Labels) { + return false + } + + if !v.Name.Equal(other.Name) { + return false + } + + if !v.Project.Equal(other.Project) { + return false + } + + return true +} + +func (v MetadataValue) Type(ctx context.Context) attr.Type { + return MetadataType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v MetadataValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "annotations": basetypes.MapType{ + ElemType: types.StringType, + }, + "description": basetypes.StringType{}, + "labels": 
basetypes.MapType{ + ElemType: types.StringType, + }, + "name": basetypes.StringType{}, + "project": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = SpecType{} + +type SpecType struct { + basetypes.ObjectType +} + +func (t SpecType) Equal(o attr.Type) bool { + other, ok := o.(SpecType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t SpecType) String() string { + return "SpecType" +} + +func (t SpecType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + blueprintAttribute, ok := attributes["blueprint"] + + if !ok { + diags.AddError( + "Attribute Missing", + `blueprint is missing from object`) + + return nil, diags + } + + blueprintVal, ok := blueprintAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`blueprint expected to be basetypes.ObjectValue, was: %T`, blueprintAttribute)) + } + + cloudCredentialsAttribute, ok := attributes["cloud_credentials"] + + if !ok { + diags.AddError( + "Attribute Missing", + `cloud_credentials is missing from object`) + + return nil, diags + } + + cloudCredentialsVal, ok := cloudCredentialsAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`cloud_credentials expected to be basetypes.StringValue, was: %T`, cloudCredentialsAttribute)) + } + + configAttribute, ok := attributes["config"] + + if !ok { + diags.AddError( + "Attribute Missing", + `config is missing from object`) + + return nil, diags + } + + configVal, ok := configAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`config expected to be basetypes.ObjectValue, was: %T`, configAttribute)) + } + + proxyAttribute, ok := attributes["proxy"] + + if !ok { + diags.AddError( + "Attribute Missing", + `proxy is missing from object`) + + 
return nil, diags + } + + proxyVal, ok := proxyAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`proxy expected to be basetypes.ObjectValue, was: %T`, proxyAttribute)) + } + + sharingAttribute, ok := attributes["sharing"] + + if !ok { + diags.AddError( + "Attribute Missing", + `sharing is missing from object`) + + return nil, diags + } + + sharingVal, ok := sharingAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`sharing expected to be basetypes.ObjectValue, was: %T`, sharingAttribute)) + } + + systemComponentsPlacementAttribute, ok := attributes["system_components_placement"] + + if !ok { + diags.AddError( + "Attribute Missing", + `system_components_placement is missing from object`) + + return nil, diags + } + + systemComponentsPlacementVal, ok := systemComponentsPlacementAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`system_components_placement expected to be basetypes.ObjectValue, was: %T`, systemComponentsPlacementAttribute)) + } + + typeAttribute, ok := attributes["type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `type is missing from object`) + + return nil, diags + } + + typeVal, ok := typeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`type expected to be basetypes.StringValue, was: %T`, typeAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return SpecValue{ + Blueprint: blueprintVal, + CloudCredentials: cloudCredentialsVal, + Config: configVal, + Proxy: proxyVal, + Sharing: sharingVal, + SystemComponentsPlacement: systemComponentsPlacementVal, + SpecType: typeVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewSpecValueNull() SpecValue { + return SpecValue{ + state: attr.ValueStateNull, + } +} + +func NewSpecValueUnknown() SpecValue { + return SpecValue{ + state: attr.ValueStateUnknown, + 
} +} + +func NewSpecValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (SpecValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing SpecValue Attribute Value", + "While creating a SpecValue value, a missing attribute value was detected. "+ + "A SpecValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("SpecValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid SpecValue Attribute Type", + "While creating a SpecValue value, an invalid attribute value was detected. "+ + "A SpecValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("SpecValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("SpecValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra SpecValue Attribute Value", + "While creating a SpecValue value, an extra attribute value was detected. "+ + "A SpecValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra SpecValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewSpecValueUnknown(), diags + } + + blueprintAttribute, ok := attributes["blueprint"] + + if !ok { + diags.AddError( + "Attribute Missing", + `blueprint is missing from object`) + + return NewSpecValueUnknown(), diags + } + + blueprintVal, ok := blueprintAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`blueprint expected to be basetypes.ObjectValue, was: %T`, blueprintAttribute)) + } + + cloudCredentialsAttribute, ok := attributes["cloud_credentials"] + + if !ok { + diags.AddError( + "Attribute Missing", + `cloud_credentials is missing from object`) + + return NewSpecValueUnknown(), diags + } + + cloudCredentialsVal, ok := cloudCredentialsAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`cloud_credentials expected to be basetypes.StringValue, was: %T`, cloudCredentialsAttribute)) + } + + configAttribute, ok := attributes["config"] + + if !ok { + diags.AddError( + "Attribute Missing", + `config is missing from object`) + + return NewSpecValueUnknown(), diags + } + + configVal, ok := configAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`config expected to be basetypes.ObjectValue, was: %T`, configAttribute)) + } + + proxyAttribute, ok := attributes["proxy"] + + if !ok { + diags.AddError( + "Attribute Missing", + `proxy is missing from object`) + + return NewSpecValueUnknown(), diags + } + + proxyVal, ok := proxyAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`proxy expected to be basetypes.ObjectValue, was: %T`, proxyAttribute)) + } + + sharingAttribute, ok := attributes["sharing"] + + if !ok { + diags.AddError( + "Attribute Missing", 
+ `sharing is missing from object`) + + return NewSpecValueUnknown(), diags + } + + sharingVal, ok := sharingAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`sharing expected to be basetypes.ObjectValue, was: %T`, sharingAttribute)) + } + + systemComponentsPlacementAttribute, ok := attributes["system_components_placement"] + + if !ok { + diags.AddError( + "Attribute Missing", + `system_components_placement is missing from object`) + + return NewSpecValueUnknown(), diags + } + + systemComponentsPlacementVal, ok := systemComponentsPlacementAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`system_components_placement expected to be basetypes.ObjectValue, was: %T`, systemComponentsPlacementAttribute)) + } + + typeAttribute, ok := attributes["type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `type is missing from object`) + + return NewSpecValueUnknown(), diags + } + + typeVal, ok := typeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`type expected to be basetypes.StringValue, was: %T`, typeAttribute)) + } + + if diags.HasError() { + return NewSpecValueUnknown(), diags + } + + return SpecValue{ + Blueprint: blueprintVal, + CloudCredentials: cloudCredentialsVal, + Config: configVal, + Proxy: proxyVal, + Sharing: sharingVal, + SystemComponentsPlacement: systemComponentsPlacementVal, + SpecType: typeVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewSpecValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) SpecValue { + object, diags := NewSpecValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewSpecValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t SpecType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewSpecValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewSpecValueUnknown(), nil + } + + if in.IsNull() { + return NewSpecValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewSpecValueMust(SpecValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t SpecType) ValueType(ctx context.Context) attr.Value { + return SpecValue{} +} + +var _ basetypes.ObjectValuable = SpecValue{} + +type SpecValue struct { + Blueprint basetypes.ObjectValue `tfsdk:"blueprint"` + CloudCredentials basetypes.StringValue `tfsdk:"cloud_credentials"` + Config basetypes.ObjectValue `tfsdk:"config"` + Proxy basetypes.ObjectValue `tfsdk:"proxy"` + Sharing basetypes.ObjectValue `tfsdk:"sharing"` + SystemComponentsPlacement basetypes.ObjectValue `tfsdk:"system_components_placement"` + SpecType basetypes.StringValue `tfsdk:"type"` + state attr.ValueState +} + +func (v SpecValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 7) + + var val tftypes.Value + var err error + + attrTypes["blueprint"] = basetypes.ObjectType{ + AttrTypes: 
BlueprintValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["cloud_credentials"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["config"] = basetypes.ObjectType{ + AttrTypes: ConfigValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["proxy"] = basetypes.ObjectType{ + AttrTypes: ProxyValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["sharing"] = basetypes.ObjectType{ + AttrTypes: SharingValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["system_components_placement"] = basetypes.ObjectType{ + AttrTypes: SystemComponentsPlacementValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["type"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 7) + + val, err = v.Blueprint.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["blueprint"] = val + + val, err = v.CloudCredentials.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["cloud_credentials"] = val + + val, err = v.Config.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["config"] = val + + val, err = v.Proxy.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["proxy"] = val + + val, err = v.Sharing.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["sharing"] = val + + val, err = v.SystemComponentsPlacement.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["system_components_placement"] = val + + val, err = v.SpecType.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, 
tftypes.UnknownValue), err + } + + vals["type"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v SpecValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v SpecValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v SpecValue) String() string { + return "SpecValue" +} + +func (v SpecValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + var blueprint basetypes.ObjectValue + + if v.Blueprint.IsNull() { + blueprint = types.ObjectNull( + BlueprintValue{}.AttributeTypes(ctx), + ) + } + + if v.Blueprint.IsUnknown() { + blueprint = types.ObjectUnknown( + BlueprintValue{}.AttributeTypes(ctx), + ) + } + + if !v.Blueprint.IsNull() && !v.Blueprint.IsUnknown() { + blueprint = types.ObjectValueMust( + BlueprintValue{}.AttributeTypes(ctx), + v.Blueprint.Attributes(), + ) + } + + var config basetypes.ObjectValue + + if v.Config.IsNull() { + config = types.ObjectNull( + ConfigValue{}.AttributeTypes(ctx), + ) + } + + if v.Config.IsUnknown() { + config = types.ObjectUnknown( + ConfigValue{}.AttributeTypes(ctx), + ) + } + + if !v.Config.IsNull() && !v.Config.IsUnknown() { + config = types.ObjectValueMust( + ConfigValue{}.AttributeTypes(ctx), + v.Config.Attributes(), + ) + } + + var proxy basetypes.ObjectValue + + if v.Proxy.IsNull() { + proxy = types.ObjectNull( + ProxyValue{}.AttributeTypes(ctx), + ) + } + + if v.Proxy.IsUnknown() { + proxy = types.ObjectUnknown( + ProxyValue{}.AttributeTypes(ctx), + ) + } + + if !v.Proxy.IsNull() && 
!v.Proxy.IsUnknown() { + proxy = types.ObjectValueMust( + ProxyValue{}.AttributeTypes(ctx), + v.Proxy.Attributes(), + ) + } + + var sharing basetypes.ObjectValue + + if v.Sharing.IsNull() { + sharing = types.ObjectNull( + SharingValue{}.AttributeTypes(ctx), + ) + } + + if v.Sharing.IsUnknown() { + sharing = types.ObjectUnknown( + SharingValue{}.AttributeTypes(ctx), + ) + } + + if !v.Sharing.IsNull() && !v.Sharing.IsUnknown() { + sharing = types.ObjectValueMust( + SharingValue{}.AttributeTypes(ctx), + v.Sharing.Attributes(), + ) + } + + var systemComponentsPlacement basetypes.ObjectValue + + if v.SystemComponentsPlacement.IsNull() { + systemComponentsPlacement = types.ObjectNull( + SystemComponentsPlacementValue{}.AttributeTypes(ctx), + ) + } + + if v.SystemComponentsPlacement.IsUnknown() { + systemComponentsPlacement = types.ObjectUnknown( + SystemComponentsPlacementValue{}.AttributeTypes(ctx), + ) + } + + if !v.SystemComponentsPlacement.IsNull() && !v.SystemComponentsPlacement.IsUnknown() { + systemComponentsPlacement = types.ObjectValueMust( + SystemComponentsPlacementValue{}.AttributeTypes(ctx), + v.SystemComponentsPlacement.Attributes(), + ) + } + + attributeTypes := map[string]attr.Type{ + "blueprint": basetypes.ObjectType{ + AttrTypes: BlueprintValue{}.AttributeTypes(ctx), + }, + "cloud_credentials": basetypes.StringType{}, + "config": basetypes.ObjectType{ + AttrTypes: ConfigValue{}.AttributeTypes(ctx), + }, + "proxy": basetypes.ObjectType{ + AttrTypes: ProxyValue{}.AttributeTypes(ctx), + }, + "sharing": basetypes.ObjectType{ + AttrTypes: SharingValue{}.AttributeTypes(ctx), + }, + "system_components_placement": basetypes.ObjectType{ + AttrTypes: SystemComponentsPlacementValue{}.AttributeTypes(ctx), + }, + "type": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + 
map[string]attr.Value{ + "blueprint": blueprint, + "cloud_credentials": v.CloudCredentials, + "config": config, + "proxy": proxy, + "sharing": sharing, + "system_components_placement": systemComponentsPlacement, + "type": v.SpecType, + }) + + return objVal, diags +} + +func (v SpecValue) Equal(o attr.Value) bool { + other, ok := o.(SpecValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Blueprint.Equal(other.Blueprint) { + return false + } + + if !v.CloudCredentials.Equal(other.CloudCredentials) { + return false + } + + if !v.Config.Equal(other.Config) { + return false + } + + if !v.Proxy.Equal(other.Proxy) { + return false + } + + if !v.Sharing.Equal(other.Sharing) { + return false + } + + if !v.SystemComponentsPlacement.Equal(other.SystemComponentsPlacement) { + return false + } + + if !v.SpecType.Equal(other.SpecType) { + return false + } + + return true +} + +func (v SpecValue) Type(ctx context.Context) attr.Type { + return SpecType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v SpecValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "blueprint": basetypes.ObjectType{ + AttrTypes: BlueprintValue{}.AttributeTypes(ctx), + }, + "cloud_credentials": basetypes.StringType{}, + "config": basetypes.ObjectType{ + AttrTypes: ConfigValue{}.AttributeTypes(ctx), + }, + "proxy": basetypes.ObjectType{ + AttrTypes: ProxyValue{}.AttributeTypes(ctx), + }, + "sharing": basetypes.ObjectType{ + AttrTypes: SharingValue{}.AttributeTypes(ctx), + }, + "system_components_placement": basetypes.ObjectType{ + AttrTypes: SystemComponentsPlacementValue{}.AttributeTypes(ctx), + }, + "type": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = BlueprintType{} + +type BlueprintType struct { + basetypes.ObjectType +} + +func (t BlueprintType) Equal(o attr.Type) bool { + other, ok := 
o.(BlueprintType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t BlueprintType) String() string { + return "BlueprintType" +} + +func (t BlueprintType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + nameAttribute, ok := attributes["name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `name is missing from object`) + + return nil, diags + } + + nameVal, ok := nameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`name expected to be basetypes.StringValue, was: %T`, nameAttribute)) + } + + versionAttribute, ok := attributes["version"] + + if !ok { + diags.AddError( + "Attribute Missing", + `version is missing from object`) + + return nil, diags + } + + versionVal, ok := versionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`version expected to be basetypes.StringValue, was: %T`, versionAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return BlueprintValue{ + Name: nameVal, + Version: versionVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewBlueprintValueNull() BlueprintValue { + return BlueprintValue{ + state: attr.ValueStateNull, + } +} + +func NewBlueprintValueUnknown() BlueprintValue { + return BlueprintValue{ + state: attr.ValueStateUnknown, + } +} + +func NewBlueprintValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (BlueprintValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing BlueprintValue Attribute Value", + "While creating a BlueprintValue value, a missing 
attribute value was detected. "+ + "A BlueprintValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("BlueprintValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid BlueprintValue Attribute Type", + "While creating a BlueprintValue value, an invalid attribute value was detected. "+ + "A BlueprintValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("BlueprintValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("BlueprintValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra BlueprintValue Attribute Value", + "While creating a BlueprintValue value, an extra attribute value was detected. "+ + "A BlueprintValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra BlueprintValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewBlueprintValueUnknown(), diags + } + + nameAttribute, ok := attributes["name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `name is missing from object`) + + return NewBlueprintValueUnknown(), diags + } + + nameVal, ok := nameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`name expected to be basetypes.StringValue, was: %T`, nameAttribute)) + } + + versionAttribute, ok := attributes["version"] + + if !ok { + diags.AddError( + "Attribute Missing", + `version is missing from object`) + + return NewBlueprintValueUnknown(), diags + } + + versionVal, ok := versionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`version expected to be basetypes.StringValue, was: %T`, versionAttribute)) + } + + if diags.HasError() { + return NewBlueprintValueUnknown(), diags + } + + return BlueprintValue{ + Name: nameVal, + Version: versionVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewBlueprintValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) BlueprintValue { + object, diags := NewBlueprintValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewBlueprintValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t BlueprintType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewBlueprintValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewBlueprintValueUnknown(), nil + } + + if in.IsNull() { + return NewBlueprintValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewBlueprintValueMust(BlueprintValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t BlueprintType) ValueType(ctx context.Context) attr.Value { + return BlueprintValue{} +} + +var _ basetypes.ObjectValuable = BlueprintValue{} + +type BlueprintValue struct { + Name basetypes.StringValue `tfsdk:"name"` + Version basetypes.StringValue `tfsdk:"version"` + state attr.ValueState +} + +func (v BlueprintValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 2) + + var val tftypes.Value + var err error + + attrTypes["name"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["version"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 2) + + val, err = v.Name.ToTerraformValue(ctx) + + if err != 
nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["name"] = val + + val, err = v.Version.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["version"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v BlueprintValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v BlueprintValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v BlueprintValue) String() string { + return "BlueprintValue" +} + +func (v BlueprintValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "name": basetypes.StringType{}, + "version": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "name": v.Name, + "version": v.Version, + }) + + return objVal, diags +} + +func (v BlueprintValue) Equal(o attr.Value) bool { + other, ok := o.(BlueprintValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Name.Equal(other.Name) { + return false + } + + if !v.Version.Equal(other.Version) { + return false + } + + return true +} + +func (v BlueprintValue) Type(ctx context.Context) attr.Type { + return BlueprintType{ + 
basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v BlueprintValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "name": basetypes.StringType{}, + "version": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = ConfigType{} + +type ConfigType struct { + basetypes.ObjectType +} + +func (t ConfigType) Equal(o attr.Type) bool { + other, ok := o.(ConfigType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t ConfigType) String() string { + return "ConfigType" +} + +func (t ConfigType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + autoApproveNodesAttribute, ok := attributes["auto_approve_nodes"] + + if !ok { + diags.AddError( + "Attribute Missing", + `auto_approve_nodes is missing from object`) + + return nil, diags + } + + autoApproveNodesVal, ok := autoApproveNodesAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`auto_approve_nodes expected to be basetypes.BoolValue, was: %T`, autoApproveNodesAttribute)) + } + + clusterSshAttribute, ok := attributes["cluster_ssh"] + + if !ok { + diags.AddError( + "Attribute Missing", + `cluster_ssh is missing from object`) + + return nil, diags + } + + clusterSshVal, ok := clusterSshAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`cluster_ssh expected to be basetypes.ObjectValue, was: %T`, clusterSshAttribute)) + } + + dedicatedControlPlaneAttribute, ok := attributes["dedicated_control_plane"] + + if !ok { + diags.AddError( + "Attribute Missing", + `dedicated_control_plane is missing from object`) + + return nil, diags + } + + dedicatedControlPlaneVal, ok := dedicatedControlPlaneAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + 
fmt.Sprintf(`dedicated_control_plane expected to be basetypes.BoolValue, was: %T`, dedicatedControlPlaneAttribute)) + } + + highAvailabilityAttribute, ok := attributes["high_availability"] + + if !ok { + diags.AddError( + "Attribute Missing", + `high_availability is missing from object`) + + return nil, diags + } + + highAvailabilityVal, ok := highAvailabilityAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`high_availability expected to be basetypes.BoolValue, was: %T`, highAvailabilityAttribute)) + } + + installerTtlAttribute, ok := attributes["installer_ttl"] + + if !ok { + diags.AddError( + "Attribute Missing", + `installer_ttl is missing from object`) + + return nil, diags + } + + installerTtlVal, ok := installerTtlAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`installer_ttl expected to be basetypes.Int64Value, was: %T`, installerTtlAttribute)) + } + + kubernetesUpgradeAttribute, ok := attributes["kubernetes_upgrade"] + + if !ok { + diags.AddError( + "Attribute Missing", + `kubernetes_upgrade is missing from object`) + + return nil, diags + } + + kubernetesUpgradeVal, ok := kubernetesUpgradeAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`kubernetes_upgrade expected to be basetypes.ObjectValue, was: %T`, kubernetesUpgradeAttribute)) + } + + kubernetesVersionAttribute, ok := attributes["kubernetes_version"] + + if !ok { + diags.AddError( + "Attribute Missing", + `kubernetes_version is missing from object`) + + return nil, diags + } + + kubernetesVersionVal, ok := kubernetesVersionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`kubernetes_version expected to be basetypes.StringValue, was: %T`, kubernetesVersionAttribute)) + } + + locationAttribute, ok := attributes["location"] + + if !ok { + diags.AddError( + "Attribute Missing", + 
`location is missing from object`) + + return nil, diags + } + + locationVal, ok := locationAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`location expected to be basetypes.StringValue, was: %T`, locationAttribute)) + } + + networkAttribute, ok := attributes["network"] + + if !ok { + diags.AddError( + "Attribute Missing", + `network is missing from object`) + + return nil, diags + } + + networkVal, ok := networkAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`network expected to be basetypes.ObjectValue, was: %T`, networkAttribute)) + } + + nodesAttribute, ok := attributes["nodes"] + + if !ok { + diags.AddError( + "Attribute Missing", + `nodes is missing from object`) + + return nil, diags + } + + nodesVal, ok := nodesAttribute.(basetypes.MapValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`nodes expected to be basetypes.MapValue, was: %T`, nodesAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return ConfigValue{ + AutoApproveNodes: autoApproveNodesVal, + ClusterSsh: clusterSshVal, + DedicatedControlPlane: dedicatedControlPlaneVal, + HighAvailability: highAvailabilityVal, + InstallerTtl: installerTtlVal, + KubernetesUpgrade: kubernetesUpgradeVal, + KubernetesVersion: kubernetesVersionVal, + Location: locationVal, + Network: networkVal, + Nodes: nodesVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewConfigValueNull() ConfigValue { + return ConfigValue{ + state: attr.ValueStateNull, + } +} + +func NewConfigValueUnknown() ConfigValue { + return ConfigValue{ + state: attr.ValueStateUnknown, + } +} + +func NewConfigValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (ConfigValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := 
range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing ConfigValue Attribute Value", + "While creating a ConfigValue value, a missing attribute value was detected. "+ + "A ConfigValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ConfigValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid ConfigValue Attribute Type", + "While creating a ConfigValue value, an invalid attribute value was detected. "+ + "A ConfigValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ConfigValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("ConfigValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra ConfigValue Attribute Value", + "While creating a ConfigValue value, an extra attribute value was detected. "+ + "A ConfigValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra ConfigValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewConfigValueUnknown(), diags + } + + autoApproveNodesAttribute, ok := attributes["auto_approve_nodes"] + + if !ok { + diags.AddError( + "Attribute Missing", + `auto_approve_nodes is missing from object`) + + return NewConfigValueUnknown(), diags + } + + autoApproveNodesVal, ok := autoApproveNodesAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`auto_approve_nodes expected to be basetypes.BoolValue, was: %T`, autoApproveNodesAttribute)) + } + + clusterSshAttribute, ok := attributes["cluster_ssh"] + + if !ok { + diags.AddError( + "Attribute Missing", + `cluster_ssh is missing from object`) + + return NewConfigValueUnknown(), diags + } + + clusterSshVal, ok := clusterSshAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`cluster_ssh expected to be basetypes.ObjectValue, was: %T`, clusterSshAttribute)) + } + + dedicatedControlPlaneAttribute, ok := attributes["dedicated_control_plane"] + + if !ok { + diags.AddError( + "Attribute Missing", + `dedicated_control_plane is missing from object`) + + return NewConfigValueUnknown(), diags + } + + dedicatedControlPlaneVal, ok := dedicatedControlPlaneAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`dedicated_control_plane expected to be basetypes.BoolValue, was: %T`, dedicatedControlPlaneAttribute)) + } + + highAvailabilityAttribute, ok := attributes["high_availability"] + + if !ok { + diags.AddError( + "Attribute Missing", + `high_availability is missing from object`) + + return NewConfigValueUnknown(), diags + } + + highAvailabilityVal, ok := highAvailabilityAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + 
fmt.Sprintf(`high_availability expected to be basetypes.BoolValue, was: %T`, highAvailabilityAttribute)) + } + + installerTtlAttribute, ok := attributes["installer_ttl"] + + if !ok { + diags.AddError( + "Attribute Missing", + `installer_ttl is missing from object`) + + return NewConfigValueUnknown(), diags + } + + installerTtlVal, ok := installerTtlAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`installer_ttl expected to be basetypes.Int64Value, was: %T`, installerTtlAttribute)) + } + + kubernetesUpgradeAttribute, ok := attributes["kubernetes_upgrade"] + + if !ok { + diags.AddError( + "Attribute Missing", + `kubernetes_upgrade is missing from object`) + + return NewConfigValueUnknown(), diags + } + + kubernetesUpgradeVal, ok := kubernetesUpgradeAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`kubernetes_upgrade expected to be basetypes.ObjectValue, was: %T`, kubernetesUpgradeAttribute)) + } + + kubernetesVersionAttribute, ok := attributes["kubernetes_version"] + + if !ok { + diags.AddError( + "Attribute Missing", + `kubernetes_version is missing from object`) + + return NewConfigValueUnknown(), diags + } + + kubernetesVersionVal, ok := kubernetesVersionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`kubernetes_version expected to be basetypes.StringValue, was: %T`, kubernetesVersionAttribute)) + } + + locationAttribute, ok := attributes["location"] + + if !ok { + diags.AddError( + "Attribute Missing", + `location is missing from object`) + + return NewConfigValueUnknown(), diags + } + + locationVal, ok := locationAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`location expected to be basetypes.StringValue, was: %T`, locationAttribute)) + } + + networkAttribute, ok := attributes["network"] + + if !ok { + diags.AddError( + "Attribute 
Missing", + `network is missing from object`) + + return NewConfigValueUnknown(), diags + } + + networkVal, ok := networkAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`network expected to be basetypes.ObjectValue, was: %T`, networkAttribute)) + } + + nodesAttribute, ok := attributes["nodes"] + + if !ok { + diags.AddError( + "Attribute Missing", + `nodes is missing from object`) + + return NewConfigValueUnknown(), diags + } + + nodesVal, ok := nodesAttribute.(basetypes.MapValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`nodes expected to be basetypes.MapValue, was: %T`, nodesAttribute)) + } + + if diags.HasError() { + return NewConfigValueUnknown(), diags + } + + return ConfigValue{ + AutoApproveNodes: autoApproveNodesVal, + ClusterSsh: clusterSshVal, + DedicatedControlPlane: dedicatedControlPlaneVal, + HighAvailability: highAvailabilityVal, + InstallerTtl: installerTtlVal, + KubernetesUpgrade: kubernetesUpgradeVal, + KubernetesVersion: kubernetesVersionVal, + Location: locationVal, + Network: networkVal, + Nodes: nodesVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewConfigValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) ConfigValue { + object, diags := NewConfigValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewConfigValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t ConfigType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewConfigValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewConfigValueUnknown(), nil + } + + if in.IsNull() { + return NewConfigValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewConfigValueMust(ConfigValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t ConfigType) ValueType(ctx context.Context) attr.Value { + return ConfigValue{} +} + +var _ basetypes.ObjectValuable = ConfigValue{} + +type ConfigValue struct { + AutoApproveNodes basetypes.BoolValue `tfsdk:"auto_approve_nodes"` + ClusterSsh basetypes.ObjectValue `tfsdk:"cluster_ssh"` + DedicatedControlPlane basetypes.BoolValue `tfsdk:"dedicated_control_plane"` + HighAvailability basetypes.BoolValue `tfsdk:"high_availability"` + InstallerTtl basetypes.Int64Value `tfsdk:"installer_ttl"` + KubernetesUpgrade basetypes.ObjectValue `tfsdk:"kubernetes_upgrade"` + KubernetesVersion basetypes.StringValue `tfsdk:"kubernetes_version"` + Location basetypes.StringValue `tfsdk:"location"` + Network basetypes.ObjectValue `tfsdk:"network"` + Nodes basetypes.MapValue `tfsdk:"nodes"` + state attr.ValueState +} + +func (v ConfigValue) 
ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 10) + + var val tftypes.Value + var err error + + attrTypes["auto_approve_nodes"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["cluster_ssh"] = basetypes.ObjectType{ + AttrTypes: ClusterSshValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["dedicated_control_plane"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["high_availability"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["installer_ttl"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["kubernetes_upgrade"] = basetypes.ObjectType{ + AttrTypes: KubernetesUpgradeValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["kubernetes_version"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["location"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["network"] = basetypes.ObjectType{ + AttrTypes: NetworkValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["nodes"] = basetypes.MapType{ + ElemType: NodesValue{}.Type(ctx), + }.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 10) + + val, err = v.AutoApproveNodes.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["auto_approve_nodes"] = val + + val, err = v.ClusterSsh.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["cluster_ssh"] = val + + val, err = v.DedicatedControlPlane.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["dedicated_control_plane"] = val + + val, err = v.HighAvailability.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["high_availability"] = val + + val, err = 
v.InstallerTtl.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["installer_ttl"] = val + + val, err = v.KubernetesUpgrade.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["kubernetes_upgrade"] = val + + val, err = v.KubernetesVersion.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["kubernetes_version"] = val + + val, err = v.Location.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["location"] = val + + val, err = v.Network.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["network"] = val + + val, err = v.Nodes.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["nodes"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v ConfigValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v ConfigValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v ConfigValue) String() string { + return "ConfigValue" +} + +func (v ConfigValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + var clusterSsh basetypes.ObjectValue + + if v.ClusterSsh.IsNull() { + clusterSsh = types.ObjectNull( + ClusterSshValue{}.AttributeTypes(ctx), + ) + } + + if 
v.ClusterSsh.IsUnknown() { + clusterSsh = types.ObjectUnknown( + ClusterSshValue{}.AttributeTypes(ctx), + ) + } + + if !v.ClusterSsh.IsNull() && !v.ClusterSsh.IsUnknown() { + clusterSsh = types.ObjectValueMust( + ClusterSshValue{}.AttributeTypes(ctx), + v.ClusterSsh.Attributes(), + ) + } + + var kubernetesUpgrade basetypes.ObjectValue + + if v.KubernetesUpgrade.IsNull() { + kubernetesUpgrade = types.ObjectNull( + KubernetesUpgradeValue{}.AttributeTypes(ctx), + ) + } + + if v.KubernetesUpgrade.IsUnknown() { + kubernetesUpgrade = types.ObjectUnknown( + KubernetesUpgradeValue{}.AttributeTypes(ctx), + ) + } + + if !v.KubernetesUpgrade.IsNull() && !v.KubernetesUpgrade.IsUnknown() { + kubernetesUpgrade = types.ObjectValueMust( + KubernetesUpgradeValue{}.AttributeTypes(ctx), + v.KubernetesUpgrade.Attributes(), + ) + } + + var network basetypes.ObjectValue + + if v.Network.IsNull() { + network = types.ObjectNull( + NetworkValue{}.AttributeTypes(ctx), + ) + } + + if v.Network.IsUnknown() { + network = types.ObjectUnknown( + NetworkValue{}.AttributeTypes(ctx), + ) + } + + if !v.Network.IsNull() && !v.Network.IsUnknown() { + network = types.ObjectValueMust( + NetworkValue{}.AttributeTypes(ctx), + v.Network.Attributes(), + ) + } + + nodes := types.MapValueMust( + NodesType{ + basetypes.ObjectType{ + AttrTypes: NodesValue{}.AttributeTypes(ctx), + }, + }, + v.Nodes.Elements(), + ) + + if v.Nodes.IsNull() { + nodes = types.MapNull( + NodesType{ + basetypes.ObjectType{ + AttrTypes: NodesValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.Nodes.IsUnknown() { + nodes = types.MapUnknown( + NodesType{ + basetypes.ObjectType{ + AttrTypes: NodesValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + attributeTypes := map[string]attr.Type{ + "auto_approve_nodes": basetypes.BoolType{}, + "cluster_ssh": basetypes.ObjectType{ + AttrTypes: ClusterSshValue{}.AttributeTypes(ctx), + }, + "dedicated_control_plane": basetypes.BoolType{}, + "high_availability": basetypes.BoolType{}, + 
"installer_ttl": basetypes.Int64Type{}, + "kubernetes_upgrade": basetypes.ObjectType{ + AttrTypes: KubernetesUpgradeValue{}.AttributeTypes(ctx), + }, + "kubernetes_version": basetypes.StringType{}, + "location": basetypes.StringType{}, + "network": basetypes.ObjectType{ + AttrTypes: NetworkValue{}.AttributeTypes(ctx), + }, + "nodes": basetypes.MapType{ + ElemType: NodesValue{}.Type(ctx), + }, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "auto_approve_nodes": v.AutoApproveNodes, + "cluster_ssh": clusterSsh, + "dedicated_control_plane": v.DedicatedControlPlane, + "high_availability": v.HighAvailability, + "installer_ttl": v.InstallerTtl, + "kubernetes_upgrade": kubernetesUpgrade, + "kubernetes_version": v.KubernetesVersion, + "location": v.Location, + "network": network, + "nodes": nodes, + }) + + return objVal, diags +} + +func (v ConfigValue) Equal(o attr.Value) bool { + other, ok := o.(ConfigValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.AutoApproveNodes.Equal(other.AutoApproveNodes) { + return false + } + + if !v.ClusterSsh.Equal(other.ClusterSsh) { + return false + } + + if !v.DedicatedControlPlane.Equal(other.DedicatedControlPlane) { + return false + } + + if !v.HighAvailability.Equal(other.HighAvailability) { + return false + } + + if !v.InstallerTtl.Equal(other.InstallerTtl) { + return false + } + + if !v.KubernetesUpgrade.Equal(other.KubernetesUpgrade) { + return false + } + + if !v.KubernetesVersion.Equal(other.KubernetesVersion) { + return false + } + + if !v.Location.Equal(other.Location) { + return false + } + + if !v.Network.Equal(other.Network) { + return false + } + + if !v.Nodes.Equal(other.Nodes) { + return false + } + + return true +} + +func (v 
ConfigValue) Type(ctx context.Context) attr.Type { + return ConfigType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v ConfigValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "auto_approve_nodes": basetypes.BoolType{}, + "cluster_ssh": basetypes.ObjectType{ + AttrTypes: ClusterSshValue{}.AttributeTypes(ctx), + }, + "dedicated_control_plane": basetypes.BoolType{}, + "high_availability": basetypes.BoolType{}, + "installer_ttl": basetypes.Int64Type{}, + "kubernetes_upgrade": basetypes.ObjectType{ + AttrTypes: KubernetesUpgradeValue{}.AttributeTypes(ctx), + }, + "kubernetes_version": basetypes.StringType{}, + "location": basetypes.StringType{}, + "network": basetypes.ObjectType{ + AttrTypes: NetworkValue{}.AttributeTypes(ctx), + }, + "nodes": basetypes.MapType{ + ElemType: NodesValue{}.Type(ctx), + }, + } +} + +var _ basetypes.ObjectTypable = ClusterSshType{} + +type ClusterSshType struct { + basetypes.ObjectType +} + +func (t ClusterSshType) Equal(o attr.Type) bool { + other, ok := o.(ClusterSshType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t ClusterSshType) String() string { + return "ClusterSshType" +} + +func (t ClusterSshType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + passphraseAttribute, ok := attributes["passphrase"] + + if !ok { + diags.AddError( + "Attribute Missing", + `passphrase is missing from object`) + + return nil, diags + } + + passphraseVal, ok := passphraseAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`passphrase expected to be basetypes.StringValue, was: %T`, passphraseAttribute)) + } + + portAttribute, ok := attributes["port"] + + if !ok { + diags.AddError( + "Attribute Missing", + `port is missing from object`) + + return 
nil, diags + } + + portVal, ok := portAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`port expected to be basetypes.StringValue, was: %T`, portAttribute)) + } + + privateKeyPathAttribute, ok := attributes["private_key_path"] + + if !ok { + diags.AddError( + "Attribute Missing", + `private_key_path is missing from object`) + + return nil, diags + } + + privateKeyPathVal, ok := privateKeyPathAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`private_key_path expected to be basetypes.StringValue, was: %T`, privateKeyPathAttribute)) + } + + usernameAttribute, ok := attributes["username"] + + if !ok { + diags.AddError( + "Attribute Missing", + `username is missing from object`) + + return nil, diags + } + + usernameVal, ok := usernameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`username expected to be basetypes.StringValue, was: %T`, usernameAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return ClusterSshValue{ + Passphrase: passphraseVal, + Port: portVal, + PrivateKeyPath: privateKeyPathVal, + Username: usernameVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewClusterSshValueNull() ClusterSshValue { + return ClusterSshValue{ + state: attr.ValueStateNull, + } +} + +func NewClusterSshValueUnknown() ClusterSshValue { + return ClusterSshValue{ + state: attr.ValueStateUnknown, + } +} + +func NewClusterSshValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (ClusterSshValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing ClusterSshValue Attribute Value", + "While creating a ClusterSshValue value, a 
missing attribute value was detected. "+ + "A ClusterSshValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ClusterSshValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid ClusterSshValue Attribute Type", + "While creating a ClusterSshValue value, an invalid attribute value was detected. "+ + "A ClusterSshValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ClusterSshValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("ClusterSshValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra ClusterSshValue Attribute Value", + "While creating a ClusterSshValue value, an extra attribute value was detected. "+ + "A ClusterSshValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra ClusterSshValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewClusterSshValueUnknown(), diags + } + + passphraseAttribute, ok := attributes["passphrase"] + + if !ok { + diags.AddError( + "Attribute Missing", + `passphrase is missing from object`) + + return NewClusterSshValueUnknown(), diags + } + + passphraseVal, ok := passphraseAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`passphrase expected to be basetypes.StringValue, was: %T`, passphraseAttribute)) + } + + portAttribute, ok := attributes["port"] + + if !ok { + diags.AddError( + "Attribute Missing", + `port is missing from object`) + + return NewClusterSshValueUnknown(), diags + } + + portVal, ok := portAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`port expected to be basetypes.StringValue, was: %T`, portAttribute)) + } + + privateKeyPathAttribute, ok := attributes["private_key_path"] + + if !ok { + diags.AddError( + "Attribute Missing", + `private_key_path is missing from object`) + + return NewClusterSshValueUnknown(), diags + } + + privateKeyPathVal, ok := privateKeyPathAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`private_key_path expected to be basetypes.StringValue, was: %T`, privateKeyPathAttribute)) + } + + usernameAttribute, ok := attributes["username"] + + if !ok { + diags.AddError( + "Attribute Missing", + `username is missing from object`) + + return NewClusterSshValueUnknown(), diags + } + + usernameVal, ok := usernameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`username expected to be basetypes.StringValue, was: %T`, usernameAttribute)) + } + + if diags.HasError() { + return NewClusterSshValueUnknown(), 
diags + } + + return ClusterSshValue{ + Passphrase: passphraseVal, + Port: portVal, + PrivateKeyPath: privateKeyPathVal, + Username: usernameVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewClusterSshValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) ClusterSshValue { + object, diags := NewClusterSshValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewClusterSshValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t ClusterSshType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewClusterSshValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewClusterSshValueUnknown(), nil + } + + if in.IsNull() { + return NewClusterSshValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewClusterSshValueMust(ClusterSshValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t ClusterSshType) ValueType(ctx context.Context) attr.Value { + return ClusterSshValue{} +} + +var _ basetypes.ObjectValuable = ClusterSshValue{} + +type ClusterSshValue struct { + Passphrase basetypes.StringValue `tfsdk:"passphrase"` + Port basetypes.StringValue `tfsdk:"port"` + PrivateKeyPath basetypes.StringValue `tfsdk:"private_key_path"` + 
Username basetypes.StringValue `tfsdk:"username"` + state attr.ValueState +} + +func (v ClusterSshValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 4) + + var val tftypes.Value + var err error + + attrTypes["passphrase"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["port"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["private_key_path"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["username"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 4) + + val, err = v.Passphrase.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["passphrase"] = val + + val, err = v.Port.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["port"] = val + + val, err = v.PrivateKeyPath.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["private_key_path"] = val + + val, err = v.Username.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["username"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v ClusterSshValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v ClusterSshValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + 
+func (v ClusterSshValue) String() string { + return "ClusterSshValue" +} + +func (v ClusterSshValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "passphrase": basetypes.StringType{}, + "port": basetypes.StringType{}, + "private_key_path": basetypes.StringType{}, + "username": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "passphrase": v.Passphrase, + "port": v.Port, + "private_key_path": v.PrivateKeyPath, + "username": v.Username, + }) + + return objVal, diags +} + +func (v ClusterSshValue) Equal(o attr.Value) bool { + other, ok := o.(ClusterSshValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Passphrase.Equal(other.Passphrase) { + return false + } + + if !v.Port.Equal(other.Port) { + return false + } + + if !v.PrivateKeyPath.Equal(other.PrivateKeyPath) { + return false + } + + if !v.Username.Equal(other.Username) { + return false + } + + return true +} + +func (v ClusterSshValue) Type(ctx context.Context) attr.Type { + return ClusterSshType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v ClusterSshValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "passphrase": basetypes.StringType{}, + "port": basetypes.StringType{}, + "private_key_path": basetypes.StringType{}, + "username": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = KubernetesUpgradeType{} + +type KubernetesUpgradeType struct { + basetypes.ObjectType +} + +func (t KubernetesUpgradeType) Equal(o attr.Type) bool { + other, ok := o.(KubernetesUpgradeType) + + if !ok { + return 
false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t KubernetesUpgradeType) String() string { + return "KubernetesUpgradeType" +} + +func (t KubernetesUpgradeType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + paramsAttribute, ok := attributes["params"] + + if !ok { + diags.AddError( + "Attribute Missing", + `params is missing from object`) + + return nil, diags + } + + paramsVal, ok := paramsAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`params expected to be basetypes.ObjectValue, was: %T`, paramsAttribute)) + } + + strategyAttribute, ok := attributes["strategy"] + + if !ok { + diags.AddError( + "Attribute Missing", + `strategy is missing from object`) + + return nil, diags + } + + strategyVal, ok := strategyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`strategy expected to be basetypes.StringValue, was: %T`, strategyAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return KubernetesUpgradeValue{ + Params: paramsVal, + Strategy: strategyVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewKubernetesUpgradeValueNull() KubernetesUpgradeValue { + return KubernetesUpgradeValue{ + state: attr.ValueStateNull, + } +} + +func NewKubernetesUpgradeValueUnknown() KubernetesUpgradeValue { + return KubernetesUpgradeValue{ + state: attr.ValueStateUnknown, + } +} + +func NewKubernetesUpgradeValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (KubernetesUpgradeValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing 
KubernetesUpgradeValue Attribute Value", + "While creating a KubernetesUpgradeValue value, a missing attribute value was detected. "+ + "A KubernetesUpgradeValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("KubernetesUpgradeValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid KubernetesUpgradeValue Attribute Type", + "While creating a KubernetesUpgradeValue value, an invalid attribute value was detected. "+ + "A KubernetesUpgradeValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("KubernetesUpgradeValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("KubernetesUpgradeValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra KubernetesUpgradeValue Attribute Value", + "While creating a KubernetesUpgradeValue value, an extra attribute value was detected. "+ + "A KubernetesUpgradeValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra KubernetesUpgradeValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewKubernetesUpgradeValueUnknown(), diags + } + + paramsAttribute, ok := attributes["params"] + + if !ok { + diags.AddError( + "Attribute Missing", + `params is missing from object`) + + return NewKubernetesUpgradeValueUnknown(), diags + } + + paramsVal, ok := paramsAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`params expected to be basetypes.ObjectValue, was: %T`, paramsAttribute)) + } + + strategyAttribute, ok := attributes["strategy"] + + if !ok { + diags.AddError( + "Attribute Missing", + `strategy is missing from object`) + + return NewKubernetesUpgradeValueUnknown(), diags + } + + strategyVal, ok := strategyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`strategy expected to be basetypes.StringValue, was: %T`, strategyAttribute)) + } + + if diags.HasError() { + return NewKubernetesUpgradeValueUnknown(), diags + } + + return KubernetesUpgradeValue{ + Params: paramsVal, + Strategy: strategyVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewKubernetesUpgradeValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) KubernetesUpgradeValue { + object, diags := NewKubernetesUpgradeValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewKubernetesUpgradeValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t KubernetesUpgradeType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewKubernetesUpgradeValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewKubernetesUpgradeValueUnknown(), nil + } + + if in.IsNull() { + return NewKubernetesUpgradeValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewKubernetesUpgradeValueMust(KubernetesUpgradeValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t KubernetesUpgradeType) ValueType(ctx context.Context) attr.Value { + return KubernetesUpgradeValue{} +} + +var _ basetypes.ObjectValuable = KubernetesUpgradeValue{} + +type KubernetesUpgradeValue struct { + Params basetypes.ObjectValue `tfsdk:"params"` + Strategy basetypes.StringValue `tfsdk:"strategy"` + state attr.ValueState +} + +func (v KubernetesUpgradeValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 2) + + var val tftypes.Value + var err error + + attrTypes["params"] = basetypes.ObjectType{ + AttrTypes: ParamsValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["strategy"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: 
attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 2) + + val, err = v.Params.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["params"] = val + + val, err = v.Strategy.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["strategy"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v KubernetesUpgradeValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v KubernetesUpgradeValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v KubernetesUpgradeValue) String() string { + return "KubernetesUpgradeValue" +} + +func (v KubernetesUpgradeValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + var params basetypes.ObjectValue + + if v.Params.IsNull() { + params = types.ObjectNull( + ParamsValue{}.AttributeTypes(ctx), + ) + } + + if v.Params.IsUnknown() { + params = types.ObjectUnknown( + ParamsValue{}.AttributeTypes(ctx), + ) + } + + if !v.Params.IsNull() && !v.Params.IsUnknown() { + params = types.ObjectValueMust( + ParamsValue{}.AttributeTypes(ctx), + v.Params.Attributes(), + ) + } + + attributeTypes := map[string]attr.Type{ + "params": basetypes.ObjectType{ + AttrTypes: ParamsValue{}.AttributeTypes(ctx), + }, + "strategy": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return 
types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "params": params, + "strategy": v.Strategy, + }) + + return objVal, diags +} + +func (v KubernetesUpgradeValue) Equal(o attr.Value) bool { + other, ok := o.(KubernetesUpgradeValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Params.Equal(other.Params) { + return false + } + + if !v.Strategy.Equal(other.Strategy) { + return false + } + + return true +} + +func (v KubernetesUpgradeValue) Type(ctx context.Context) attr.Type { + return KubernetesUpgradeType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v KubernetesUpgradeValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "params": basetypes.ObjectType{ + AttrTypes: ParamsValue{}.AttributeTypes(ctx), + }, + "strategy": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = ParamsType{} + +type ParamsType struct { + basetypes.ObjectType +} + +func (t ParamsType) Equal(o attr.Type) bool { + other, ok := o.(ParamsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t ParamsType) String() string { + return "ParamsType" +} + +func (t ParamsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + workerConcurrencyAttribute, ok := attributes["worker_concurrency"] + + if !ok { + diags.AddError( + "Attribute Missing", + `worker_concurrency is missing from object`) + + return nil, diags + } + + workerConcurrencyVal, ok := workerConcurrencyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`worker_concurrency expected to be basetypes.StringValue, was: %T`, 
workerConcurrencyAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return ParamsValue{ + WorkerConcurrency: workerConcurrencyVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewParamsValueNull() ParamsValue { + return ParamsValue{ + state: attr.ValueStateNull, + } +} + +func NewParamsValueUnknown() ParamsValue { + return ParamsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewParamsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (ParamsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing ParamsValue Attribute Value", + "While creating a ParamsValue value, a missing attribute value was detected. "+ + "A ParamsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ParamsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid ParamsValue Attribute Type", + "While creating a ParamsValue value, an invalid attribute value was detected. "+ + "A ParamsValue must use a matching attribute type for the value. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ParamsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("ParamsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra ParamsValue Attribute Value", + "While creating a ParamsValue value, an extra attribute value was detected. "+ + "A ParamsValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra ParamsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewParamsValueUnknown(), diags + } + + workerConcurrencyAttribute, ok := attributes["worker_concurrency"] + + if !ok { + diags.AddError( + "Attribute Missing", + `worker_concurrency is missing from object`) + + return NewParamsValueUnknown(), diags + } + + workerConcurrencyVal, ok := workerConcurrencyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`worker_concurrency expected to be basetypes.StringValue, was: %T`, workerConcurrencyAttribute)) + } + + if diags.HasError() { + return NewParamsValueUnknown(), diags + } + + return ParamsValue{ + WorkerConcurrency: workerConcurrencyVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewParamsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) ParamsValue { + object, diags := NewParamsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewParamsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t ParamsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewParamsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewParamsValueUnknown(), nil + } + + if in.IsNull() { + return NewParamsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewParamsValueMust(ParamsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t ParamsType) ValueType(ctx context.Context) attr.Value { + return ParamsValue{} +} + +var _ basetypes.ObjectValuable = ParamsValue{} + +type ParamsValue struct { + WorkerConcurrency basetypes.StringValue `tfsdk:"worker_concurrency"` + state attr.ValueState +} + +func (v ParamsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 1) + + var val tftypes.Value + var err error + + attrTypes["worker_concurrency"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 1) + + val, err = v.WorkerConcurrency.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + 
vals["worker_concurrency"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v ParamsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v ParamsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v ParamsValue) String() string { + return "ParamsValue" +} + +func (v ParamsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "worker_concurrency": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "worker_concurrency": v.WorkerConcurrency, + }) + + return objVal, diags +} + +func (v ParamsValue) Equal(o attr.Value) bool { + other, ok := o.(ParamsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.WorkerConcurrency.Equal(other.WorkerConcurrency) { + return false + } + + return true +} + +func (v ParamsValue) Type(ctx context.Context) attr.Type { + return ParamsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v ParamsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "worker_concurrency": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = NetworkType{} + +type NetworkType struct { + 
basetypes.ObjectType +} + +func (t NetworkType) Equal(o attr.Type) bool { + other, ok := o.(NetworkType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t NetworkType) String() string { + return "NetworkType" +} + +func (t NetworkType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + cniAttribute, ok := attributes["cni"] + + if !ok { + diags.AddError( + "Attribute Missing", + `cni is missing from object`) + + return nil, diags + } + + cniVal, ok := cniAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`cni expected to be basetypes.ObjectValue, was: %T`, cniAttribute)) + } + + ipv6Attribute, ok := attributes["ipv6"] + + if !ok { + diags.AddError( + "Attribute Missing", + `ipv6 is missing from object`) + + return nil, diags + } + + ipv6Val, ok := ipv6Attribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`ipv6 expected to be basetypes.ObjectValue, was: %T`, ipv6Attribute)) + } + + podSubnetAttribute, ok := attributes["pod_subnet"] + + if !ok { + diags.AddError( + "Attribute Missing", + `pod_subnet is missing from object`) + + return nil, diags + } + + podSubnetVal, ok := podSubnetAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`pod_subnet expected to be basetypes.StringValue, was: %T`, podSubnetAttribute)) + } + + serviceSubnetAttribute, ok := attributes["service_subnet"] + + if !ok { + diags.AddError( + "Attribute Missing", + `service_subnet is missing from object`) + + return nil, diags + } + + serviceSubnetVal, ok := serviceSubnetAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`service_subnet expected to be basetypes.StringValue, was: %T`, serviceSubnetAttribute)) + } + + if 
diags.HasError() { + return nil, diags + } + + return NetworkValue{ + Cni: cniVal, + Ipv6: ipv6Val, + PodSubnet: podSubnetVal, + ServiceSubnet: serviceSubnetVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewNetworkValueNull() NetworkValue { + return NetworkValue{ + state: attr.ValueStateNull, + } +} + +func NewNetworkValueUnknown() NetworkValue { + return NetworkValue{ + state: attr.ValueStateUnknown, + } +} + +func NewNetworkValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (NetworkValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing NetworkValue Attribute Value", + "While creating a NetworkValue value, a missing attribute value was detected. "+ + "A NetworkValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("NetworkValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid NetworkValue Attribute Type", + "While creating a NetworkValue value, an invalid attribute value was detected. "+ + "A NetworkValue must use a matching attribute type for the value. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("NetworkValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("NetworkValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra NetworkValue Attribute Value", + "While creating a NetworkValue value, an extra attribute value was detected. "+ + "A NetworkValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra NetworkValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewNetworkValueUnknown(), diags + } + + cniAttribute, ok := attributes["cni"] + + if !ok { + diags.AddError( + "Attribute Missing", + `cni is missing from object`) + + return NewNetworkValueUnknown(), diags + } + + cniVal, ok := cniAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`cni expected to be basetypes.ObjectValue, was: %T`, cniAttribute)) + } + + ipv6Attribute, ok := attributes["ipv6"] + + if !ok { + diags.AddError( + "Attribute Missing", + `ipv6 is missing from object`) + + return NewNetworkValueUnknown(), diags + } + + ipv6Val, ok := ipv6Attribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`ipv6 expected to be basetypes.ObjectValue, was: %T`, ipv6Attribute)) + } + + podSubnetAttribute, ok := attributes["pod_subnet"] + + if !ok { + diags.AddError( + "Attribute Missing", + `pod_subnet is missing from object`) + + return NewNetworkValueUnknown(), diags + } + + podSubnetVal, ok := podSubnetAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`pod_subnet expected to be basetypes.StringValue, 
was: %T`, podSubnetAttribute)) + } + + serviceSubnetAttribute, ok := attributes["service_subnet"] + + if !ok { + diags.AddError( + "Attribute Missing", + `service_subnet is missing from object`) + + return NewNetworkValueUnknown(), diags + } + + serviceSubnetVal, ok := serviceSubnetAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`service_subnet expected to be basetypes.StringValue, was: %T`, serviceSubnetAttribute)) + } + + if diags.HasError() { + return NewNetworkValueUnknown(), diags + } + + return NetworkValue{ + Cni: cniVal, + Ipv6: ipv6Val, + PodSubnet: podSubnetVal, + ServiceSubnet: serviceSubnetVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewNetworkValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) NetworkValue { + object, diags := NewNetworkValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewNetworkValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t NetworkType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewNetworkValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewNetworkValueUnknown(), nil + } + + if in.IsNull() { + return NewNetworkValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + 
return nil, err + } + + attributes[k] = a + } + + return NewNetworkValueMust(NetworkValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t NetworkType) ValueType(ctx context.Context) attr.Value { + return NetworkValue{} +} + +var _ basetypes.ObjectValuable = NetworkValue{} + +type NetworkValue struct { + Cni basetypes.ObjectValue `tfsdk:"cni"` + Ipv6 basetypes.ObjectValue `tfsdk:"ipv6"` + PodSubnet basetypes.StringValue `tfsdk:"pod_subnet"` + ServiceSubnet basetypes.StringValue `tfsdk:"service_subnet"` + state attr.ValueState +} + +func (v NetworkValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 4) + + var val tftypes.Value + var err error + + attrTypes["cni"] = basetypes.ObjectType{ + AttrTypes: CniValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["ipv6"] = basetypes.ObjectType{ + AttrTypes: Ipv6Value{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["pod_subnet"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["service_subnet"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 4) + + val, err = v.Cni.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["cni"] = val + + val, err = v.Ipv6.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["ipv6"] = val + + val, err = v.PodSubnet.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["pod_subnet"] = val + + val, err = v.ServiceSubnet.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["service_subnet"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, 
tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v NetworkValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v NetworkValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v NetworkValue) String() string { + return "NetworkValue" +} + +func (v NetworkValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + var cni basetypes.ObjectValue + + if v.Cni.IsNull() { + cni = types.ObjectNull( + CniValue{}.AttributeTypes(ctx), + ) + } + + if v.Cni.IsUnknown() { + cni = types.ObjectUnknown( + CniValue{}.AttributeTypes(ctx), + ) + } + + if !v.Cni.IsNull() && !v.Cni.IsUnknown() { + cni = types.ObjectValueMust( + CniValue{}.AttributeTypes(ctx), + v.Cni.Attributes(), + ) + } + + var ipv6 basetypes.ObjectValue + + if v.Ipv6.IsNull() { + ipv6 = types.ObjectNull( + Ipv6Value{}.AttributeTypes(ctx), + ) + } + + if v.Ipv6.IsUnknown() { + ipv6 = types.ObjectUnknown( + Ipv6Value{}.AttributeTypes(ctx), + ) + } + + if !v.Ipv6.IsNull() && !v.Ipv6.IsUnknown() { + ipv6 = types.ObjectValueMust( + Ipv6Value{}.AttributeTypes(ctx), + v.Ipv6.Attributes(), + ) + } + + attributeTypes := map[string]attr.Type{ + "cni": basetypes.ObjectType{ + AttrTypes: CniValue{}.AttributeTypes(ctx), + }, + "ipv6": basetypes.ObjectType{ + AttrTypes: Ipv6Value{}.AttributeTypes(ctx), + }, + "pod_subnet": basetypes.StringType{}, + "service_subnet": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + 
map[string]attr.Value{ + "cni": cni, + "ipv6": ipv6, + "pod_subnet": v.PodSubnet, + "service_subnet": v.ServiceSubnet, + }) + + return objVal, diags +} + +func (v NetworkValue) Equal(o attr.Value) bool { + other, ok := o.(NetworkValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Cni.Equal(other.Cni) { + return false + } + + if !v.Ipv6.Equal(other.Ipv6) { + return false + } + + if !v.PodSubnet.Equal(other.PodSubnet) { + return false + } + + if !v.ServiceSubnet.Equal(other.ServiceSubnet) { + return false + } + + return true +} + +func (v NetworkValue) Type(ctx context.Context) attr.Type { + return NetworkType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v NetworkValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "cni": basetypes.ObjectType{ + AttrTypes: CniValue{}.AttributeTypes(ctx), + }, + "ipv6": basetypes.ObjectType{ + AttrTypes: Ipv6Value{}.AttributeTypes(ctx), + }, + "pod_subnet": basetypes.StringType{}, + "service_subnet": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = CniType{} + +type CniType struct { + basetypes.ObjectType +} + +func (t CniType) Equal(o attr.Type) bool { + other, ok := o.(CniType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t CniType) String() string { + return "CniType" +} + +func (t CniType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + nameAttribute, ok := attributes["name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `name is missing from object`) + + return nil, diags + } + + nameVal, ok := nameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`name expected to be basetypes.StringValue, 
was: %T`, nameAttribute)) + } + + versionAttribute, ok := attributes["version"] + + if !ok { + diags.AddError( + "Attribute Missing", + `version is missing from object`) + + return nil, diags + } + + versionVal, ok := versionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`version expected to be basetypes.StringValue, was: %T`, versionAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return CniValue{ + Name: nameVal, + Version: versionVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewCniValueNull() CniValue { + return CniValue{ + state: attr.ValueStateNull, + } +} + +func NewCniValueUnknown() CniValue { + return CniValue{ + state: attr.ValueStateUnknown, + } +} + +func NewCniValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (CniValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing CniValue Attribute Value", + "While creating a CniValue value, a missing attribute value was detected. "+ + "A CniValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("CniValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid CniValue Attribute Type", + "While creating a CniValue value, an invalid attribute value was detected. "+ + "A CniValue must use a matching attribute type for the value. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("CniValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("CniValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra CniValue Attribute Value", + "While creating a CniValue value, an extra attribute value was detected. "+ + "A CniValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra CniValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewCniValueUnknown(), diags + } + + nameAttribute, ok := attributes["name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `name is missing from object`) + + return NewCniValueUnknown(), diags + } + + nameVal, ok := nameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`name expected to be basetypes.StringValue, was: %T`, nameAttribute)) + } + + versionAttribute, ok := attributes["version"] + + if !ok { + diags.AddError( + "Attribute Missing", + `version is missing from object`) + + return NewCniValueUnknown(), diags + } + + versionVal, ok := versionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`version expected to be basetypes.StringValue, was: %T`, versionAttribute)) + } + + if diags.HasError() { + return NewCniValueUnknown(), diags + } + + return CniValue{ + Name: nameVal, + Version: versionVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewCniValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) CniValue { + object, diags := NewCniValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be 
added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewCniValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t CniType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewCniValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewCniValueUnknown(), nil + } + + if in.IsNull() { + return NewCniValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewCniValueMust(CniValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t CniType) ValueType(ctx context.Context) attr.Value { + return CniValue{} +} + +var _ basetypes.ObjectValuable = CniValue{} + +type CniValue struct { + Name basetypes.StringValue `tfsdk:"name"` + Version basetypes.StringValue `tfsdk:"version"` + state attr.ValueState +} + +func (v CniValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 2) + + var val tftypes.Value + var err error + + attrTypes["name"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["version"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 2) + + val, err = v.Name.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, 
tftypes.UnknownValue), err + } + + vals["name"] = val + + val, err = v.Version.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["version"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v CniValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v CniValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v CniValue) String() string { + return "CniValue" +} + +func (v CniValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "name": basetypes.StringType{}, + "version": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "name": v.Name, + "version": v.Version, + }) + + return objVal, diags +} + +func (v CniValue) Equal(o attr.Value) bool { + other, ok := o.(CniValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Name.Equal(other.Name) { + return false + } + + if !v.Version.Equal(other.Version) { + return false + } + + return true +} + +func (v CniValue) Type(ctx context.Context) attr.Type { + return CniType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v CniValue) AttributeTypes(ctx 
context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "name": basetypes.StringType{}, + "version": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = Ipv6Type{} + +type Ipv6Type struct { + basetypes.ObjectType +} + +func (t Ipv6Type) Equal(o attr.Type) bool { + other, ok := o.(Ipv6Type) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t Ipv6Type) String() string { + return "Ipv6Type" +} + +func (t Ipv6Type) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + podSubnetAttribute, ok := attributes["pod_subnet"] + + if !ok { + diags.AddError( + "Attribute Missing", + `pod_subnet is missing from object`) + + return nil, diags + } + + podSubnetVal, ok := podSubnetAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`pod_subnet expected to be basetypes.StringValue, was: %T`, podSubnetAttribute)) + } + + serviceSubnetAttribute, ok := attributes["service_subnet"] + + if !ok { + diags.AddError( + "Attribute Missing", + `service_subnet is missing from object`) + + return nil, diags + } + + serviceSubnetVal, ok := serviceSubnetAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`service_subnet expected to be basetypes.StringValue, was: %T`, serviceSubnetAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return Ipv6Value{ + PodSubnet: podSubnetVal, + ServiceSubnet: serviceSubnetVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewIpv6ValueNull() Ipv6Value { + return Ipv6Value{ + state: attr.ValueStateNull, + } +} + +func NewIpv6ValueUnknown() Ipv6Value { + return Ipv6Value{ + state: attr.ValueStateUnknown, + } +} + +func NewIpv6Value(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (Ipv6Value, diag.Diagnostics) { + var diags 
diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing Ipv6Value Attribute Value", + "While creating a Ipv6Value value, a missing attribute value was detected. "+ + "A Ipv6Value must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Ipv6Value Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid Ipv6Value Attribute Type", + "While creating a Ipv6Value value, an invalid attribute value was detected. "+ + "A Ipv6Value must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Ipv6Value Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("Ipv6Value Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra Ipv6Value Attribute Value", + "While creating a Ipv6Value value, an extra attribute value was detected. "+ + "A Ipv6Value must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra Ipv6Value Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewIpv6ValueUnknown(), diags + } + + podSubnetAttribute, ok := attributes["pod_subnet"] + + if !ok { + diags.AddError( + "Attribute Missing", + `pod_subnet is missing from object`) + + return NewIpv6ValueUnknown(), diags + } + + podSubnetVal, ok := podSubnetAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`pod_subnet expected to be basetypes.StringValue, was: %T`, podSubnetAttribute)) + } + + serviceSubnetAttribute, ok := attributes["service_subnet"] + + if !ok { + diags.AddError( + "Attribute Missing", + `service_subnet is missing from object`) + + return NewIpv6ValueUnknown(), diags + } + + serviceSubnetVal, ok := serviceSubnetAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`service_subnet expected to be basetypes.StringValue, was: %T`, serviceSubnetAttribute)) + } + + if diags.HasError() { + return NewIpv6ValueUnknown(), diags + } + + return Ipv6Value{ + PodSubnet: podSubnetVal, + ServiceSubnet: serviceSubnetVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewIpv6ValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) Ipv6Value { + object, diags := NewIpv6Value(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewIpv6ValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t Ipv6Type) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewIpv6ValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewIpv6ValueUnknown(), nil + } + + if in.IsNull() { + return NewIpv6ValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewIpv6ValueMust(Ipv6Value{}.AttributeTypes(ctx), attributes), nil +} + +func (t Ipv6Type) ValueType(ctx context.Context) attr.Value { + return Ipv6Value{} +} + +var _ basetypes.ObjectValuable = Ipv6Value{} + +type Ipv6Value struct { + PodSubnet basetypes.StringValue `tfsdk:"pod_subnet"` + ServiceSubnet basetypes.StringValue `tfsdk:"service_subnet"` + state attr.ValueState +} + +func (v Ipv6Value) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 2) + + var val tftypes.Value + var err error + + attrTypes["pod_subnet"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["service_subnet"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 2) + + val, err = v.PodSubnet.ToTerraformValue(ctx) + + if err != nil { + return 
tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["pod_subnet"] = val + + val, err = v.ServiceSubnet.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["service_subnet"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v Ipv6Value) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v Ipv6Value) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v Ipv6Value) String() string { + return "Ipv6Value" +} + +func (v Ipv6Value) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "pod_subnet": basetypes.StringType{}, + "service_subnet": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "pod_subnet": v.PodSubnet, + "service_subnet": v.ServiceSubnet, + }) + + return objVal, diags +} + +func (v Ipv6Value) Equal(o attr.Value) bool { + other, ok := o.(Ipv6Value) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.PodSubnet.Equal(other.PodSubnet) { + return false + } + + if !v.ServiceSubnet.Equal(other.ServiceSubnet) { + return false + } + + return true +} + +func (v Ipv6Value) Type(ctx context.Context) attr.Type { + return 
Ipv6Type{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v Ipv6Value) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "pod_subnet": basetypes.StringType{}, + "service_subnet": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = NodesType{} + +type NodesType struct { + basetypes.ObjectType +} + +func (t NodesType) Equal(o attr.Type) bool { + other, ok := o.(NodesType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t NodesType) String() string { + return "NodesType" +} + +func (t NodesType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + archAttribute, ok := attributes["arch"] + + if !ok { + diags.AddError( + "Attribute Missing", + `arch is missing from object`) + + return nil, diags + } + + archVal, ok := archAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`arch expected to be basetypes.StringValue, was: %T`, archAttribute)) + } + + hostnameAttribute, ok := attributes["hostname"] + + if !ok { + diags.AddError( + "Attribute Missing", + `hostname is missing from object`) + + return nil, diags + } + + hostnameVal, ok := hostnameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`hostname expected to be basetypes.StringValue, was: %T`, hostnameAttribute)) + } + + interfaceAttribute, ok := attributes["interface"] + + if !ok { + diags.AddError( + "Attribute Missing", + `interface is missing from object`) + + return nil, diags + } + + interfaceVal, ok := interfaceAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`interface expected to be basetypes.StringValue, was: %T`, interfaceAttribute)) + } + + labelsAttribute, ok := attributes["labels"] + + if 
!ok { + diags.AddError( + "Attribute Missing", + `labels is missing from object`) + + return nil, diags + } + + labelsVal, ok := labelsAttribute.(basetypes.MapValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`labels expected to be basetypes.MapValue, was: %T`, labelsAttribute)) + } + + operatingSystemAttribute, ok := attributes["operating_system"] + + if !ok { + diags.AddError( + "Attribute Missing", + `operating_system is missing from object`) + + return nil, diags + } + + operatingSystemVal, ok := operatingSystemAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`operating_system expected to be basetypes.StringValue, was: %T`, operatingSystemAttribute)) + } + + privateIpAttribute, ok := attributes["private_ip"] + + if !ok { + diags.AddError( + "Attribute Missing", + `private_ip is missing from object`) + + return nil, diags + } + + privateIpVal, ok := privateIpAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`private_ip expected to be basetypes.StringValue, was: %T`, privateIpAttribute)) + } + + rolesAttribute, ok := attributes["roles"] + + if !ok { + diags.AddError( + "Attribute Missing", + `roles is missing from object`) + + return nil, diags + } + + rolesVal, ok := rolesAttribute.(basetypes.SetValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`roles expected to be basetypes.SetValue, was: %T`, rolesAttribute)) + } + + sshAttribute, ok := attributes["ssh"] + + if !ok { + diags.AddError( + "Attribute Missing", + `ssh is missing from object`) + + return nil, diags + } + + sshVal, ok := sshAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`ssh expected to be basetypes.ObjectValue, was: %T`, sshAttribute)) + } + + taintsAttribute, ok := attributes["taints"] + + if !ok { + diags.AddError( + "Attribute Missing", + `taints is missing from object`) + 
+ return nil, diags + } + + taintsVal, ok := taintsAttribute.(basetypes.SetValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`taints expected to be basetypes.SetValue, was: %T`, taintsAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return NodesValue{ + Arch: archVal, + Hostname: hostnameVal, + Interface: interfaceVal, + Labels: labelsVal, + OperatingSystem: operatingSystemVal, + PrivateIp: privateIpVal, + Roles: rolesVal, + Ssh: sshVal, + Taints: taintsVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewNodesValueNull() NodesValue { + return NodesValue{ + state: attr.ValueStateNull, + } +} + +func NewNodesValueUnknown() NodesValue { + return NodesValue{ + state: attr.ValueStateUnknown, + } +} + +func NewNodesValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (NodesValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing NodesValue Attribute Value", + "While creating a NodesValue value, a missing attribute value was detected. "+ + "A NodesValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("NodesValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid NodesValue Attribute Type", + "While creating a NodesValue value, an invalid attribute value was detected. "+ + "A NodesValue must use a matching attribute type for the value. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("NodesValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("NodesValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra NodesValue Attribute Value", + "While creating a NodesValue value, an extra attribute value was detected. "+ + "A NodesValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra NodesValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewNodesValueUnknown(), diags + } + + archAttribute, ok := attributes["arch"] + + if !ok { + diags.AddError( + "Attribute Missing", + `arch is missing from object`) + + return NewNodesValueUnknown(), diags + } + + archVal, ok := archAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`arch expected to be basetypes.StringValue, was: %T`, archAttribute)) + } + + hostnameAttribute, ok := attributes["hostname"] + + if !ok { + diags.AddError( + "Attribute Missing", + `hostname is missing from object`) + + return NewNodesValueUnknown(), diags + } + + hostnameVal, ok := hostnameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`hostname expected to be basetypes.StringValue, was: %T`, hostnameAttribute)) + } + + interfaceAttribute, ok := attributes["interface"] + + if !ok { + diags.AddError( + "Attribute Missing", + `interface is missing from object`) + + return NewNodesValueUnknown(), diags + } + + interfaceVal, ok := interfaceAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`interface expected to be 
basetypes.StringValue, was: %T`, interfaceAttribute)) + } + + labelsAttribute, ok := attributes["labels"] + + if !ok { + diags.AddError( + "Attribute Missing", + `labels is missing from object`) + + return NewNodesValueUnknown(), diags + } + + labelsVal, ok := labelsAttribute.(basetypes.MapValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`labels expected to be basetypes.MapValue, was: %T`, labelsAttribute)) + } + + operatingSystemAttribute, ok := attributes["operating_system"] + + if !ok { + diags.AddError( + "Attribute Missing", + `operating_system is missing from object`) + + return NewNodesValueUnknown(), diags + } + + operatingSystemVal, ok := operatingSystemAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`operating_system expected to be basetypes.StringValue, was: %T`, operatingSystemAttribute)) + } + + privateIpAttribute, ok := attributes["private_ip"] + + if !ok { + diags.AddError( + "Attribute Missing", + `private_ip is missing from object`) + + return NewNodesValueUnknown(), diags + } + + privateIpVal, ok := privateIpAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`private_ip expected to be basetypes.StringValue, was: %T`, privateIpAttribute)) + } + + rolesAttribute, ok := attributes["roles"] + + if !ok { + diags.AddError( + "Attribute Missing", + `roles is missing from object`) + + return NewNodesValueUnknown(), diags + } + + rolesVal, ok := rolesAttribute.(basetypes.SetValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`roles expected to be basetypes.SetValue, was: %T`, rolesAttribute)) + } + + sshAttribute, ok := attributes["ssh"] + + if !ok { + diags.AddError( + "Attribute Missing", + `ssh is missing from object`) + + return NewNodesValueUnknown(), diags + } + + sshVal, ok := sshAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + 
fmt.Sprintf(`ssh expected to be basetypes.ObjectValue, was: %T`, sshAttribute)) + } + + taintsAttribute, ok := attributes["taints"] + + if !ok { + diags.AddError( + "Attribute Missing", + `taints is missing from object`) + + return NewNodesValueUnknown(), diags + } + + taintsVal, ok := taintsAttribute.(basetypes.SetValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`taints expected to be basetypes.SetValue, was: %T`, taintsAttribute)) + } + + if diags.HasError() { + return NewNodesValueUnknown(), diags + } + + return NodesValue{ + Arch: archVal, + Hostname: hostnameVal, + Interface: interfaceVal, + Labels: labelsVal, + OperatingSystem: operatingSystemVal, + PrivateIp: privateIpVal, + Roles: rolesVal, + Ssh: sshVal, + Taints: taintsVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewNodesValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) NodesValue { + object, diags := NewNodesValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewNodesValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t NodesType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewNodesValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewNodesValueUnknown(), nil + } + + if in.IsNull() { + return NewNodesValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewNodesValueMust(NodesValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t NodesType) ValueType(ctx context.Context) attr.Value { + return NodesValue{} +} + +var _ basetypes.ObjectValuable = NodesValue{} + +type NodesValue struct { + Arch basetypes.StringValue `tfsdk:"arch"` + Hostname basetypes.StringValue `tfsdk:"hostname"` + Interface basetypes.StringValue `tfsdk:"interface"` + Labels basetypes.MapValue `tfsdk:"labels"` + OperatingSystem basetypes.StringValue `tfsdk:"operating_system"` + PrivateIp basetypes.StringValue `tfsdk:"private_ip"` + Roles basetypes.SetValue `tfsdk:"roles"` + Ssh basetypes.ObjectValue `tfsdk:"ssh"` + Taints basetypes.SetValue `tfsdk:"taints"` + state attr.ValueState +} + +func (v NodesValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 9) + + var val tftypes.Value + var err error + + attrTypes["arch"] = 
basetypes.StringType{}.TerraformType(ctx) + attrTypes["hostname"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["interface"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["labels"] = basetypes.MapType{ + ElemType: types.StringType, + }.TerraformType(ctx) + attrTypes["operating_system"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["private_ip"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["roles"] = basetypes.SetType{ + ElemType: types.StringType, + }.TerraformType(ctx) + attrTypes["ssh"] = basetypes.ObjectType{ + AttrTypes: SshValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["taints"] = basetypes.SetType{ + ElemType: TaintsValue{}.Type(ctx), + }.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 9) + + val, err = v.Arch.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["arch"] = val + + val, err = v.Hostname.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["hostname"] = val + + val, err = v.Interface.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["interface"] = val + + val, err = v.Labels.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["labels"] = val + + val, err = v.OperatingSystem.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["operating_system"] = val + + val, err = v.PrivateIp.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["private_ip"] = val + + val, err = v.Roles.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, 
tftypes.UnknownValue), err + } + + vals["roles"] = val + + val, err = v.Ssh.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["ssh"] = val + + val, err = v.Taints.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["taints"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v NodesValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v NodesValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v NodesValue) String() string { + return "NodesValue" +} + +func (v NodesValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + var ssh basetypes.ObjectValue + + if v.Ssh.IsNull() { + ssh = types.ObjectNull( + SshValue{}.AttributeTypes(ctx), + ) + } + + if v.Ssh.IsUnknown() { + ssh = types.ObjectUnknown( + SshValue{}.AttributeTypes(ctx), + ) + } + + if !v.Ssh.IsNull() && !v.Ssh.IsUnknown() { + ssh = types.ObjectValueMust( + SshValue{}.AttributeTypes(ctx), + v.Ssh.Attributes(), + ) + } + + taints := types.SetValueMust( + TaintsType{ + basetypes.ObjectType{ + AttrTypes: TaintsValue{}.AttributeTypes(ctx), + }, + }, + v.Taints.Elements(), + ) + + if v.Taints.IsNull() { + taints = types.SetNull( + TaintsType{ + basetypes.ObjectType{ + AttrTypes: TaintsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.Taints.IsUnknown() { + taints = types.SetUnknown( + TaintsType{ + basetypes.ObjectType{ + AttrTypes: 
TaintsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + var labelsVal basetypes.MapValue + switch { + case v.Labels.IsUnknown(): + labelsVal = types.MapUnknown(types.StringType) + case v.Labels.IsNull(): + labelsVal = types.MapNull(types.StringType) + default: + var d diag.Diagnostics + labelsVal, d = types.MapValue(types.StringType, v.Labels.Elements()) + diags.Append(d...) + } + + if diags.HasError() { + return types.ObjectUnknown(map[string]attr.Type{ + "arch": basetypes.StringType{}, + "hostname": basetypes.StringType{}, + "interface": basetypes.StringType{}, + "labels": basetypes.MapType{ + ElemType: types.StringType, + }, + "operating_system": basetypes.StringType{}, + "private_ip": basetypes.StringType{}, + "roles": basetypes.SetType{ + ElemType: types.StringType, + }, + "ssh": basetypes.ObjectType{ + AttrTypes: SshValue{}.AttributeTypes(ctx), + }, + "taints": basetypes.SetType{ + ElemType: TaintsValue{}.Type(ctx), + }, + }), diags + } + + var rolesVal basetypes.SetValue + switch { + case v.Roles.IsUnknown(): + rolesVal = types.SetUnknown(types.StringType) + case v.Roles.IsNull(): + rolesVal = types.SetNull(types.StringType) + default: + var d diag.Diagnostics + rolesVal, d = types.SetValue(types.StringType, v.Roles.Elements()) + diags.Append(d...) 
+ } + + if diags.HasError() { + return types.ObjectUnknown(map[string]attr.Type{ + "arch": basetypes.StringType{}, + "hostname": basetypes.StringType{}, + "interface": basetypes.StringType{}, + "labels": basetypes.MapType{ + ElemType: types.StringType, + }, + "operating_system": basetypes.StringType{}, + "private_ip": basetypes.StringType{}, + "roles": basetypes.SetType{ + ElemType: types.StringType, + }, + "ssh": basetypes.ObjectType{ + AttrTypes: SshValue{}.AttributeTypes(ctx), + }, + "taints": basetypes.SetType{ + ElemType: TaintsValue{}.Type(ctx), + }, + }), diags + } + + attributeTypes := map[string]attr.Type{ + "arch": basetypes.StringType{}, + "hostname": basetypes.StringType{}, + "interface": basetypes.StringType{}, + "labels": basetypes.MapType{ + ElemType: types.StringType, + }, + "operating_system": basetypes.StringType{}, + "private_ip": basetypes.StringType{}, + "roles": basetypes.SetType{ + ElemType: types.StringType, + }, + "ssh": basetypes.ObjectType{ + AttrTypes: SshValue{}.AttributeTypes(ctx), + }, + "taints": basetypes.SetType{ + ElemType: TaintsValue{}.Type(ctx), + }, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "arch": v.Arch, + "hostname": v.Hostname, + "interface": v.Interface, + "labels": labelsVal, + "operating_system": v.OperatingSystem, + "private_ip": v.PrivateIp, + "roles": rolesVal, + "ssh": ssh, + "taints": taints, + }) + + return objVal, diags +} + +func (v NodesValue) Equal(o attr.Value) bool { + other, ok := o.(NodesValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Arch.Equal(other.Arch) { + return false + } + + if !v.Hostname.Equal(other.Hostname) { + return false + } + + if !v.Interface.Equal(other.Interface) { + return false + } 
+ + if !v.Labels.Equal(other.Labels) { + return false + } + + if !v.OperatingSystem.Equal(other.OperatingSystem) { + return false + } + + if !v.PrivateIp.Equal(other.PrivateIp) { + return false + } + + if !v.Roles.Equal(other.Roles) { + return false + } + + if !v.Ssh.Equal(other.Ssh) { + return false + } + + if !v.Taints.Equal(other.Taints) { + return false + } + + return true +} + +func (v NodesValue) Type(ctx context.Context) attr.Type { + return NodesType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v NodesValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "arch": basetypes.StringType{}, + "hostname": basetypes.StringType{}, + "interface": basetypes.StringType{}, + "labels": basetypes.MapType{ + ElemType: types.StringType, + }, + "operating_system": basetypes.StringType{}, + "private_ip": basetypes.StringType{}, + "roles": basetypes.SetType{ + ElemType: types.StringType, + }, + "ssh": basetypes.ObjectType{ + AttrTypes: SshValue{}.AttributeTypes(ctx), + }, + "taints": basetypes.SetType{ + ElemType: TaintsValue{}.Type(ctx), + }, + } +} + +var _ basetypes.ObjectTypable = SshType{} + +type SshType struct { + basetypes.ObjectType +} + +func (t SshType) Equal(o attr.Type) bool { + other, ok := o.(SshType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t SshType) String() string { + return "SshType" +} + +func (t SshType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + ipAddressAttribute, ok := attributes["ip_address"] + + if !ok { + diags.AddError( + "Attribute Missing", + `ip_address is missing from object`) + + return nil, diags + } + + ipAddressVal, ok := ipAddressAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`ip_address expected to be 
basetypes.StringValue, was: %T`, ipAddressAttribute)) + } + + passphraseAttribute, ok := attributes["passphrase"] + + if !ok { + diags.AddError( + "Attribute Missing", + `passphrase is missing from object`) + + return nil, diags + } + + passphraseVal, ok := passphraseAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`passphrase expected to be basetypes.StringValue, was: %T`, passphraseAttribute)) + } + + portAttribute, ok := attributes["port"] + + if !ok { + diags.AddError( + "Attribute Missing", + `port is missing from object`) + + return nil, diags + } + + portVal, ok := portAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`port expected to be basetypes.StringValue, was: %T`, portAttribute)) + } + + privateKeyPathAttribute, ok := attributes["private_key_path"] + + if !ok { + diags.AddError( + "Attribute Missing", + `private_key_path is missing from object`) + + return nil, diags + } + + privateKeyPathVal, ok := privateKeyPathAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`private_key_path expected to be basetypes.StringValue, was: %T`, privateKeyPathAttribute)) + } + + usernameAttribute, ok := attributes["username"] + + if !ok { + diags.AddError( + "Attribute Missing", + `username is missing from object`) + + return nil, diags + } + + usernameVal, ok := usernameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`username expected to be basetypes.StringValue, was: %T`, usernameAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return SshValue{ + IpAddress: ipAddressVal, + Passphrase: passphraseVal, + Port: portVal, + PrivateKeyPath: privateKeyPathVal, + Username: usernameVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewSshValueNull() SshValue { + return SshValue{ + state: attr.ValueStateNull, + } +} + +func 
NewSshValueUnknown() SshValue { + return SshValue{ + state: attr.ValueStateUnknown, + } +} + +func NewSshValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (SshValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing SshValue Attribute Value", + "While creating a SshValue value, a missing attribute value was detected. "+ + "A SshValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("SshValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid SshValue Attribute Type", + "While creating a SshValue value, an invalid attribute value was detected. "+ + "A SshValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("SshValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("SshValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra SshValue Attribute Value", + "While creating a SshValue value, an extra attribute value was detected. "+ + "A SshValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra SshValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewSshValueUnknown(), diags + } + + ipAddressAttribute, ok := attributes["ip_address"] + + if !ok { + diags.AddError( + "Attribute Missing", + `ip_address is missing from object`) + + return NewSshValueUnknown(), diags + } + + ipAddressVal, ok := ipAddressAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`ip_address expected to be basetypes.StringValue, was: %T`, ipAddressAttribute)) + } + + passphraseAttribute, ok := attributes["passphrase"] + + if !ok { + diags.AddError( + "Attribute Missing", + `passphrase is missing from object`) + + return NewSshValueUnknown(), diags + } + + passphraseVal, ok := passphraseAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`passphrase expected to be basetypes.StringValue, was: %T`, passphraseAttribute)) + } + + portAttribute, ok := attributes["port"] + + if !ok { + diags.AddError( + "Attribute Missing", + `port is missing from object`) + + return NewSshValueUnknown(), diags + } + + portVal, ok := portAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`port expected to be basetypes.StringValue, was: %T`, portAttribute)) + } + + privateKeyPathAttribute, ok := attributes["private_key_path"] + + if !ok { + diags.AddError( + "Attribute Missing", + `private_key_path is missing from object`) + + return NewSshValueUnknown(), diags + } + + privateKeyPathVal, ok := privateKeyPathAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`private_key_path expected to be basetypes.StringValue, was: %T`, privateKeyPathAttribute)) + } + + usernameAttribute, ok := attributes["username"] + + if !ok { + diags.AddError( + "Attribute 
Missing", + `username is missing from object`) + + return NewSshValueUnknown(), diags + } + + usernameVal, ok := usernameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`username expected to be basetypes.StringValue, was: %T`, usernameAttribute)) + } + + if diags.HasError() { + return NewSshValueUnknown(), diags + } + + return SshValue{ + IpAddress: ipAddressVal, + Passphrase: passphraseVal, + Port: portVal, + PrivateKeyPath: privateKeyPathVal, + Username: usernameVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewSshValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) SshValue { + object, diags := NewSshValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewSshValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t SshType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewSshValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewSshValueUnknown(), nil + } + + if in.IsNull() { + return NewSshValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewSshValueMust(SshValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t SshType) ValueType(ctx 
context.Context) attr.Value { + return SshValue{} +} + +var _ basetypes.ObjectValuable = SshValue{} + +type SshValue struct { + IpAddress basetypes.StringValue `tfsdk:"ip_address"` + Passphrase basetypes.StringValue `tfsdk:"passphrase"` + Port basetypes.StringValue `tfsdk:"port"` + PrivateKeyPath basetypes.StringValue `tfsdk:"private_key_path"` + Username basetypes.StringValue `tfsdk:"username"` + state attr.ValueState +} + +func (v SshValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 5) + + var val tftypes.Value + var err error + + attrTypes["ip_address"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["passphrase"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["port"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["private_key_path"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["username"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 5) + + val, err = v.IpAddress.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["ip_address"] = val + + val, err = v.Passphrase.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["passphrase"] = val + + val, err = v.Port.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["port"] = val + + val, err = v.PrivateKeyPath.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["private_key_path"] = val + + val, err = v.Username.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["username"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != 
nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v SshValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v SshValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v SshValue) String() string { + return "SshValue" +} + +func (v SshValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "ip_address": basetypes.StringType{}, + "passphrase": basetypes.StringType{}, + "port": basetypes.StringType{}, + "private_key_path": basetypes.StringType{}, + "username": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "ip_address": v.IpAddress, + "passphrase": v.Passphrase, + "port": v.Port, + "private_key_path": v.PrivateKeyPath, + "username": v.Username, + }) + + return objVal, diags +} + +func (v SshValue) Equal(o attr.Value) bool { + other, ok := o.(SshValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.IpAddress.Equal(other.IpAddress) { + return false + } + + if !v.Passphrase.Equal(other.Passphrase) { + return false + } + + if !v.Port.Equal(other.Port) { + return false + } + + if !v.PrivateKeyPath.Equal(other.PrivateKeyPath) { + return false + } + + if !v.Username.Equal(other.Username) { + return false + } + + return true +} + +func (v SshValue) Type(ctx 
context.Context) attr.Type { + return SshType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v SshValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "ip_address": basetypes.StringType{}, + "passphrase": basetypes.StringType{}, + "port": basetypes.StringType{}, + "private_key_path": basetypes.StringType{}, + "username": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = TaintsType{} + +type TaintsType struct { + basetypes.ObjectType +} + +func (t TaintsType) Equal(o attr.Type) bool { + other, ok := o.(TaintsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t TaintsType) String() string { + return "TaintsType" +} + +func (t TaintsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + effectAttribute, ok := attributes["effect"] + + if !ok { + diags.AddError( + "Attribute Missing", + `effect is missing from object`) + + return nil, diags + } + + effectVal, ok := effectAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`effect expected to be basetypes.StringValue, was: %T`, effectAttribute)) + } + + keyAttribute, ok := attributes["key"] + + if !ok { + diags.AddError( + "Attribute Missing", + `key is missing from object`) + + return nil, diags + } + + keyVal, ok := keyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`key expected to be basetypes.StringValue, was: %T`, keyAttribute)) + } + + valueAttribute, ok := attributes["value"] + + if !ok { + diags.AddError( + "Attribute Missing", + `value is missing from object`) + + return nil, diags + } + + valueVal, ok := valueAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`value expected to be 
basetypes.StringValue, was: %T`, valueAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return TaintsValue{ + Effect: effectVal, + Key: keyVal, + Value: valueVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewTaintsValueNull() TaintsValue { + return TaintsValue{ + state: attr.ValueStateNull, + } +} + +func NewTaintsValueUnknown() TaintsValue { + return TaintsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewTaintsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (TaintsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing TaintsValue Attribute Value", + "While creating a TaintsValue value, a missing attribute value was detected. "+ + "A TaintsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("TaintsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid TaintsValue Attribute Type", + "While creating a TaintsValue value, an invalid attribute value was detected. "+ + "A TaintsValue must use a matching attribute type for the value. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("TaintsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("TaintsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra TaintsValue Attribute Value", + "While creating a TaintsValue value, an extra attribute value was detected. "+ + "A TaintsValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra TaintsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewTaintsValueUnknown(), diags + } + + effectAttribute, ok := attributes["effect"] + + if !ok { + diags.AddError( + "Attribute Missing", + `effect is missing from object`) + + return NewTaintsValueUnknown(), diags + } + + effectVal, ok := effectAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`effect expected to be basetypes.StringValue, was: %T`, effectAttribute)) + } + + keyAttribute, ok := attributes["key"] + + if !ok { + diags.AddError( + "Attribute Missing", + `key is missing from object`) + + return NewTaintsValueUnknown(), diags + } + + keyVal, ok := keyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`key expected to be basetypes.StringValue, was: %T`, keyAttribute)) + } + + valueAttribute, ok := attributes["value"] + + if !ok { + diags.AddError( + "Attribute Missing", + `value is missing from object`) + + return NewTaintsValueUnknown(), diags + } + + valueVal, ok := valueAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`value expected to be basetypes.StringValue, was: %T`, valueAttribute)) 
+ } + + if diags.HasError() { + return NewTaintsValueUnknown(), diags + } + + return TaintsValue{ + Effect: effectVal, + Key: keyVal, + Value: valueVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewTaintsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) TaintsValue { + object, diags := NewTaintsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewTaintsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t TaintsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewTaintsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewTaintsValueUnknown(), nil + } + + if in.IsNull() { + return NewTaintsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewTaintsValueMust(TaintsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t TaintsType) ValueType(ctx context.Context) attr.Value { + return TaintsValue{} +} + +var _ basetypes.ObjectValuable = TaintsValue{} + +type TaintsValue struct { + Effect basetypes.StringValue `tfsdk:"effect"` + Key basetypes.StringValue `tfsdk:"key"` + Value basetypes.StringValue `tfsdk:"value"` + state attr.ValueState +} + +func (v TaintsValue) ToTerraformValue(ctx 
context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 3) + + var val tftypes.Value + var err error + + attrTypes["effect"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["key"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["value"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 3) + + val, err = v.Effect.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["effect"] = val + + val, err = v.Key.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["key"] = val + + val, err = v.Value.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["value"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v TaintsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v TaintsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v TaintsValue) String() string { + return "TaintsValue" +} + +func (v TaintsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "effect": basetypes.StringType{}, + "key": basetypes.StringType{}, + "value": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), 
diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "effect": v.Effect, + "key": v.Key, + "value": v.Value, + }) + + return objVal, diags +} + +func (v TaintsValue) Equal(o attr.Value) bool { + other, ok := o.(TaintsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Effect.Equal(other.Effect) { + return false + } + + if !v.Key.Equal(other.Key) { + return false + } + + if !v.Value.Equal(other.Value) { + return false + } + + return true +} + +func (v TaintsValue) Type(ctx context.Context) attr.Type { + return TaintsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v TaintsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "effect": basetypes.StringType{}, + "key": basetypes.StringType{}, + "value": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = ProxyType{} + +type ProxyType struct { + basetypes.ObjectType +} + +func (t ProxyType) Equal(o attr.Type) bool { + other, ok := o.(ProxyType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t ProxyType) String() string { + return "ProxyType" +} + +func (t ProxyType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + allowInsecureBootstrapAttribute, ok := attributes["allow_insecure_bootstrap"] + + if !ok { + diags.AddError( + "Attribute Missing", + `allow_insecure_bootstrap is missing from object`) + + return nil, diags + } + + allowInsecureBootstrapVal, ok := allowInsecureBootstrapAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`allow_insecure_bootstrap expected to be 
basetypes.BoolValue, was: %T`, allowInsecureBootstrapAttribute)) + } + + bootstrapCaAttribute, ok := attributes["bootstrap_ca"] + + if !ok { + diags.AddError( + "Attribute Missing", + `bootstrap_ca is missing from object`) + + return nil, diags + } + + bootstrapCaVal, ok := bootstrapCaAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`bootstrap_ca expected to be basetypes.StringValue, was: %T`, bootstrapCaAttribute)) + } + + enabledAttribute, ok := attributes["enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `enabled is missing from object`) + + return nil, diags + } + + enabledVal, ok := enabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`enabled expected to be basetypes.BoolValue, was: %T`, enabledAttribute)) + } + + httpProxyAttribute, ok := attributes["http_proxy"] + + if !ok { + diags.AddError( + "Attribute Missing", + `http_proxy is missing from object`) + + return nil, diags + } + + httpProxyVal, ok := httpProxyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`http_proxy expected to be basetypes.StringValue, was: %T`, httpProxyAttribute)) + } + + httpsProxyAttribute, ok := attributes["https_proxy"] + + if !ok { + diags.AddError( + "Attribute Missing", + `https_proxy is missing from object`) + + return nil, diags + } + + httpsProxyVal, ok := httpsProxyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`https_proxy expected to be basetypes.StringValue, was: %T`, httpsProxyAttribute)) + } + + noProxyAttribute, ok := attributes["no_proxy"] + + if !ok { + diags.AddError( + "Attribute Missing", + `no_proxy is missing from object`) + + return nil, diags + } + + noProxyVal, ok := noProxyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`no_proxy expected to be 
basetypes.StringValue, was: %T`, noProxyAttribute)) + } + + proxyAuthAttribute, ok := attributes["proxy_auth"] + + if !ok { + diags.AddError( + "Attribute Missing", + `proxy_auth is missing from object`) + + return nil, diags + } + + proxyAuthVal, ok := proxyAuthAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`proxy_auth expected to be basetypes.StringValue, was: %T`, proxyAuthAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return ProxyValue{ + AllowInsecureBootstrap: allowInsecureBootstrapVal, + BootstrapCa: bootstrapCaVal, + Enabled: enabledVal, + HttpProxy: httpProxyVal, + HttpsProxy: httpsProxyVal, + NoProxy: noProxyVal, + ProxyAuth: proxyAuthVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewProxyValueNull() ProxyValue { + return ProxyValue{ + state: attr.ValueStateNull, + } +} + +func NewProxyValueUnknown() ProxyValue { + return ProxyValue{ + state: attr.ValueStateUnknown, + } +} + +func NewProxyValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (ProxyValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing ProxyValue Attribute Value", + "While creating a ProxyValue value, a missing attribute value was detected. "+ + "A ProxyValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ProxyValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid ProxyValue Attribute Type", + "While creating a ProxyValue value, an invalid attribute value was detected. 
"+ + "A ProxyValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ProxyValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("ProxyValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra ProxyValue Attribute Value", + "While creating a ProxyValue value, an extra attribute value was detected. "+ + "A ProxyValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra ProxyValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewProxyValueUnknown(), diags + } + + allowInsecureBootstrapAttribute, ok := attributes["allow_insecure_bootstrap"] + + if !ok { + diags.AddError( + "Attribute Missing", + `allow_insecure_bootstrap is missing from object`) + + return NewProxyValueUnknown(), diags + } + + allowInsecureBootstrapVal, ok := allowInsecureBootstrapAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`allow_insecure_bootstrap expected to be basetypes.BoolValue, was: %T`, allowInsecureBootstrapAttribute)) + } + + bootstrapCaAttribute, ok := attributes["bootstrap_ca"] + + if !ok { + diags.AddError( + "Attribute Missing", + `bootstrap_ca is missing from object`) + + return NewProxyValueUnknown(), diags + } + + bootstrapCaVal, ok := bootstrapCaAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`bootstrap_ca expected to be basetypes.StringValue, was: %T`, bootstrapCaAttribute)) + } + + enabledAttribute, ok := attributes["enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `enabled is missing from object`) + + 
return NewProxyValueUnknown(), diags + } + + enabledVal, ok := enabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`enabled expected to be basetypes.BoolValue, was: %T`, enabledAttribute)) + } + + httpProxyAttribute, ok := attributes["http_proxy"] + + if !ok { + diags.AddError( + "Attribute Missing", + `http_proxy is missing from object`) + + return NewProxyValueUnknown(), diags + } + + httpProxyVal, ok := httpProxyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`http_proxy expected to be basetypes.StringValue, was: %T`, httpProxyAttribute)) + } + + httpsProxyAttribute, ok := attributes["https_proxy"] + + if !ok { + diags.AddError( + "Attribute Missing", + `https_proxy is missing from object`) + + return NewProxyValueUnknown(), diags + } + + httpsProxyVal, ok := httpsProxyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`https_proxy expected to be basetypes.StringValue, was: %T`, httpsProxyAttribute)) + } + + noProxyAttribute, ok := attributes["no_proxy"] + + if !ok { + diags.AddError( + "Attribute Missing", + `no_proxy is missing from object`) + + return NewProxyValueUnknown(), diags + } + + noProxyVal, ok := noProxyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`no_proxy expected to be basetypes.StringValue, was: %T`, noProxyAttribute)) + } + + proxyAuthAttribute, ok := attributes["proxy_auth"] + + if !ok { + diags.AddError( + "Attribute Missing", + `proxy_auth is missing from object`) + + return NewProxyValueUnknown(), diags + } + + proxyAuthVal, ok := proxyAuthAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`proxy_auth expected to be basetypes.StringValue, was: %T`, proxyAuthAttribute)) + } + + if diags.HasError() { + return NewProxyValueUnknown(), diags + } + + return ProxyValue{ 
+ AllowInsecureBootstrap: allowInsecureBootstrapVal, + BootstrapCa: bootstrapCaVal, + Enabled: enabledVal, + HttpProxy: httpProxyVal, + HttpsProxy: httpsProxyVal, + NoProxy: noProxyVal, + ProxyAuth: proxyAuthVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewProxyValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) ProxyValue { + object, diags := NewProxyValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewProxyValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t ProxyType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewProxyValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewProxyValueUnknown(), nil + } + + if in.IsNull() { + return NewProxyValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewProxyValueMust(ProxyValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t ProxyType) ValueType(ctx context.Context) attr.Value { + return ProxyValue{} +} + +var _ basetypes.ObjectValuable = ProxyValue{} + +type ProxyValue struct { + AllowInsecureBootstrap basetypes.BoolValue `tfsdk:"allow_insecure_bootstrap"` + BootstrapCa basetypes.StringValue `tfsdk:"bootstrap_ca"` + Enabled basetypes.BoolValue 
`tfsdk:"enabled"` + HttpProxy basetypes.StringValue `tfsdk:"http_proxy"` + HttpsProxy basetypes.StringValue `tfsdk:"https_proxy"` + NoProxy basetypes.StringValue `tfsdk:"no_proxy"` + ProxyAuth basetypes.StringValue `tfsdk:"proxy_auth"` + state attr.ValueState +} + +func (v ProxyValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 7) + + var val tftypes.Value + var err error + + attrTypes["allow_insecure_bootstrap"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["bootstrap_ca"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["enabled"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["http_proxy"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["https_proxy"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["no_proxy"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["proxy_auth"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 7) + + val, err = v.AllowInsecureBootstrap.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["allow_insecure_bootstrap"] = val + + val, err = v.BootstrapCa.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["bootstrap_ca"] = val + + val, err = v.Enabled.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["enabled"] = val + + val, err = v.HttpProxy.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["http_proxy"] = val + + val, err = v.HttpsProxy.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["https_proxy"] = val + + val, err = 
v.NoProxy.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["no_proxy"] = val + + val, err = v.ProxyAuth.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["proxy_auth"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v ProxyValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v ProxyValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v ProxyValue) String() string { + return "ProxyValue" +} + +func (v ProxyValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "allow_insecure_bootstrap": basetypes.BoolType{}, + "bootstrap_ca": basetypes.StringType{}, + "enabled": basetypes.BoolType{}, + "http_proxy": basetypes.StringType{}, + "https_proxy": basetypes.StringType{}, + "no_proxy": basetypes.StringType{}, + "proxy_auth": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "allow_insecure_bootstrap": v.AllowInsecureBootstrap, + "bootstrap_ca": v.BootstrapCa, + "enabled": v.Enabled, + "http_proxy": v.HttpProxy, + "https_proxy": v.HttpsProxy, + "no_proxy": v.NoProxy, + "proxy_auth": v.ProxyAuth, + }) + + return objVal, diags +} + +func (v ProxyValue) 
Equal(o attr.Value) bool { + other, ok := o.(ProxyValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.AllowInsecureBootstrap.Equal(other.AllowInsecureBootstrap) { + return false + } + + if !v.BootstrapCa.Equal(other.BootstrapCa) { + return false + } + + if !v.Enabled.Equal(other.Enabled) { + return false + } + + if !v.HttpProxy.Equal(other.HttpProxy) { + return false + } + + if !v.HttpsProxy.Equal(other.HttpsProxy) { + return false + } + + if !v.NoProxy.Equal(other.NoProxy) { + return false + } + + if !v.ProxyAuth.Equal(other.ProxyAuth) { + return false + } + + return true +} + +func (v ProxyValue) Type(ctx context.Context) attr.Type { + return ProxyType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v ProxyValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "allow_insecure_bootstrap": basetypes.BoolType{}, + "bootstrap_ca": basetypes.StringType{}, + "enabled": basetypes.BoolType{}, + "http_proxy": basetypes.StringType{}, + "https_proxy": basetypes.StringType{}, + "no_proxy": basetypes.StringType{}, + "proxy_auth": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = SharingType{} + +type SharingType struct { + basetypes.ObjectType +} + +func (t SharingType) Equal(o attr.Type) bool { + other, ok := o.(SharingType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t SharingType) String() string { + return "SharingType" +} + +func (t SharingType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + enabledAttribute, ok := attributes["enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `enabled is missing from object`) + + return nil, diags + } + + enabledVal, ok := 
enabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`enabled expected to be basetypes.BoolValue, was: %T`, enabledAttribute)) + } + + projectsAttribute, ok := attributes["projects"] + + if !ok { + diags.AddError( + "Attribute Missing", + `projects is missing from object`) + + return nil, diags + } + + projectsVal, ok := projectsAttribute.(basetypes.SetValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`projects expected to be basetypes.SetValue, was: %T`, projectsAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return SharingValue{ + Enabled: enabledVal, + Projects: projectsVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewSharingValueNull() SharingValue { + return SharingValue{ + state: attr.ValueStateNull, + } +} + +func NewSharingValueUnknown() SharingValue { + return SharingValue{ + state: attr.ValueStateUnknown, + } +} + +func NewSharingValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (SharingValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing SharingValue Attribute Value", + "While creating a SharingValue value, a missing attribute value was detected. "+ + "A SharingValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("SharingValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid SharingValue Attribute Type", + "While creating a SharingValue value, an invalid attribute value was detected. 
"+ + "A SharingValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("SharingValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("SharingValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra SharingValue Attribute Value", + "While creating a SharingValue value, an extra attribute value was detected. "+ + "A SharingValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra SharingValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewSharingValueUnknown(), diags + } + + enabledAttribute, ok := attributes["enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `enabled is missing from object`) + + return NewSharingValueUnknown(), diags + } + + enabledVal, ok := enabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`enabled expected to be basetypes.BoolValue, was: %T`, enabledAttribute)) + } + + projectsAttribute, ok := attributes["projects"] + + if !ok { + diags.AddError( + "Attribute Missing", + `projects is missing from object`) + + return NewSharingValueUnknown(), diags + } + + projectsVal, ok := projectsAttribute.(basetypes.SetValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`projects expected to be basetypes.SetValue, was: %T`, projectsAttribute)) + } + + if diags.HasError() { + return NewSharingValueUnknown(), diags + } + + return SharingValue{ + Enabled: enabledVal, + Projects: projectsVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewSharingValueMust(attributeTypes map[string]attr.Type, attributes 
map[string]attr.Value) SharingValue { + object, diags := NewSharingValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewSharingValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t SharingType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewSharingValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewSharingValueUnknown(), nil + } + + if in.IsNull() { + return NewSharingValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewSharingValueMust(SharingValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t SharingType) ValueType(ctx context.Context) attr.Value { + return SharingValue{} +} + +var _ basetypes.ObjectValuable = SharingValue{} + +type SharingValue struct { + Enabled basetypes.BoolValue `tfsdk:"enabled"` + Projects basetypes.SetValue `tfsdk:"projects"` + state attr.ValueState +} + +func (v SharingValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 2) + + var val tftypes.Value + var err error + + attrTypes["enabled"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["projects"] = basetypes.SetType{ + ElemType: ProjectsValue{}.Type(ctx), + }.TerraformType(ctx) + + 
objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 2) + + val, err = v.Enabled.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["enabled"] = val + + val, err = v.Projects.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["projects"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v SharingValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v SharingValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v SharingValue) String() string { + return "SharingValue" +} + +func (v SharingValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + projects := types.SetValueMust( + ProjectsType{ + basetypes.ObjectType{ + AttrTypes: ProjectsValue{}.AttributeTypes(ctx), + }, + }, + v.Projects.Elements(), + ) + + if v.Projects.IsNull() { + projects = types.SetNull( + ProjectsType{ + basetypes.ObjectType{ + AttrTypes: ProjectsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.Projects.IsUnknown() { + projects = types.SetUnknown( + ProjectsType{ + basetypes.ObjectType{ + AttrTypes: ProjectsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + attributeTypes := map[string]attr.Type{ + "enabled": basetypes.BoolType{}, + "projects": basetypes.SetType{ + ElemType: ProjectsValue{}.Type(ctx), + }, + } + + if v.IsNull() { + 
return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "enabled": v.Enabled, + "projects": projects, + }) + + return objVal, diags +} + +func (v SharingValue) Equal(o attr.Value) bool { + other, ok := o.(SharingValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Enabled.Equal(other.Enabled) { + return false + } + + if !v.Projects.Equal(other.Projects) { + return false + } + + return true +} + +func (v SharingValue) Type(ctx context.Context) attr.Type { + return SharingType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v SharingValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "enabled": basetypes.BoolType{}, + "projects": basetypes.SetType{ + ElemType: ProjectsValue{}.Type(ctx), + }, + } +} + +var _ basetypes.ObjectTypable = ProjectsType{} + +type ProjectsType struct { + basetypes.ObjectType +} + +func (t ProjectsType) Equal(o attr.Type) bool { + other, ok := o.(ProjectsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t ProjectsType) String() string { + return "ProjectsType" +} + +func (t ProjectsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + nameAttribute, ok := attributes["name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `name is missing from object`) + + return nil, diags + } + + nameVal, ok := nameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`name expected to be basetypes.StringValue, was: %T`, nameAttribute)) + } + + if diags.HasError() { + return nil, diags + 
} + + return ProjectsValue{ + Name: nameVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewProjectsValueNull() ProjectsValue { + return ProjectsValue{ + state: attr.ValueStateNull, + } +} + +func NewProjectsValueUnknown() ProjectsValue { + return ProjectsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewProjectsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (ProjectsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing ProjectsValue Attribute Value", + "While creating a ProjectsValue value, a missing attribute value was detected. "+ + "A ProjectsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ProjectsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid ProjectsValue Attribute Type", + "While creating a ProjectsValue value, an invalid attribute value was detected. "+ + "A ProjectsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ProjectsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("ProjectsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra ProjectsValue Attribute Value", + "While creating a ProjectsValue value, an extra attribute value was detected. 
"+ + "A ProjectsValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra ProjectsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewProjectsValueUnknown(), diags + } + + nameAttribute, ok := attributes["name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `name is missing from object`) + + return NewProjectsValueUnknown(), diags + } + + nameVal, ok := nameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`name expected to be basetypes.StringValue, was: %T`, nameAttribute)) + } + + if diags.HasError() { + return NewProjectsValueUnknown(), diags + } + + return ProjectsValue{ + Name: nameVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewProjectsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) ProjectsValue { + object, diags := NewProjectsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewProjectsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t ProjectsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewProjectsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewProjectsValueUnknown(), nil + } + + if in.IsNull() { + return NewProjectsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewProjectsValueMust(ProjectsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t ProjectsType) ValueType(ctx context.Context) attr.Value { + return ProjectsValue{} +} + +var _ basetypes.ObjectValuable = ProjectsValue{} + +type ProjectsValue struct { + Name basetypes.StringValue `tfsdk:"name"` + state attr.ValueState +} + +func (v ProjectsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 1) + + var val tftypes.Value + var err error + + attrTypes["name"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 1) + + val, err = v.Name.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["name"] = val + + if err := 
tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v ProjectsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v ProjectsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v ProjectsValue) String() string { + return "ProjectsValue" +} + +func (v ProjectsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "name": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "name": v.Name, + }) + + return objVal, diags +} + +func (v ProjectsValue) Equal(o attr.Value) bool { + other, ok := o.(ProjectsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Name.Equal(other.Name) { + return false + } + + return true +} + +func (v ProjectsValue) Type(ctx context.Context) attr.Type { + return ProjectsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v ProjectsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "name": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = SystemComponentsPlacementType{} + +type SystemComponentsPlacementType struct { + basetypes.ObjectType +} + +func (t SystemComponentsPlacementType) 
Equal(o attr.Type) bool { + other, ok := o.(SystemComponentsPlacementType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t SystemComponentsPlacementType) String() string { + return "SystemComponentsPlacementType" +} + +func (t SystemComponentsPlacementType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + daemonSetOverrideAttribute, ok := attributes["daemon_set_override"] + + if !ok { + diags.AddError( + "Attribute Missing", + `daemon_set_override is missing from object`) + + return nil, diags + } + + daemonSetOverrideVal, ok := daemonSetOverrideAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`daemon_set_override expected to be basetypes.ObjectValue, was: %T`, daemonSetOverrideAttribute)) + } + + nodeSelectorAttribute, ok := attributes["node_selector"] + + if !ok { + diags.AddError( + "Attribute Missing", + `node_selector is missing from object`) + + return nil, diags + } + + nodeSelectorVal, ok := nodeSelectorAttribute.(basetypes.MapValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`node_selector expected to be basetypes.MapValue, was: %T`, nodeSelectorAttribute)) + } + + tolerationsAttribute, ok := attributes["tolerations"] + + if !ok { + diags.AddError( + "Attribute Missing", + `tolerations is missing from object`) + + return nil, diags + } + + tolerationsVal, ok := tolerationsAttribute.(basetypes.SetValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`tolerations expected to be basetypes.SetValue, was: %T`, tolerationsAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return SystemComponentsPlacementValue{ + DaemonSetOverride: daemonSetOverrideVal, + NodeSelector: nodeSelectorVal, + Tolerations: tolerationsVal, + state: attr.ValueStateKnown, + }, diags +} + +func 
NewSystemComponentsPlacementValueNull() SystemComponentsPlacementValue { + return SystemComponentsPlacementValue{ + state: attr.ValueStateNull, + } +} + +func NewSystemComponentsPlacementValueUnknown() SystemComponentsPlacementValue { + return SystemComponentsPlacementValue{ + state: attr.ValueStateUnknown, + } +} + +func NewSystemComponentsPlacementValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (SystemComponentsPlacementValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing SystemComponentsPlacementValue Attribute Value", + "While creating a SystemComponentsPlacementValue value, a missing attribute value was detected. "+ + "A SystemComponentsPlacementValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("SystemComponentsPlacementValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid SystemComponentsPlacementValue Attribute Type", + "While creating a SystemComponentsPlacementValue value, an invalid attribute value was detected. "+ + "A SystemComponentsPlacementValue must use a matching attribute type for the value. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("SystemComponentsPlacementValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("SystemComponentsPlacementValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra SystemComponentsPlacementValue Attribute Value", + "While creating a SystemComponentsPlacementValue value, an extra attribute value was detected. "+ + "A SystemComponentsPlacementValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra SystemComponentsPlacementValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewSystemComponentsPlacementValueUnknown(), diags + } + + daemonSetOverrideAttribute, ok := attributes["daemon_set_override"] + + if !ok { + diags.AddError( + "Attribute Missing", + `daemon_set_override is missing from object`) + + return NewSystemComponentsPlacementValueUnknown(), diags + } + + daemonSetOverrideVal, ok := daemonSetOverrideAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`daemon_set_override expected to be basetypes.ObjectValue, was: %T`, daemonSetOverrideAttribute)) + } + + nodeSelectorAttribute, ok := attributes["node_selector"] + + if !ok { + diags.AddError( + "Attribute Missing", + `node_selector is missing from object`) + + return NewSystemComponentsPlacementValueUnknown(), diags + } + + nodeSelectorVal, ok := nodeSelectorAttribute.(basetypes.MapValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`node_selector expected to be basetypes.MapValue, was: %T`, nodeSelectorAttribute)) + } + + tolerationsAttribute, ok := attributes["tolerations"] + + if 
!ok { + diags.AddError( + "Attribute Missing", + `tolerations is missing from object`) + + return NewSystemComponentsPlacementValueUnknown(), diags + } + + tolerationsVal, ok := tolerationsAttribute.(basetypes.SetValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`tolerations expected to be basetypes.SetValue, was: %T`, tolerationsAttribute)) + } + + if diags.HasError() { + return NewSystemComponentsPlacementValueUnknown(), diags + } + + return SystemComponentsPlacementValue{ + DaemonSetOverride: daemonSetOverrideVal, + NodeSelector: nodeSelectorVal, + Tolerations: tolerationsVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewSystemComponentsPlacementValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) SystemComponentsPlacementValue { + object, diags := NewSystemComponentsPlacementValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewSystemComponentsPlacementValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t SystemComponentsPlacementType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewSystemComponentsPlacementValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewSystemComponentsPlacementValueUnknown(), nil + } + + if in.IsNull() { + return NewSystemComponentsPlacementValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := 
range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewSystemComponentsPlacementValueMust(SystemComponentsPlacementValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t SystemComponentsPlacementType) ValueType(ctx context.Context) attr.Value { + return SystemComponentsPlacementValue{} +} + +var _ basetypes.ObjectValuable = SystemComponentsPlacementValue{} + +type SystemComponentsPlacementValue struct { + DaemonSetOverride basetypes.ObjectValue `tfsdk:"daemon_set_override"` + NodeSelector basetypes.MapValue `tfsdk:"node_selector"` + Tolerations basetypes.SetValue `tfsdk:"tolerations"` + state attr.ValueState +} + +func (v SystemComponentsPlacementValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 3) + + var val tftypes.Value + var err error + + attrTypes["daemon_set_override"] = basetypes.ObjectType{ + AttrTypes: DaemonSetOverrideValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["node_selector"] = basetypes.MapType{ + ElemType: types.StringType, + }.TerraformType(ctx) + attrTypes["tolerations"] = basetypes.SetType{ + ElemType: TolerationsValue{}.Type(ctx), + }.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 3) + + val, err = v.DaemonSetOverride.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["daemon_set_override"] = val + + val, err = v.NodeSelector.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["node_selector"] = val + + val, err = v.Tolerations.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["tolerations"] = val + + if err := 
tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v SystemComponentsPlacementValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v SystemComponentsPlacementValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v SystemComponentsPlacementValue) String() string { + return "SystemComponentsPlacementValue" +} + +func (v SystemComponentsPlacementValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + var daemonSetOverride basetypes.ObjectValue + + if v.DaemonSetOverride.IsNull() { + daemonSetOverride = types.ObjectNull( + DaemonSetOverrideValue{}.AttributeTypes(ctx), + ) + } + + if v.DaemonSetOverride.IsUnknown() { + daemonSetOverride = types.ObjectUnknown( + DaemonSetOverrideValue{}.AttributeTypes(ctx), + ) + } + + if !v.DaemonSetOverride.IsNull() && !v.DaemonSetOverride.IsUnknown() { + daemonSetOverride = types.ObjectValueMust( + DaemonSetOverrideValue{}.AttributeTypes(ctx), + v.DaemonSetOverride.Attributes(), + ) + } + + tolerations := types.SetValueMust( + TolerationsType{ + basetypes.ObjectType{ + AttrTypes: TolerationsValue{}.AttributeTypes(ctx), + }, + }, + v.Tolerations.Elements(), + ) + + if v.Tolerations.IsNull() { + tolerations = types.SetNull( + TolerationsType{ + basetypes.ObjectType{ + AttrTypes: TolerationsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.Tolerations.IsUnknown() { + tolerations = types.SetUnknown( + TolerationsType{ + basetypes.ObjectType{ + AttrTypes: TolerationsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + var 
nodeSelectorVal basetypes.MapValue + switch { + case v.NodeSelector.IsUnknown(): + nodeSelectorVal = types.MapUnknown(types.StringType) + case v.NodeSelector.IsNull(): + nodeSelectorVal = types.MapNull(types.StringType) + default: + var d diag.Diagnostics + nodeSelectorVal, d = types.MapValue(types.StringType, v.NodeSelector.Elements()) + diags.Append(d...) + } + + if diags.HasError() { + return types.ObjectUnknown(map[string]attr.Type{ + "daemon_set_override": basetypes.ObjectType{ + AttrTypes: DaemonSetOverrideValue{}.AttributeTypes(ctx), + }, + "node_selector": basetypes.MapType{ + ElemType: types.StringType, + }, + "tolerations": basetypes.SetType{ + ElemType: TolerationsValue{}.Type(ctx), + }, + }), diags + } + + attributeTypes := map[string]attr.Type{ + "daemon_set_override": basetypes.ObjectType{ + AttrTypes: DaemonSetOverrideValue{}.AttributeTypes(ctx), + }, + "node_selector": basetypes.MapType{ + ElemType: types.StringType, + }, + "tolerations": basetypes.SetType{ + ElemType: TolerationsValue{}.Type(ctx), + }, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "daemon_set_override": daemonSetOverride, + "node_selector": nodeSelectorVal, + "tolerations": tolerations, + }) + + return objVal, diags +} + +func (v SystemComponentsPlacementValue) Equal(o attr.Value) bool { + other, ok := o.(SystemComponentsPlacementValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.DaemonSetOverride.Equal(other.DaemonSetOverride) { + return false + } + + if !v.NodeSelector.Equal(other.NodeSelector) { + return false + } + + if !v.Tolerations.Equal(other.Tolerations) { + return false + } + + return true +} + +func (v SystemComponentsPlacementValue) Type(ctx context.Context) attr.Type { + 
return SystemComponentsPlacementType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v SystemComponentsPlacementValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "daemon_set_override": basetypes.ObjectType{ + AttrTypes: DaemonSetOverrideValue{}.AttributeTypes(ctx), + }, + "node_selector": basetypes.MapType{ + ElemType: types.StringType, + }, + "tolerations": basetypes.SetType{ + ElemType: TolerationsValue{}.Type(ctx), + }, + } +} + +var _ basetypes.ObjectTypable = DaemonSetOverrideType{} + +type DaemonSetOverrideType struct { + basetypes.ObjectType +} + +func (t DaemonSetOverrideType) Equal(o attr.Type) bool { + other, ok := o.(DaemonSetOverrideType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t DaemonSetOverrideType) String() string { + return "DaemonSetOverrideType" +} + +func (t DaemonSetOverrideType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + daemonSetTolerationsAttribute, ok := attributes["daemon_set_tolerations"] + + if !ok { + diags.AddError( + "Attribute Missing", + `daemon_set_tolerations is missing from object`) + + return nil, diags + } + + daemonSetTolerationsVal, ok := daemonSetTolerationsAttribute.(basetypes.SetValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`daemon_set_tolerations expected to be basetypes.SetValue, was: %T`, daemonSetTolerationsAttribute)) + } + + nodeSelectionEnabledAttribute, ok := attributes["node_selection_enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `node_selection_enabled is missing from object`) + + return nil, diags + } + + nodeSelectionEnabledVal, ok := nodeSelectionEnabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`node_selection_enabled expected to be 
basetypes.BoolValue, was: %T`, nodeSelectionEnabledAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return DaemonSetOverrideValue{ + DaemonSetTolerations: daemonSetTolerationsVal, + NodeSelectionEnabled: nodeSelectionEnabledVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewDaemonSetOverrideValueNull() DaemonSetOverrideValue { + return DaemonSetOverrideValue{ + state: attr.ValueStateNull, + } +} + +func NewDaemonSetOverrideValueUnknown() DaemonSetOverrideValue { + return DaemonSetOverrideValue{ + state: attr.ValueStateUnknown, + } +} + +func NewDaemonSetOverrideValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (DaemonSetOverrideValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing DaemonSetOverrideValue Attribute Value", + "While creating a DaemonSetOverrideValue value, a missing attribute value was detected. "+ + "A DaemonSetOverrideValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("DaemonSetOverrideValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid DaemonSetOverrideValue Attribute Type", + "While creating a DaemonSetOverrideValue value, an invalid attribute value was detected. "+ + "A DaemonSetOverrideValue must use a matching attribute type for the value. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("DaemonSetOverrideValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("DaemonSetOverrideValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra DaemonSetOverrideValue Attribute Value", + "While creating a DaemonSetOverrideValue value, an extra attribute value was detected. "+ + "A DaemonSetOverrideValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra DaemonSetOverrideValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewDaemonSetOverrideValueUnknown(), diags + } + + daemonSetTolerationsAttribute, ok := attributes["daemon_set_tolerations"] + + if !ok { + diags.AddError( + "Attribute Missing", + `daemon_set_tolerations is missing from object`) + + return NewDaemonSetOverrideValueUnknown(), diags + } + + daemonSetTolerationsVal, ok := daemonSetTolerationsAttribute.(basetypes.SetValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`daemon_set_tolerations expected to be basetypes.SetValue, was: %T`, daemonSetTolerationsAttribute)) + } + + nodeSelectionEnabledAttribute, ok := attributes["node_selection_enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `node_selection_enabled is missing from object`) + + return NewDaemonSetOverrideValueUnknown(), diags + } + + nodeSelectionEnabledVal, ok := nodeSelectionEnabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`node_selection_enabled expected to be basetypes.BoolValue, was: %T`, nodeSelectionEnabledAttribute)) + } + + if diags.HasError() { + return 
NewDaemonSetOverrideValueUnknown(), diags + } + + return DaemonSetOverrideValue{ + DaemonSetTolerations: daemonSetTolerationsVal, + NodeSelectionEnabled: nodeSelectionEnabledVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewDaemonSetOverrideValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) DaemonSetOverrideValue { + object, diags := NewDaemonSetOverrideValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewDaemonSetOverrideValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t DaemonSetOverrideType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewDaemonSetOverrideValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewDaemonSetOverrideValueUnknown(), nil + } + + if in.IsNull() { + return NewDaemonSetOverrideValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewDaemonSetOverrideValueMust(DaemonSetOverrideValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t DaemonSetOverrideType) ValueType(ctx context.Context) attr.Value { + return DaemonSetOverrideValue{} +} + +var _ basetypes.ObjectValuable = DaemonSetOverrideValue{} + +type DaemonSetOverrideValue struct { + DaemonSetTolerations basetypes.SetValue 
`tfsdk:"daemon_set_tolerations"` + NodeSelectionEnabled basetypes.BoolValue `tfsdk:"node_selection_enabled"` + state attr.ValueState +} + +func (v DaemonSetOverrideValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 2) + + var val tftypes.Value + var err error + + attrTypes["daemon_set_tolerations"] = basetypes.SetType{ + ElemType: DaemonSetTolerationsValue{}.Type(ctx), + }.TerraformType(ctx) + attrTypes["node_selection_enabled"] = basetypes.BoolType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 2) + + val, err = v.DaemonSetTolerations.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["daemon_set_tolerations"] = val + + val, err = v.NodeSelectionEnabled.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["node_selection_enabled"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v DaemonSetOverrideValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v DaemonSetOverrideValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v DaemonSetOverrideValue) String() string { + return "DaemonSetOverrideValue" +} + +func (v DaemonSetOverrideValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + daemonSetTolerations := 
types.SetValueMust( + DaemonSetTolerationsType{ + basetypes.ObjectType{ + AttrTypes: DaemonSetTolerationsValue{}.AttributeTypes(ctx), + }, + }, + v.DaemonSetTolerations.Elements(), + ) + + if v.DaemonSetTolerations.IsNull() { + daemonSetTolerations = types.SetNull( + DaemonSetTolerationsType{ + basetypes.ObjectType{ + AttrTypes: DaemonSetTolerationsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.DaemonSetTolerations.IsUnknown() { + daemonSetTolerations = types.SetUnknown( + DaemonSetTolerationsType{ + basetypes.ObjectType{ + AttrTypes: DaemonSetTolerationsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + attributeTypes := map[string]attr.Type{ + "daemon_set_tolerations": basetypes.SetType{ + ElemType: DaemonSetTolerationsValue{}.Type(ctx), + }, + "node_selection_enabled": basetypes.BoolType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "daemon_set_tolerations": daemonSetTolerations, + "node_selection_enabled": v.NodeSelectionEnabled, + }) + + return objVal, diags +} + +func (v DaemonSetOverrideValue) Equal(o attr.Value) bool { + other, ok := o.(DaemonSetOverrideValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.DaemonSetTolerations.Equal(other.DaemonSetTolerations) { + return false + } + + if !v.NodeSelectionEnabled.Equal(other.NodeSelectionEnabled) { + return false + } + + return true +} + +func (v DaemonSetOverrideValue) Type(ctx context.Context) attr.Type { + return DaemonSetOverrideType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v DaemonSetOverrideValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "daemon_set_tolerations": basetypes.SetType{ + ElemType: 
DaemonSetTolerationsValue{}.Type(ctx), + }, + "node_selection_enabled": basetypes.BoolType{}, + } +} + +var _ basetypes.ObjectTypable = DaemonSetTolerationsType{} + +type DaemonSetTolerationsType struct { + basetypes.ObjectType +} + +func (t DaemonSetTolerationsType) Equal(o attr.Type) bool { + other, ok := o.(DaemonSetTolerationsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t DaemonSetTolerationsType) String() string { + return "DaemonSetTolerationsType" +} + +func (t DaemonSetTolerationsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + effectAttribute, ok := attributes["effect"] + + if !ok { + diags.AddError( + "Attribute Missing", + `effect is missing from object`) + + return nil, diags + } + + effectVal, ok := effectAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`effect expected to be basetypes.StringValue, was: %T`, effectAttribute)) + } + + keyAttribute, ok := attributes["key"] + + if !ok { + diags.AddError( + "Attribute Missing", + `key is missing from object`) + + return nil, diags + } + + keyVal, ok := keyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`key expected to be basetypes.StringValue, was: %T`, keyAttribute)) + } + + operatorAttribute, ok := attributes["operator"] + + if !ok { + diags.AddError( + "Attribute Missing", + `operator is missing from object`) + + return nil, diags + } + + operatorVal, ok := operatorAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`operator expected to be basetypes.StringValue, was: %T`, operatorAttribute)) + } + + tolerationSecondsAttribute, ok := attributes["toleration_seconds"] + + if !ok { + diags.AddError( + "Attribute Missing", + `toleration_seconds is missing from 
object`) + + return nil, diags + } + + tolerationSecondsVal, ok := tolerationSecondsAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`toleration_seconds expected to be basetypes.Int64Value, was: %T`, tolerationSecondsAttribute)) + } + + valueAttribute, ok := attributes["value"] + + if !ok { + diags.AddError( + "Attribute Missing", + `value is missing from object`) + + return nil, diags + } + + valueVal, ok := valueAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`value expected to be basetypes.StringValue, was: %T`, valueAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return DaemonSetTolerationsValue{ + Effect: effectVal, + Key: keyVal, + Operator: operatorVal, + TolerationSeconds: tolerationSecondsVal, + Value: valueVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewDaemonSetTolerationsValueNull() DaemonSetTolerationsValue { + return DaemonSetTolerationsValue{ + state: attr.ValueStateNull, + } +} + +func NewDaemonSetTolerationsValueUnknown() DaemonSetTolerationsValue { + return DaemonSetTolerationsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewDaemonSetTolerationsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (DaemonSetTolerationsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing DaemonSetTolerationsValue Attribute Value", + "While creating a DaemonSetTolerationsValue value, a missing attribute value was detected. "+ + "A DaemonSetTolerationsValue must contain values for all attributes, even if null or unknown. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("DaemonSetTolerationsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid DaemonSetTolerationsValue Attribute Type", + "While creating a DaemonSetTolerationsValue value, an invalid attribute value was detected. "+ + "A DaemonSetTolerationsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("DaemonSetTolerationsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("DaemonSetTolerationsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra DaemonSetTolerationsValue Attribute Value", + "While creating a DaemonSetTolerationsValue value, an extra attribute value was detected. "+ + "A DaemonSetTolerationsValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra DaemonSetTolerationsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewDaemonSetTolerationsValueUnknown(), diags + } + + effectAttribute, ok := attributes["effect"] + + if !ok { + diags.AddError( + "Attribute Missing", + `effect is missing from object`) + + return NewDaemonSetTolerationsValueUnknown(), diags + } + + effectVal, ok := effectAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`effect expected to be basetypes.StringValue, was: %T`, effectAttribute)) + } + + keyAttribute, ok := attributes["key"] + + if !ok { + diags.AddError( + "Attribute Missing", + `key is missing from object`) + + return NewDaemonSetTolerationsValueUnknown(), diags + } + + keyVal, ok := keyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`key expected to be basetypes.StringValue, was: %T`, keyAttribute)) + } + + operatorAttribute, ok := attributes["operator"] + + if !ok { + diags.AddError( + "Attribute Missing", + `operator is missing from object`) + + return NewDaemonSetTolerationsValueUnknown(), diags + } + + operatorVal, ok := operatorAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`operator expected to be basetypes.StringValue, was: %T`, operatorAttribute)) + } + + tolerationSecondsAttribute, ok := attributes["toleration_seconds"] + + if !ok { + diags.AddError( + "Attribute Missing", + `toleration_seconds is missing from object`) + + return NewDaemonSetTolerationsValueUnknown(), diags + } + + tolerationSecondsVal, ok := tolerationSecondsAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`toleration_seconds expected to be basetypes.Int64Value, was: %T`, tolerationSecondsAttribute)) + } + + valueAttribute, ok 
:= attributes["value"] + + if !ok { + diags.AddError( + "Attribute Missing", + `value is missing from object`) + + return NewDaemonSetTolerationsValueUnknown(), diags + } + + valueVal, ok := valueAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`value expected to be basetypes.StringValue, was: %T`, valueAttribute)) + } + + if diags.HasError() { + return NewDaemonSetTolerationsValueUnknown(), diags + } + + return DaemonSetTolerationsValue{ + Effect: effectVal, + Key: keyVal, + Operator: operatorVal, + TolerationSeconds: tolerationSecondsVal, + Value: valueVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewDaemonSetTolerationsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) DaemonSetTolerationsValue { + object, diags := NewDaemonSetTolerationsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewDaemonSetTolerationsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t DaemonSetTolerationsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewDaemonSetTolerationsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewDaemonSetTolerationsValueUnknown(), nil + } + + if in.IsNull() { + return NewDaemonSetTolerationsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := 
t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewDaemonSetTolerationsValueMust(DaemonSetTolerationsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t DaemonSetTolerationsType) ValueType(ctx context.Context) attr.Value { + return DaemonSetTolerationsValue{} +} + +var _ basetypes.ObjectValuable = DaemonSetTolerationsValue{} + +type DaemonSetTolerationsValue struct { + Effect basetypes.StringValue `tfsdk:"effect"` + Key basetypes.StringValue `tfsdk:"key"` + Operator basetypes.StringValue `tfsdk:"operator"` + TolerationSeconds basetypes.Int64Value `tfsdk:"toleration_seconds"` + Value basetypes.StringValue `tfsdk:"value"` + state attr.ValueState +} + +func (v DaemonSetTolerationsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 5) + + var val tftypes.Value + var err error + + attrTypes["effect"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["key"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["operator"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["toleration_seconds"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["value"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 5) + + val, err = v.Effect.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["effect"] = val + + val, err = v.Key.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["key"] = val + + val, err = v.Operator.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["operator"] = val + + val, err = v.TolerationSeconds.ToTerraformValue(ctx) + + if err != nil { + return 
tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["toleration_seconds"] = val + + val, err = v.Value.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["value"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v DaemonSetTolerationsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v DaemonSetTolerationsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v DaemonSetTolerationsValue) String() string { + return "DaemonSetTolerationsValue" +} + +func (v DaemonSetTolerationsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "effect": basetypes.StringType{}, + "key": basetypes.StringType{}, + "operator": basetypes.StringType{}, + "toleration_seconds": basetypes.Int64Type{}, + "value": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "effect": v.Effect, + "key": v.Key, + "operator": v.Operator, + "toleration_seconds": v.TolerationSeconds, + "value": v.Value, + }) + + return objVal, diags +} + +func (v DaemonSetTolerationsValue) Equal(o attr.Value) bool { + other, ok := o.(DaemonSetTolerationsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != 
attr.ValueStateKnown { + return true + } + + if !v.Effect.Equal(other.Effect) { + return false + } + + if !v.Key.Equal(other.Key) { + return false + } + + if !v.Operator.Equal(other.Operator) { + return false + } + + if !v.TolerationSeconds.Equal(other.TolerationSeconds) { + return false + } + + if !v.Value.Equal(other.Value) { + return false + } + + return true +} + +func (v DaemonSetTolerationsValue) Type(ctx context.Context) attr.Type { + return DaemonSetTolerationsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v DaemonSetTolerationsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "effect": basetypes.StringType{}, + "key": basetypes.StringType{}, + "operator": basetypes.StringType{}, + "toleration_seconds": basetypes.Int64Type{}, + "value": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = TolerationsType{} + +type TolerationsType struct { + basetypes.ObjectType +} + +func (t TolerationsType) Equal(o attr.Type) bool { + other, ok := o.(TolerationsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t TolerationsType) String() string { + return "TolerationsType" +} + +func (t TolerationsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + effectAttribute, ok := attributes["effect"] + + if !ok { + diags.AddError( + "Attribute Missing", + `effect is missing from object`) + + return nil, diags + } + + effectVal, ok := effectAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`effect expected to be basetypes.StringValue, was: %T`, effectAttribute)) + } + + keyAttribute, ok := attributes["key"] + + if !ok { + diags.AddError( + "Attribute Missing", + `key is missing from object`) + + return nil, diags + } + + keyVal, ok := 
keyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`key expected to be basetypes.StringValue, was: %T`, keyAttribute)) + } + + operatorAttribute, ok := attributes["operator"] + + if !ok { + diags.AddError( + "Attribute Missing", + `operator is missing from object`) + + return nil, diags + } + + operatorVal, ok := operatorAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`operator expected to be basetypes.StringValue, was: %T`, operatorAttribute)) + } + + tolerationSecondsAttribute, ok := attributes["toleration_seconds"] + + if !ok { + diags.AddError( + "Attribute Missing", + `toleration_seconds is missing from object`) + + return nil, diags + } + + tolerationSecondsVal, ok := tolerationSecondsAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`toleration_seconds expected to be basetypes.Int64Value, was: %T`, tolerationSecondsAttribute)) + } + + valueAttribute, ok := attributes["value"] + + if !ok { + diags.AddError( + "Attribute Missing", + `value is missing from object`) + + return nil, diags + } + + valueVal, ok := valueAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`value expected to be basetypes.StringValue, was: %T`, valueAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return TolerationsValue{ + Effect: effectVal, + Key: keyVal, + Operator: operatorVal, + TolerationSeconds: tolerationSecondsVal, + Value: valueVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewTolerationsValueNull() TolerationsValue { + return TolerationsValue{ + state: attr.ValueStateNull, + } +} + +func NewTolerationsValueUnknown() TolerationsValue { + return TolerationsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewTolerationsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (TolerationsValue, 
diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing TolerationsValue Attribute Value", + "While creating a TolerationsValue value, a missing attribute value was detected. "+ + "A TolerationsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("TolerationsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid TolerationsValue Attribute Type", + "While creating a TolerationsValue value, an invalid attribute value was detected. "+ + "A TolerationsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("TolerationsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("TolerationsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra TolerationsValue Attribute Value", + "While creating a TolerationsValue value, an extra attribute value was detected. "+ + "A TolerationsValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra TolerationsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewTolerationsValueUnknown(), diags + } + + effectAttribute, ok := attributes["effect"] + + if !ok { + diags.AddError( + "Attribute Missing", + `effect is missing from object`) + + return NewTolerationsValueUnknown(), diags + } + + effectVal, ok := effectAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`effect expected to be basetypes.StringValue, was: %T`, effectAttribute)) + } + + keyAttribute, ok := attributes["key"] + + if !ok { + diags.AddError( + "Attribute Missing", + `key is missing from object`) + + return NewTolerationsValueUnknown(), diags + } + + keyVal, ok := keyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`key expected to be basetypes.StringValue, was: %T`, keyAttribute)) + } + + operatorAttribute, ok := attributes["operator"] + + if !ok { + diags.AddError( + "Attribute Missing", + `operator is missing from object`) + + return NewTolerationsValueUnknown(), diags + } + + operatorVal, ok := operatorAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`operator expected to be basetypes.StringValue, was: %T`, operatorAttribute)) + } + + tolerationSecondsAttribute, ok := attributes["toleration_seconds"] + + if !ok { + diags.AddError( + "Attribute Missing", + `toleration_seconds is missing from object`) + + return NewTolerationsValueUnknown(), diags + } + + tolerationSecondsVal, ok := tolerationSecondsAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`toleration_seconds expected to be basetypes.Int64Value, was: %T`, tolerationSecondsAttribute)) + } + + valueAttribute, ok := attributes["value"] + + if !ok { + diags.AddError( + 
"Attribute Missing", + `value is missing from object`) + + return NewTolerationsValueUnknown(), diags + } + + valueVal, ok := valueAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`value expected to be basetypes.StringValue, was: %T`, valueAttribute)) + } + + if diags.HasError() { + return NewTolerationsValueUnknown(), diags + } + + return TolerationsValue{ + Effect: effectVal, + Key: keyVal, + Operator: operatorVal, + TolerationSeconds: tolerationSecondsVal, + Value: valueVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewTolerationsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) TolerationsValue { + object, diags := NewTolerationsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewTolerationsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t TolerationsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewTolerationsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewTolerationsValueUnknown(), nil + } + + if in.IsNull() { + return NewTolerationsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return 
NewTolerationsValueMust(TolerationsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t TolerationsType) ValueType(ctx context.Context) attr.Value { + return TolerationsValue{} +} + +var _ basetypes.ObjectValuable = TolerationsValue{} + +type TolerationsValue struct { + Effect basetypes.StringValue `tfsdk:"effect"` + Key basetypes.StringValue `tfsdk:"key"` + Operator basetypes.StringValue `tfsdk:"operator"` + TolerationSeconds basetypes.Int64Value `tfsdk:"toleration_seconds"` + Value basetypes.StringValue `tfsdk:"value"` + state attr.ValueState +} + +func (v TolerationsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 5) + + var val tftypes.Value + var err error + + attrTypes["effect"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["key"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["operator"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["toleration_seconds"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["value"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 5) + + val, err = v.Effect.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["effect"] = val + + val, err = v.Key.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["key"] = val + + val, err = v.Operator.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["operator"] = val + + val, err = v.TolerationSeconds.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["toleration_seconds"] = val + + val, err = v.Value.ToTerraformValue(ctx) + + if err != nil { + return 
tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["value"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v TolerationsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v TolerationsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v TolerationsValue) String() string { + return "TolerationsValue" +} + +func (v TolerationsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "effect": basetypes.StringType{}, + "key": basetypes.StringType{}, + "operator": basetypes.StringType{}, + "toleration_seconds": basetypes.Int64Type{}, + "value": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "effect": v.Effect, + "key": v.Key, + "operator": v.Operator, + "toleration_seconds": v.TolerationSeconds, + "value": v.Value, + }) + + return objVal, diags +} + +func (v TolerationsValue) Equal(o attr.Value) bool { + other, ok := o.(TolerationsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Effect.Equal(other.Effect) { + return false + } + + if !v.Key.Equal(other.Key) { + return false + } + + if !v.Operator.Equal(other.Operator) { + return false + } + + if 
!v.TolerationSeconds.Equal(other.TolerationSeconds) { + return false + } + + if !v.Value.Equal(other.Value) { + return false + } + + return true +} + +func (v TolerationsValue) Type(ctx context.Context) attr.Type { + return TolerationsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v TolerationsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "effect": basetypes.StringType{}, + "key": basetypes.StringType{}, + "operator": basetypes.StringType{}, + "toleration_seconds": basetypes.Int64Type{}, + "value": basetypes.StringType{}, + } +} diff --git a/internal/resource_mks_cluster/mks_cluster_resource_spec.json b/internal/resource_mks_cluster/mks_cluster_resource_spec.json new file mode 100644 index 00000000..dde73711 --- /dev/null +++ b/internal/resource_mks_cluster/mks_cluster_resource_spec.json @@ -0,0 +1,654 @@ +{ + "provider": { + "name": "rafay" + }, + "resources": [ + { + "name": "mks_cluster", + "schema": { + "attributes": [ + { + "name": "api_version", + "string": { + "computed_optional_required": "computed_optional", + "default": { + "static": "infra.k8smgmt.io/v3" + }, + "description": "api version" + } + }, + { + "name": "kind", + "string": { + "computed_optional_required": "computed_optional", + "default": { + "static": "Cluster" + }, + "description": "kind" + } + }, + { + "name": "metadata", + "single_nested": { + "computed_optional_required": "required", + "attributes": [ + { + "name": "name", + "string": { + "computed_optional_required": "required", + "description": "name of the resource" + } + }, + { + "name": "project", + "string": { + "computed_optional_required": "required", + "description": "Project of the resource" + } + }, + { + "name": "annotations", + "map": { + "computed_optional_required": "computed_optional", + "element_type": { + "string": {} + }, + "description": "annotations of the resource" + } + }, + { + "name": "description", + "string": { + 
"computed_optional_required": "optional", + "description": "description of the resource" + } + }, + { + "name": "labels", + "map": { + "computed_optional_required": "computed_optional", + "element_type": { + "string": {} + }, + "description": "labels of the resource" + } + } + ], + "description": "metadata of the resource" + } + }, + { + "name": "spec", + "single_nested": { + "computed_optional_required": "required", + "attributes": [ + { + "name": "blueprint", + "single_nested": { + "computed_optional_required": "required", + "attributes": [ + { + "name": "name", + "string": { + "computed_optional_required": "required" + } + }, + { + "name": "version", + "string": { + "computed_optional_required": "computed_optional", + "default": { + "static": "latest" + }, + "description": "Version of the blueprint" + } + } + ] + } + }, + { + "name": "cloud_credentials", + "string": { + "computed_optional_required": "optional", + "description": "The credentials to be used to ssh into the Clusster Nodes" + } + }, + { + "name": "config", + "single_nested": { + "computed_optional_required": "required", + "attributes": [ + { + "name": "auto_approve_nodes", + "bool": { + "computed_optional_required": "optional", + "description": "Auto approves incoming nodes by default" + } + }, + { + "name": "dedicated_control_plane", + "bool": { + "computed_optional_required": "optional", + "description": "Select this option for preventing scheduling of user workloads on Control Plane nodes" + } + }, + { + "name": "high_availability", + "bool": { + "computed_optional_required": "optional", + "description": "Select this option for highly available control plane. 
Minimum three control plane nodes are required" + } + }, + { + "name": "kubernetes_upgrade", + "single_nested": { + "computed_optional_required": "optional", + "attributes": [ + { + "name": "params", + "single_nested": { + "computed_optional_required": "required", + "attributes": [ + { + "name": "worker_concurrency", + "string": { + "computed_optional_required": "required", + "description": "It can be number or percentage" + } + } + ] + } + }, + { + "name": "strategy", + "string": { + "computed_optional_required": "required", + "description": "Kubernetes upgrade strategy for worker nodes and Valid options are: concurrent/sequential" + } + } + ] + } + }, + { + "name": "kubernetes_version", + "string": { + "computed_optional_required": "required", + "description": "Kubernetes version of the Control Plane" + } + }, + { + "name": "installer_ttl", + "int64": { + "computed_optional_required": "optional", + "default": { + "static": 365 + }, + "description": "Installer TTL Configuration" + } + }, + { + "name": "location", + "string": { + "computed_optional_required": "optional", + "description": "The data center location where the cluster nodes will be launched" + } + }, + { + "name": "network", + "single_nested": { + "computed_optional_required": "required", + "attributes": [ + { + "name": "cni", + "single_nested": { + "computed_optional_required": "required", + "attributes": [ + { + "name": "name", + "string": { + "computed_optional_required": "required", + "description": "Provide the CNI name, e.g., Calico or Cilium" + } + }, + { + "name": "version", + "string": { + "computed_optional_required": "required", + "description": "Provide the CNI version, e.g., 3.26.1" + } + } + ], + "description": "MKS Cluster CNI Specification" + } + }, + { + "name": "ipv6", + "single_nested": { + "computed_optional_required": "optional", + "attributes": [ + { + "name": "pod_subnet", + "string": { + "computed_optional_required": "optional", + "description": "Kubernetes pod subnet" + } + }, 
+ { + "name": "service_subnet", + "string": { + "computed_optional_required": "optional", + "description": "Kubernetes service subnet" + } + } + ] + } + }, + { + "name": "pod_subnet", + "string": { + "computed_optional_required": "required", + "description": "Kubernetes pod subnet" + } + }, + { + "name": "service_subnet", + "string": { + "computed_optional_required": "required", + "description": "Kubernetes service subnet" + } + } + ], + "description": "MKS Cluster Network Specification" + } + }, + { + "name": "nodes", + "map_nested": { + "computed_optional_required": "required", + "nested_object": { + "attributes": [ + { + "name": "arch", + "string": { + "computed_optional_required": "required", + "description": "System Architecture of the node" + } + }, + { + "name": "hostname", + "string": { + "computed_optional_required": "required", + "description": "Hostname of the node" + } + }, + { + "name": "interface", + "string": { + "computed_optional_required": "optional", + "description": "Interface to be used on the node" + } + }, + { + "name": "labels", + "map": { + "computed_optional_required": "computed_optional", + "element_type": { + "string": {} + }, + "description": "labels to be added to the node" + } + }, + { + "name": "operating_system", + "string": { + "computed_optional_required": "required", + "description": "OS of the node" + } + }, + { + "name": "private_ip", + "string": { + "computed_optional_required": "required", + "description": "Private ip address of the node" + } + }, + { + "name": "roles", + "set": { + "computed_optional_required": "required", + "element_type": { + "string": {} + }, + "description": "Valid roles are: 'ControlPlane', 'Worker', 'Storage'" + } + }, + { + "name": "ssh", + "single_nested": { + "computed_optional_required": "optional", + "attributes": [ + { + "name": "ip_address", + "string": { + "computed_optional_required": "optional", + "description": "Use this to override node level ssh details" + } + }, + { + "name": 
"passphrase", + "string": { + "computed_optional_required": "optional", + "description": "SSH Passphrase" + } + }, + { + "name": "port", + "string": { + "computed_optional_required": "optional", + "description": "SSH Port" + } + }, + { + "name": "private_key_path", + "string": { + "computed_optional_required": "optional", + "description": "Specify Path to SSH private key" + } + }, + { + "name": "username", + "string": { + "computed_optional_required": "optional", + "description": "SSH Username" + } + } + ], + "description": "MKS Node SSH definition" + } + }, + { + "name": "taints", + "set_nested": { + "computed_optional_required": "optional", + "nested_object": { + "attributes": [ + { + "name": "effect", + "string": { + "computed_optional_required": "required" + } + }, + { + "name": "key", + "string": { + "computed_optional_required": "required" + } + }, + { + "name": "value", + "string": { + "computed_optional_required": "optional" + } + } + ] + }, + "description": "taints to be added to the node" + } + } + ] + }, + "description": "holds node configuration for the cluster" + } + }, + { + "name": "cluster_ssh", + "single_nested": { + "computed_optional_required": "optional", + "attributes": [ + { + "name": "passphrase", + "string": { + "computed_optional_required": "optional", + "description": "Provide ssh passphrase" + } + }, + { + "name": "port", + "string": { + "computed_optional_required": "optional", + "description": "Provide ssh port" + } + }, + { + "name": "private_key_path", + "string": { + "computed_optional_required": "optional", + "description": "Provide local path to the private key" + } + }, + { + "name": "username", + "string": { + "computed_optional_required": "optional", + "description": "Provide the ssh username" + } + } + ], + "description": "SSH config for all the nodes within the cluster" + } + } + ], + "description": "MKS V3 cluster specification" + } + }, + { + "name": "proxy", + "single_nested": { + "computed_optional_required": "optional", + 
"attributes": [ + { + "name": "allow_insecure_bootstrap", + "bool": { + "computed_optional_required": "optional" + } + }, + { + "name": "bootstrap_ca", + "string": { + "computed_optional_required": "optional" + } + }, + { + "name": "enabled", + "bool": { + "computed_optional_required": "required" + } + }, + { + "name": "http_proxy", + "string": { + "computed_optional_required": "required" + } + }, + { + "name": "https_proxy", + "string": { + "computed_optional_required": "optional" + } + }, + { + "name": "no_proxy", + "string": { + "computed_optional_required": "optional" + } + }, + { + "name": "proxy_auth", + "string": { + "computed_optional_required": "optional" + } + } + ] + } + }, + { + "name": "sharing", + "single_nested": { + "computed_optional_required": "optional", + "attributes": [ + { + "name": "enabled", + "bool": { + "computed_optional_required": "required" + } + }, + { + "name": "projects", + "set_nested": { + "computed_optional_required": "required", + "nested_object": { + "attributes": [ + { + "name": "name", + "string": { + "computed_optional_required": "required" + } + } + ] + } + } + } + ] + } + }, + { + "name": "system_components_placement", + "single_nested": { + "computed_optional_required": "optional", + "attributes": [ + { + "name": "daemon_set_override", + "single_nested": { + "computed_optional_required": "optional", + "attributes": [ + { + "name": "node_selection_enabled", + "bool": { + "computed_optional_required": "optional" + } + }, + { + "name": "daemon_set_tolerations", + "set_nested": { + "computed_optional_required": "optional", + "nested_object": { + "attributes": [ + { + "name": "effect", + "string": { + "computed_optional_required": "optional" + } + }, + { + "name": "key", + "string": { + "computed_optional_required": "optional" + } + }, + { + "name": "operator", + "string": { + "computed_optional_required": "optional" + } + }, + { + "name": "toleration_seconds", + "int64": { + "computed_optional_required": "optional" + } + }, + 
{ + "name": "value", + "string": { + "computed_optional_required": "optional" + } + } + ] + } + } + } + ] + } + }, + { + "name": "node_selector", + "map": { + "computed_optional_required": "computed_optional", + "element_type": { + "string": {} + } + } + }, + { + "name": "tolerations", + "set_nested": { + "computed_optional_required": "optional", + "nested_object": { + "attributes": [ + { + "name": "effect", + "string": { + "computed_optional_required": "optional" + } + }, + { + "name": "key", + "string": { + "computed_optional_required": "optional" + } + }, + { + "name": "operator", + "string": { + "computed_optional_required": "optional" + } + }, + { + "name": "toleration_seconds", + "int64": { + "computed_optional_required": "optional" + } + }, + { + "name": "value", + "string": { + "computed_optional_required": "optional" + } + } + ] + } + } + } + ] + } + }, + { + "name": "type", + "string": { + "computed_optional_required": "computed_optional", + "default": { + "static": "mks" + }, + "description": "The type of the cluster this spec corresponds to" + } + } + ], + "description": "cluster specification" + } + } + ] + } + } + ], + "version": "0.1" +} \ No newline at end of file diff --git a/internal/scripts/fwgen.sh b/internal/scripts/fwgen.sh new file mode 100755 index 00000000..9418ce7f --- /dev/null +++ b/internal/scripts/fwgen.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# Install the Terraform Plugin Code Generator Framework +go install github.com/hashicorp/terraform-plugin-codegen-framework/cmd/tfplugingen-framework@v0.4.1 + +for file in $(find internal/resource_* -type f -name "*.json"); do + echo "Generating framework provider code for ${file}..." 
+ tfplugingen-framework generate resources \ + --input=${file} \ + --output=internal/ \ + ${file} +done \ No newline at end of file diff --git a/main.go b/main.go index 1b579fc6..096444f4 100644 --- a/main.go +++ b/main.go @@ -5,8 +5,14 @@ import ( "flag" "log" - "github.com/RafaySystems/terraform-provider-rafay/rafay" - "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server" + "github.com/hashicorp/terraform-plugin-mux/tf5to6server" + "github.com/hashicorp/terraform-plugin-mux/tf6muxserver" + + framework "github.com/RafaySystems/terraform-provider-rafay/internal/provider" + legacySDK "github.com/RafaySystems/terraform-provider-rafay/rafay" ) // Run "go generate" to format example terraform files and generate the docs for the registry/website @@ -29,21 +35,48 @@ var ( ) func main() { - var debugMode bool + ctx := context.Background() + + var debug bool - flag.BoolVar(&debugMode, "debug", false, "set to true to run the provider with support for debuggers like delve") + flag.BoolVar(&debug, "debug", false, "set to true to run the provider with support for debuggers like delve") flag.Parse() - opts := &plugin.ServeOpts{ProviderFunc: rafay.New(version)} + // Upgrade the legacy provider to protocol v6 + upgradedSdkServer, err := tf5to6server.UpgradeServer(ctx, legacySDK.New(version)().GRPCProvider) + if err != nil { + log.Fatal(err) + } + + // Create the provider server functions list + providers := []func() tfprotov6.ProviderServer{ + providerserver.NewProtocol6(framework.New(version)()), // terraform-plugin-framework provider + func() tfprotov6.ProviderServer { + return upgradedSdkServer + }, + } + + // Create a new mux server + muxServer, err := tf6muxserver.NewMuxServer(ctx, providers...) 
+ + if err != nil { + log.Fatal(err) + } - if debugMode { - err := plugin.Debug(context.Background(), "registry.terraform.io/RafaySystems/rafay", opts) + // Serve the provider + var serveOpts []tf6server.ServeOpt - if err != nil { - log.Fatal(err.Error()) - } - return + if debug { + serveOpts = append(serveOpts, tf6server.WithManagedDebug()) } - plugin.Serve(opts) + err = tf6server.Serve( + "registry.terraform.io/RafaySystems/rafay", + muxServer.ProviderServer, + serveOpts..., + ) + + if err != nil { + log.Fatal(err) + } } diff --git a/rafay/aks_config.go b/rafay/aks_config.go index 1b1354b8..3d7c2888 100644 --- a/rafay/aks_config.go +++ b/rafay/aks_config.go @@ -67,14 +67,49 @@ type AKSClusterConfigMetadata struct { } type AKSClusterConfigSpec struct { - SubscriptionID string `yaml:"subscriptionId,omitempty"` - ResourceGroupName string `yaml:"resourceGroupName,omitempty"` - ManagedCluster *AKSManagedCluster `yaml:"managedCluster,omitempty"` - NodePools []*AKSNodePool `yaml:"nodePools,omitempty"` - MaintenanceConfigs []*AKSMaintenanceConfig `yaml:"maintenanceConfigurations,omitempty"` + SubscriptionID string `yaml:"subscriptionId,omitempty"` + ResourceGroupName string `yaml:"resourceGroupName,omitempty"` + ManagedCluster *AKSManagedCluster `yaml:"managedCluster,omitempty"` + NodePools []*AKSNodePool `yaml:"nodePools,omitempty"` + MaintenanceConfigs []*AKSMaintenanceConfig `yaml:"maintenanceConfigurations,omitempty"` + WorkloadIdentities []*AzureWorkloadIdentity `yaml:"workloadIdentities,omitempty"` //Internal *AKSRafayInternal `yaml:"internal,omitempty"` } +type AzureWorkloadIdentity struct { + CreateIdentity bool `yaml:"createIdentity,omitempty"` + Metadata *AzureWorkloadIdentityMetadata `yaml:"metadata,omitempty"` + RoleAssignments []*AzureWorkloadIdentityRoleAssignment `yaml:"roleAssignments,omitempty"` + K8sServiceAccounts []*AzureWorkloadIdentityK8sServiceAccount `yaml:"serviceAccounts,omitempty"` +} + +type AzureWorkloadIdentityMetadata struct { + ClientId 
string `yaml:"clientId,omitempty"` + PrincipalId string `yaml:"principalId,omitempty"` + Name string `yaml:"name,omitempty"` + Location string `yaml:"location,omitempty"` + ResourceGroup string `yaml:"resourceGroup,omitempty"` + Tags map[string]string `yaml:"tags,omitempty"` +} + +type AzureWorkloadIdentityRoleAssignment struct { + Name string `yaml:"name,omitempty"` + RoleDefinitionId string `yaml:"roleDefinitionId,omitempty"` + Scope string `yaml:"scope,omitempty"` +} + +type AzureWorkloadIdentityK8sServiceAccount struct { + CreateAccount bool `yaml:"createAccount,omitempty"` + Metadata *K8sServiceAccountMetadata `yaml:"metadata,omitempty"` +} + +type K8sServiceAccountMetadata struct { + Name string `yaml:"name"` + Namespace string `yaml:"namespace"` + Annotations map[string]string `yaml:"annotations,omitempty"` + Labels map[string]string `yaml:"labels,omitempty"` +} + type AKSMaintenanceConfig struct { ApiVersion string `yaml:"apiVersion,omitempty"` Name string `yaml:"name,omitempty"` @@ -183,6 +218,7 @@ type AKSManagedClusterProperties struct { DiskEncryptionSetID string `yaml:"diskEncryptionSetID,omitempty"` AddonProfiles *AddonProfiles `yaml:"addonProfiles,omitempty"` //AddonProfiles map[string]string `yaml:"addonProfiles,omitempty"` + SecurityProfile *AKSManagedClusterSecurityProfile `yaml:"securityProfile,omitempty"` ServicePrincipalProfile *AKSManagedClusterServicePrincipalProfile `yaml:"servicePrincipalProfile,omitempty"` LinuxProfile *AKSManagedClusterLinuxProfile `yaml:"linuxProfile,omitempty"` WindowsProfile *AKSManagedClusterWindowsProfile `yaml:"windowsProfile,omitempty"` @@ -190,6 +226,7 @@ type AKSManagedClusterProperties struct { IdentityProfile *AKSManagedClusterIdentityProfile `yaml:"identityProfile,omitempty"` AutoScalerProfile *AKSManagedClusterAutoScalerProfile `yaml:"autoScalerProfile,omitempty"` AutoUpgradeProfile *AKSManagedClusterAutoUpgradeProfile `yaml:"autoUpgradeProfile,omitempty"` + OidcIssuerProfile 
*AKSManagedClusterOidcIssuerProfile `yaml:"oidcIssuerProfile,omitempty"` PodIdentityProfile *AKSManagedClusterPodIdentityProfile `yaml:"podIdentityProfile,omitempty"` PrivateLinkResources *AKSManagedClusterPrivateLinkResources `yaml:"privateLinkResources,omitempty"` PowerState *AKSManagedClusterPowerState `yaml:"powerState,omitempty"` @@ -240,6 +277,18 @@ type IngressApplicationGatewayAddonConfig struct { WatchNamespace string `yaml:"watchNamespace,omitempty"` } +type AKSManagedClusterSecurityProfile struct { + WorkloadIdentity *AKSManagedClusterWorkloadIdentity `yaml:"workloadIdentity,omitempty"` +} + +type AKSManagedClusterWorkloadIdentity struct { + Enabled *bool `yaml:"enabled,omitempty"` +} + +type AKSManagedClusterOidcIssuerProfile struct { + Enabled *bool `yaml:"enabled,omitempty"` +} + type AKSManagedClusterNetworkProfile struct { LoadBalancerSKU string `yaml:"loadBalancerSku,omitempty"` NetworkPlugin string `yaml:"networkPlugin,omitempty"` diff --git a/rafay/cluster_v3_util.go b/rafay/cluster_v3_util.go index fea32a3a..5ad21041 100644 --- a/rafay/cluster_v3_util.go +++ b/rafay/cluster_v3_util.go @@ -30,9 +30,16 @@ type AksNodepoolsErrorFormatter struct { FailureReason string `json:"failureReason,omitempty"` } +type AksWorkloadIdentityErrorFormatter struct { + Name string `json:"name,omitempty"` + ResourceType string `json:"resourceType,omitempty"` + FailureReason string `json:"failureReason,omitempty"` +} + type AksUpsertErrorFormatter struct { - FailureReason string `json:"failureReason,omitempty"` - Nodepools []AksNodepoolsErrorFormatter `json:"nodepools,omitempty"` + FailureReason string `json:"failureReason,omitempty"` + Nodepools []AksNodepoolsErrorFormatter `json:"nodepools,omitempty"` + WorkloadIdentities []AksWorkloadIdentityErrorFormatter `json:"edgeResources,omitempty"` } func flattenMetadataV3(in *commonpb.Metadata, p []interface{}) []interface{} { diff --git a/rafay/eks_config.go b/rafay/eks_config.go index c8185a7f..a5c1b247 100644 --- 
a/rafay/eks_config.go +++ b/rafay/eks_config.go @@ -177,6 +177,61 @@ type EKSClusterIAM struct { // necessary to run the VPC controller in the control plane // Defaults to `true` VPCResourceControllerPolicy *bool `yaml:"vpcResourceControllerPolicy,omitempty"` + + PodIdentityAssociations []*IAMPodIdentityAssociation `yaml:"podIdentityAssociations,omitempty"` +} + +type IAMPodIdentityAssociation struct { + Namespace string `yaml:"namespace" json:"namespace"` + ServiceAccountName string `yaml:"serviceAccountName" json:"serviceAccountName"` + RoleARN string `yaml:"roleARN,omitempty" json:"roleARN,omitempty"` + // +optional + CreateServiceAccount *bool `yaml:"createServiceAccount,omitempty" json:"createServiceAccount,omitempty"` + // +optional + RoleName string `yaml:"roleName,omitempty" json:"roleName,omitempty"` + // +optional + PermissionsBoundaryARN string `yaml:"permissionsBoundaryARN,omitempty" json:"permissionsBoundaryARN,omitempty"` + // +optional + PermissionPolicyARNs []string `yaml:"permissionPolicyARNs,omitempty" json:"permissionPolicyARNs,omitempty"` + // +optional + PermissionPolicy map[string]interface{} `yaml:"permissionPolicy,omitempty" json:"permissionPolicy,omitempty"` + // +optional + WellKnownPolicies *WellKnownPolicies `yaml:"wellKnownPolicies,omitempty" json:"wellKnownPolicies,omitempty"` + // +optional + Tags map[string]string `yaml:"tags,omitempty" json:"tags,omitempty"` +} + +type IAMPodIdentityAssociationOutput struct { + Namespace string `yaml:"namespace"` + ServiceAccountName string `yaml:"serviceAccountName"` + RoleARN string `yaml:"roleARN,omitempty"` + // +optional + CreateServiceAccount *bool `yaml:"createServiceAccount,omitempty"` + // +optional + RoleName string `yaml:"roleName,omitempty"` + // +optional + PermissionsBoundaryARN string `yaml:"permissionsBoundaryARN,omitempty"` + // +optional + PermissionPolicyARNs []string `yaml:"permissionPolicyARNs,omitempty"` + // +optional + PermissionPolicy map[string]interface{} 
`yaml:"permissionPolicy,omitempty"` + // +optional + WellKnownPolicies *WellKnownPolicies `yaml:"wellKnownPolicies,omitempty"` + // +optional + Tags map[string]string `yaml:"tags,omitempty"` + + Status string `yaml:"status,omitempty"` + Comments string `yaml:"comments,omitempty"` +} + +type PodIdentityExtension struct { + HostMetadata *Metadata `yaml:"metadata,omitempty"` + Spec *IAMPodIdentityAssociation `yaml:"spec,omitempty"` +} + +type Metadata struct { + clusterName string `yaml:"clusterName,omitempty"` + projectName string `yaml:"projectName,omitempty"` } // EKSClusterIAMServiceAccount holds an IAM service account metadata and configuration diff --git a/rafay/migrate/aks/fromV1/migrate.go b/rafay/migrate/aks/fromV1/migrate.go index bb9f2737..90147e34 100644 --- a/rafay/migrate/aks/fromV1/migrate.go +++ b/rafay/migrate/aks/fromV1/migrate.go @@ -482,6 +482,14 @@ func clusterAKSManagedClusterProperties() map[string]*schema.Schema { Optional: true, Description: "The name of the resource group containing agent pool nodes.", }, + "oidc_issuer_profile": { + Type: schema.TypeList, + Optional: true, + Description: "Profile of OIDC Issuer configuration.", + Elem: &schema.Resource{ + Schema: clusterAKSManagedClusterOidcIssuerProfile(), + }, + }, "pod_identity_profile": { Type: schema.TypeList, Optional: true, @@ -498,6 +506,14 @@ func clusterAKSManagedClusterProperties() map[string]*schema.Schema { Schema: clusterAKSManagedClusterPrivateLinkResources(), }, }, + "security_profile": { + Type: schema.TypeList, + Optional: true, + Description: "Profile of security configuration.", + Elem: &schema.Resource{ + Schema: clusterAKSManagedClusterSecurityProfile(), + }, + }, "service_principal_profile": { Type: schema.TypeList, Optional: true, @@ -1160,6 +1176,17 @@ func clusterAKSManagedClusterNPOutboundIPsPublicIps() map[string]*schema.Schema return s } +func clusterAKSManagedClusterOidcIssuerProfile() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "enabled": { 
+ Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable the OIDC issuer for the cluster.", + }, + } + return s +} + func clusterAKSManagedClusterPodIdentityProfile() map[string]*schema.Schema { s := map[string]*schema.Schema{ "allow_network_plugin_kubenet": { @@ -1298,6 +1325,32 @@ func clusterAKSManagedClusterPrivateLinkResources() map[string]*schema.Schema { return s } +func clusterAKSManagedClusterSecurityProfile() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "workload_identity": { + Type: schema.TypeList, + Optional: true, + Description: "Profile of the managed cluster workload identity.", + Elem: &schema.Resource{ + Schema: clusterAKSManagedClusterWorkloadIdentityProfile(), + }, + }, + } + return s +} + +func clusterAKSManagedClusterWorkloadIdentityProfile() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to enable the Workload Identity for the cluster.", + }, + } + return s +} + func clusterAKSManagedClusterServicePrincipalProfile() map[string]*schema.Schema { s := map[string]*schema.Schema{ "client_id": { diff --git a/rafay/provider.go b/rafay/provider.go index cf2876eb..201c62dd 100644 --- a/rafay/provider.go +++ b/rafay/provider.go @@ -39,6 +39,7 @@ func New(_ string) func() *schema.Provider { "rafay_eks_cluster_spec": resourceEKSClusterSpec(), "rafay_aks_cluster": resourceAKSCluster(), "rafay_aks_cluster_v3": resourceAKSClusterV3(), + "rafay_aks_workload_identity": resourceAKSWorkloadIdentity(), "rafay_aks_cluster_spec": resourceAKSClusterSpec(), "rafay_gke_cluster": resourceGKEClusterV3(), "rafay_addon": resourceAddon(), @@ -98,6 +99,8 @@ func New(_ string) func() *schema.Provider { "rafay_ztkapolicy": resourceZTKAPolicy(), "rafay_customrole": resourceCustomRole(), "rafay_workload_cd_operator": resourceWorkloadCDOperator(), + "rafay_breakglassaccess": resourceBreakGlassAccess(), + 
"rafay_eks_pod_identity": resourceEKSPodIdentity(), }, DataSourcesMap: map[string]*schema.Resource{ "rafay_project": dataProject(), diff --git a/rafay/resource_aks_cluster.go b/rafay/resource_aks_cluster.go index 48b370d2..d0b3dbc3 100644 --- a/rafay/resource_aks_cluster.go +++ b/rafay/resource_aks_cluster.go @@ -496,6 +496,14 @@ func clusterAKSManagedClusterProperties() map[string]*schema.Schema { Optional: true, Description: "The name of the resource group containing agent pool nodes.", }, + "oidc_issuer_profile": { + Type: schema.TypeList, + Optional: true, + Description: "Profile of OpenID Connect configuration.", + Elem: &schema.Resource{ + Schema: clusterAKSManagedClusterOidcIssuerProfile(), + }, + }, "pod_identity_profile": { Type: schema.TypeList, Optional: true, @@ -520,6 +528,14 @@ func clusterAKSManagedClusterProperties() map[string]*schema.Schema { Schema: clusterAKSManagedClusterPrivateLinkResources(), }, }, + "security_profile": { + Type: schema.TypeList, + Optional: true, + Description: "Profile of security configuration.", + Elem: &schema.Resource{ + Schema: clusterAKSManagedClusterSecurityProfile(), + }, + }, "service_principal_profile": { Type: schema.TypeList, Optional: true, @@ -1217,6 +1233,18 @@ func clusterAKSManagedClusterNPOutboundIPsPublicIps() map[string]*schema.Schema return s } +func clusterAKSManagedClusterOidcIssuerProfile() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable OIDC Issuer", + Default: false, + }, + } + return s +} + func clusterAKSManagedClusterPodIdentityProfile() map[string]*schema.Schema { s := map[string]*schema.Schema{ "allow_network_plugin_kubenet": { @@ -1369,6 +1397,32 @@ func clusterAKSManagedClusterPrivateLinkResources() map[string]*schema.Schema { return s } +func clusterAKSManagedClusterSecurityProfile() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "workload_identity": { + Type: 
schema.TypeList, + Optional: true, + Description: "Profile of the managed cluster workload identity.", + Elem: &schema.Resource{ + Schema: clusterAKSManagedClusterWorkloadIdentity(), + }, + }, + } + return s +} + +func clusterAKSManagedClusterWorkloadIdentity() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable workload identity", + Default: false, + }, + } + return s +} + func clusterAKSManagedClusterServicePrincipalProfile() map[string]*schema.Schema { s := map[string]*schema.Schema{ "client_id": { @@ -2492,6 +2546,10 @@ func expandAKSManagedClusterProperties(p []interface{}) *AKSManagedClusterProper obj.NodeResourceGroup = v } + if v, ok := in["oidc_issuer_profile"].([]interface{}); ok && len(v) > 0 { + obj.OidcIssuerProfile = expandAKSManagedClusterOidcIssuerProfile(v) + } + if v, ok := in["pod_identity_profile"].([]interface{}); ok && len(v) > 0 { obj.PodIdentityProfile = expandAKSManagedClusterPodIdentityProfile(v) } @@ -2504,6 +2562,10 @@ func expandAKSManagedClusterProperties(p []interface{}) *AKSManagedClusterProper obj.PowerState = expandAKSManagedClusterPowerState(v) } + if v, ok := in["security_profile"].([]interface{}); ok && len(v) > 0 { + obj.SecurityProfile = expandAKSManagedClusterSecurityProfile(v) + } + if v, ok := in["service_principal_profile"].([]interface{}); ok && len(v) > 0 { obj.ServicePrincipalProfile = expandAKSManagedClusterServicePrincipalProfile(v) } @@ -3123,6 +3185,20 @@ func expandAKSManagedClusterNPOutboundIPsPublicIps(p []interface{}) []*AKSManage return out } +func expandAKSManagedClusterOidcIssuerProfile(p []interface{}) *AKSManagedClusterOidcIssuerProfile { + obj := &AKSManagedClusterOidcIssuerProfile{} + if len(p) == 0 || p[0] == nil { + return obj + } + in := p[0].(map[string]interface{}) + + if v, ok := in["enabled"].(bool); ok { + obj.Enabled = &v + } + + return obj +} + func expandAKSManagedClusterPodIdentityProfile(p 
[]interface{}) *AKSManagedClusterPodIdentityProfile { obj := &AKSManagedClusterPodIdentityProfile{} if len(p) == 0 || p[0] == nil { @@ -3268,6 +3344,34 @@ func expandAKSManagedClusterPowerState(p []interface{}) *AKSManagedClusterPowerS return obj } +func expandAKSManagedClusterSecurityProfile(p []interface{}) *AKSManagedClusterSecurityProfile { + obj := &AKSManagedClusterSecurityProfile{} + if len(p) == 0 || p[0] == nil { + return obj + } + in := p[0].(map[string]interface{}) + + if v, ok := in["workload_identity"].([]interface{}); ok && len(v) > 0 { + obj.WorkloadIdentity = expandAKSManagedClusterWorkloadIdentity(v) + } + + return obj +} + +func expandAKSManagedClusterWorkloadIdentity(p []interface{}) *AKSManagedClusterWorkloadIdentity { + obj := &AKSManagedClusterWorkloadIdentity{} + if len(p) == 0 || p[0] == nil { + return obj + } + in := p[0].(map[string]interface{}) + + if v, ok := in["enabled"].(bool); ok { + obj.Enabled = &v + } + + return obj +} + func expandAKSManagedClusterServicePrincipalProfile(p []interface{}) *AKSManagedClusterServicePrincipalProfile { obj := &AKSManagedClusterServicePrincipalProfile{} if len(p) == 0 || p[0] == nil { @@ -4043,8 +4147,11 @@ func flattenAKSCluster(d *schema.ResourceData, in *AKSCluster) error { if !ok { v = []interface{}{} } - - ret2 = flattenAKSClusterSpec(in.Spec, v, rawState.GetAttr("spec")) + var nRawState cty.Value + if !rawState.IsNull() { + nRawState = rawState.GetAttr("spec") + } + ret2 = flattenAKSClusterSpec(in.Spec, v, nRawState) } err = d.Set("spec", ret2) @@ -4082,7 +4189,9 @@ func flattenAKSClusterSpec(in *AKSClusterSpec, p []interface{}, rawState cty.Val return nil } obj := map[string]interface{}{} - rawState = rawState.AsValueSlice()[0] + if !rawState.IsNull() && len(rawState.AsValueSlice()) > 0 { + rawState = rawState.AsValueSlice()[0] + } if len(p) != 0 && p[0] != nil { obj = p[0].(map[string]interface{}) } @@ -4107,7 +4216,11 @@ func flattenAKSClusterSpec(in *AKSClusterSpec, p []interface{}, rawState 
cty.Val if !ok { v = []interface{}{} } - obj["cluster_config"] = flattenAKSClusterConfig(in.AKSClusterConfig, v, rawState.GetAttr("cluster_config")) + var nRawState cty.Value + if !rawState.IsNull() { + nRawState = rawState.GetAttr("cluster_config") + } + obj["cluster_config"] = flattenAKSClusterConfig(in.AKSClusterConfig, v, nRawState) } if in.Sharing != nil { @@ -4130,7 +4243,9 @@ func flattenAKSClusterConfig(in *AKSClusterConfig, p []interface{}, rawState cty return nil } obj := map[string]interface{}{} - rawState = rawState.AsValueSlice()[0] + if !rawState.IsNull() && len(rawState.AsValueSlice()) > 0 { + rawState = rawState.AsValueSlice()[0] + } if len(p) != 0 && p[0] != nil { obj = p[0].(map[string]interface{}) } @@ -4156,7 +4271,11 @@ func flattenAKSClusterConfig(in *AKSClusterConfig, p []interface{}, rawState cty if !ok { v = []interface{}{} } - obj["spec"] = flattenAKSClusterConfigSpec(in.Spec, v, rawState.GetAttr("spec")) + var nRawState cty.Value + if !rawState.IsNull() { + nRawState = rawState.GetAttr("spec") + } + obj["spec"] = flattenAKSClusterConfigSpec(in.Spec, v, nRawState) } return []interface{}{obj} @@ -4183,7 +4302,10 @@ func flattenAKSClusterConfigSpec(in *AKSClusterConfigSpec, p []interface{}, rawS if in == nil { return nil } - rawState = rawState.AsValueSlice()[0] + if !rawState.IsNull() && len(rawState.AsValueSlice()) > 0 { + rawState = rawState.AsValueSlice()[0] + } + obj := map[string]interface{}{} if len(p) != 0 && p[0] != nil { obj = p[0].(map[string]interface{}) @@ -4211,7 +4333,11 @@ func flattenAKSClusterConfigSpec(in *AKSClusterConfigSpec, p []interface{}, rawS if !ok { v = []interface{}{} } - obj["node_pools"] = flattenAKSNodePool(in.NodePools, v, rawState.GetAttr("node_pools")) + var nRawState cty.Value + if !rawState.IsNull() { + nRawState = rawState.GetAttr("node_pools") + } + obj["node_pools"] = flattenAKSNodePool(in.NodePools, v, nRawState) } if in.MaintenanceConfigs != nil && len(in.MaintenanceConfigs) > 0 { @@ -4450,6 +4576,14 
@@ func flattenAKSManagedClusterProperties(in *AKSManagedClusterProperties, p []int obj["node_resource_group"] = in.NodeResourceGroup } + if in.OidcIssuerProfile != nil { + v, ok := obj["oidc_issuer_profile"].([]interface{}) + if !ok { + v = []interface{}{} + } + obj["oidc_issuer_profile"] = flattenAKSMCPropertiesOidcIssuerProfile(in.OidcIssuerProfile, v) + } + if in.PodIdentityProfile != nil { v, ok := obj["pod_identity_profile"].([]interface{}) if !ok { @@ -4474,6 +4608,14 @@ func flattenAKSManagedClusterProperties(in *AKSManagedClusterProperties, p []int obj["private_link_resources"] = flattenAKSManagedClusterPrivateLinkResources(in.PrivateLinkResources, v) } + if in.SecurityProfile != nil { + v, ok := obj["security_profile"].([]interface{}) + if !ok { + v = []interface{}{} + } + obj["security_profile"] = flattenAKSMCPropertiesSecurityProfile(in.SecurityProfile, v) + } + if in.ServicePrincipalProfile != nil { v, ok := obj["service_principal_profile"].([]interface{}) if !ok { @@ -5255,6 +5397,21 @@ func flattenAKSManagedClusterNPOutboundIPsPublicIPs(in []*AKSManagedClusterNPOut } +func flattenAKSMCPropertiesOidcIssuerProfile(in *AKSManagedClusterOidcIssuerProfile, p []interface{}) []interface{} { + if in == nil { + return nil + } + obj := map[string]interface{}{} + if len(p) != 0 && p[0] != nil { + obj = p[0].(map[string]interface{}) + } + + obj["enabled"] = in.Enabled + + return []interface{}{obj} + +} + func flattenAKSManagedClusterPodIdentityProfile(in *AKSManagedClusterPodIdentityProfile, p []interface{}) []interface{} { if in == nil { return nil @@ -5390,6 +5547,42 @@ func flattenAKSManagedClusterPIPUserAssignedIdentityExceptions(inp []*AKSManaged } +func flattenAKSMCPropertiesSecurityProfile(in *AKSManagedClusterSecurityProfile, p []interface{}) []interface{} { + if in == nil { + return nil + } + obj := map[string]interface{}{} + if len(p) != 0 && p[0] != nil { + obj = p[0].(map[string]interface{}) + } + + if in.WorkloadIdentity != nil { + v, ok := 
obj["workload_identity"].([]interface{}) + if !ok { + v = []interface{}{} + } + obj["workload_identity"] = flattenAKSManagedClusterWorkloadIdentity(in.WorkloadIdentity, v) + } + + return []interface{}{obj} + +} + +func flattenAKSManagedClusterWorkloadIdentity(in *AKSManagedClusterWorkloadIdentity, p []interface{}) []interface{} { + if in == nil { + return nil + } + obj := map[string]interface{}{} + if len(p) != 0 && p[0] != nil { + obj = p[0].(map[string]interface{}) + } + + obj["enabled"] = in.Enabled + + return []interface{}{obj} + +} + func flattenAKSManagedClusterServicePrincipalProfile(in *AKSManagedClusterServicePrincipalProfile, p []interface{}) []interface{} { if in == nil { return nil @@ -5866,7 +6059,7 @@ func flattenAKSNodePool(in []*AKSNodePool, p []interface{}, rawState cty.Value) out := make([]interface{}, len(in)) for i, in := range in { var nRawState cty.Value - if len(rawState.AsValueSlice()) > i { + if !rawState.IsNull() && len(rawState.AsValueSlice()) > i { nRawState = rawState.AsValueSlice()[0] } obj := map[string]interface{}{} @@ -5887,11 +6080,11 @@ func flattenAKSNodePool(in []*AKSNodePool, p []interface{}, rawState cty.Value) if !ok { v = []interface{}{} } - if nRawState.IsNull() { - obj["properties"] = flattenAKSNodePoolProperties(in.Properties, v, nRawState) - } else { - obj["properties"] = flattenAKSNodePoolProperties(in.Properties, v, nRawState.GetAttr("properties")) + var propRawState cty.Value + if !nRawState.IsNull() { + propRawState = nRawState.GetAttr("properties") } + obj["properties"] = flattenAKSNodePoolProperties(in.Properties, v, propRawState) } if len(in.Type) > 0 { @@ -5911,7 +6104,7 @@ func flattenAKSNodePoolProperties(in *AKSNodePoolProperties, p []interface{}, ra if in == nil { return nil } - if !rawState.IsNull() { + if !rawState.IsNull() && len(rawState.AsValueSlice()) > 0 { rawState = rawState.AsValueSlice()[0] } obj := map[string]interface{}{} @@ -6243,6 +6436,44 @@ func aksClusterCTLStatus(taskid, projectID string) 
(string, error) { func processInputs(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { log.Println("AKS process inputs") + + desiredObj, err := expandAksCluster(d) + if err != nil { + log.Println("error while expanding aks cluster", err) + return diag.FromErr(err) + } + + // Only proceed with stitching if the cluster resource already exists + if d.Id() != "" { + // ============== Stitching Start ============== + + log.Println("Including first class edge resources in desired spec") + + deployedObj, err := getDeployedClusterSpec(d) + if err != nil { + log.Println("error while reading aks cluster", err) + return diag.FromErr(err) + } + + if len(deployedObj.Spec.AKSClusterConfig.Spec.WorkloadIdentities) > 0 { + // Copy over the WorkloadIdentities from the deployed cluster spec + + desiredObj.Spec.AKSClusterConfig.Spec.WorkloadIdentities = deployedObj.Spec.AKSClusterConfig.Spec.WorkloadIdentities + } + + // ============== Stitching End ============== + } + + out, err := yamlf.Marshal(desiredObj) + if err != nil { + log.Println("err marshall:", err) + return diag.FromErr(err) + } + log.Printf("AKS Cluster YAML SPEC \n---\n%s\n----\n", out) + return process_filebytes(ctx, d, m, out, desiredObj) +} + +func expandAksCluster(d *schema.ResourceData) (*AKSCluster, error) { obj := &AKSCluster{} rawConfig := d.GetRawConfig() @@ -6250,14 +6481,14 @@ func processInputs(ctx context.Context, d *schema.ResourceData, m interface{}) d obj.APIVersion = v } else { log.Println("apiversion unable to be found") - return diag.FromErr(fmt.Errorf("%s", "Apiversion is missing")) + return obj, fmt.Errorf("%s", "Apiversion is missing") } if v, ok := d.Get("kind").(string); ok { obj.Kind = v } else { log.Println("kind unable to be found") - return diag.FromErr(fmt.Errorf("%s", "Kind is missing")) + return obj, fmt.Errorf("%s", "Kind is missing") } if v, ok := d.Get("metadata").([]interface{}); ok { @@ -6265,34 +6496,28 @@ func processInputs(ctx context.Context, d 
*schema.ResourceData, m interface{}) d log.Println("md:", obj.Metadata) } else { log.Println("metadata unable to be found") - return diag.FromErr(fmt.Errorf("%s", "Metadata is missing")) + return obj, fmt.Errorf("%s", "Metadata is missing") } if v, ok := d.Get("spec").([]interface{}); ok { obj.Spec = expandAKSClusterSpec(v, rawConfig.GetAttr("spec")) } else { log.Println("Cluster spec unable to be found") - return diag.FromErr(fmt.Errorf("%s", "Spec is missing")) + return obj, fmt.Errorf("%s", "Spec is missing") } projectName := obj.Metadata.Project _, err := project.GetProjectByName(projectName) if err != nil { log.Println("Cluster project name is invalid", err) - return diag.FromErr(fmt.Errorf("%s", "Cluster project name is invalid")) + return obj, fmt.Errorf("%s", "Cluster project name is invalid") } if obj.Metadata.Name != obj.Spec.AKSClusterConfig.Metadata.Name { - return diag.FromErr(fmt.Errorf("%s", "ClusterConfig name does not match config file")) + return obj, fmt.Errorf("%s", "ClusterConfig name does not match config file") } - out, err := yamlf.Marshal(obj) - if err != nil { - log.Println("err marshall:", err) - return diag.FromErr(err) - } - log.Printf("AKS Cluster YAML SPEC \n---\n%s\n----\n", out) - return process_filebytes(ctx, d, m, out, obj) + return obj, nil } func resourceAKSClusterUpsert(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -6454,14 +6679,46 @@ func resourceAKSClusterRead(ctx context.Context, d *schema.ResourceData, m inter var diags diag.Diagnostics log.Println("resourceAKSClusterRead") + clusterSpec, err := getDeployedClusterSpec(d) + if err != nil { + log.Printf("error in get cluster spec %s", err.Error()) + return diag.FromErr(err) + } + + // ============== Unfurl Start ============== + + log.Println("Excluding first class edge resources in deployed spec") + + // Remove the cluster associated but externalized edge resources from the deployed cluster + if 
len(clusterSpec.Spec.AKSClusterConfig.Spec.WorkloadIdentities) > 0 { + // WorkloadIdentities is not part of the terraform cluster resource schema + + log.Println("Removing deployed workload identities from deployed cluster spec") + clusterSpec.Spec.AKSClusterConfig.Spec.WorkloadIdentities = nil + } + + // ============== Unfurl End ================= + + err = flattenAKSCluster(d, clusterSpec) + if err != nil { + log.Printf("get aks cluster set error %s", err.Error()) + return diag.FromErr(err) + } + + return diags +} + +func getDeployedClusterSpec(d *schema.ResourceData) (*AKSCluster, error) { + clusterSpec := &AKSCluster{} + projectName, ok := d.Get("metadata.0.project").(string) if !ok || projectName == "" { - return diag.FromErr(errors.New("project name unable to be found")) + return clusterSpec, errors.New("project name unable to be found") } clusterName, ok := d.Get("metadata.0.name").(string) if !ok || clusterName == "" { - return diag.FromErr(errors.New("cluster name unable to be found")) + return clusterSpec, errors.New("cluster name unable to be found") } fmt.Printf("Found project_name: %s, cluster_name: %s", projectName, clusterName) @@ -6470,7 +6727,7 @@ func resourceAKSClusterRead(ctx context.Context, d *schema.ResourceData, m inter projectId, err := getProjectIDFromName(projectName) if err != nil { fmt.Print("Cluster project name is invalid") - return diag.FromErr(fmt.Errorf("cluster project name is invalid. Error: %s", err.Error())) + return clusterSpec, fmt.Errorf("cluster project name is invalid. Error: %s", err.Error()) } c, err := cluster.GetCluster(clusterName, projectId, uaDef) @@ -6479,9 +6736,9 @@ func resourceAKSClusterRead(ctx context.Context, d *schema.ResourceData, m inter if strings.Contains(err.Error(), "not found") { log.Println("Resource Read ", "error", err) d.SetId("") - return diag.FromErr(fmt.Errorf("resource read failed, cluster not found. 
Error: %s", err.Error())) + return clusterSpec, fmt.Errorf("resource read failed, cluster not found. Error: %s", err.Error()) } - return diag.FromErr(err) + return clusterSpec, err } // another @@ -6490,21 +6747,17 @@ func resourceAKSClusterRead(ctx context.Context, d *schema.ResourceData, m inter clusterSpecYaml, err := clusterctl.GetClusterSpec(logger, rctlCfg, c.Name, projectId, uaDef) if err != nil { log.Printf("error in get clusterspec %s", err.Error()) - return diag.FromErr(err) + return clusterSpec, err } + log.Println("clusterSpecYaml from getClusterSpec:", clusterSpecYaml) - clusterSpec := AKSCluster{} err = yaml.Unmarshal([]byte(clusterSpecYaml), &clusterSpec) if err != nil { - return diag.FromErr(err) - } - err = flattenAKSCluster(d, &clusterSpec) - if err != nil { - log.Printf("get aks cluster set error %s", err.Error()) - return diag.FromErr(err) + return clusterSpec, err } + log.Println("unmarshalled clusterSpec from getClusterSpec:", clusterSpec) - return diags + return clusterSpec, nil } func resourceAKSClusterUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { diff --git a/rafay/resource_aks_cluster_v3.go b/rafay/resource_aks_cluster_v3.go index dbd2ab9f..ee098710 100644 --- a/rafay/resource_aks_cluster_v3.go +++ b/rafay/resource_aks_cluster_v3.go @@ -58,35 +58,65 @@ func resourceAKSClusterV3Read(ctx context.Context, d *schema.ResourceData, m int var diags diag.Diagnostics log.Println("resourceClusterRead ") + + deployedCluster, err := getDeployedClusterSpecV3(ctx, d) + if err != nil { + return diag.FromErr(err) + } + + // ============== Unfurl Start ============== + + log.Println("Excluding first class edge resources in deployed spec") + + // Remove the cluster associated but externalized edge resources from the deployed cluster + if deployedCluster != nil && deployedCluster.Spec != nil && deployedCluster.Spec.GetAks() != nil { + if len(deployedCluster.Spec.GetAks().Spec.WorkloadIdentities) > 0 { + // 
WorkloadIdentities is not part of the terraform cluster resource schema + + log.Println("Removing deployed workload identities from deployed cluster spec") + deployedCluster.Spec.GetAks().Spec.WorkloadIdentities = nil + } + } + + // ============== Unfurl End ================= + + err = flattenAKSClusterV3(d, deployedCluster) + if err != nil { + return diag.FromErr(err) + } + + return diags +} + +func getDeployedClusterSpecV3(ctx context.Context, d *schema.ResourceData) (*infrapb.Cluster, error) { + var deployedCluster *infrapb.Cluster + + log.Println("getDeployedClusterSpecV3") tflog := os.Getenv("TF_LOG") if tflog == "TRACE" || tflog == "DEBUG" { ctx = context.WithValue(ctx, "debug", "true") } - tfClusterState, err := expandClusterV3(d) + desiredTfClusterState, err := expandClusterV3(d) if err != nil { - return diag.FromErr(err) + return deployedCluster, err } auth := config.GetConfig().GetAppAuthProfile() client, err := typed.NewClientWithUserAgent(auth.URL, auth.Key, versioninfo.GetUserAgent(), options.WithInsecureSkipVerify(auth.SkipServerCertValid)) if err != nil { - return diag.FromErr(err) + return deployedCluster, err } - ag, err := client.InfraV3().Cluster().Get(ctx, options.GetOptions{ - Name: tfClusterState.Metadata.Name, - Project: tfClusterState.Metadata.Project, + deployedCluster, err = client.InfraV3().Cluster().Get(ctx, options.GetOptions{ + Name: desiredTfClusterState.Metadata.Name, + Project: desiredTfClusterState.Metadata.Project, }) if err != nil { - return diag.FromErr(err) + return deployedCluster, err } - err = flattenAKSClusterV3(d, ag) - if err != nil { - return diag.FromErr(err) - } + return deployedCluster, nil - return diags } func resourceAKSClusterV3Update(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -197,13 +227,38 @@ func resourceAKSClusterV3Upsert(ctx context.Context, d *schema.ResourceData, m i } } - cluster, err := expandClusterV3(d) + desiredCluster, err := expandClusterV3(d) if err != nil { 
log.Printf("Cluster expandCluster error") return diag.FromErr(err) } - log.Println(">>>>>> CLUSTER: ", cluster) + // Only proceed with stitching if the cluster resource already exists + if d.Id() != "" { + // ============== Stitching Start ============== + + log.Println("Including first class edge resources in desired spec") + + deployedCluster, err := getDeployedClusterSpecV3(ctx, d) + if err != nil { + log.Println("error getting deployed cluster", err) + return diag.FromErr(err) + } + + if deployedCluster != nil && deployedCluster.Spec != nil && deployedCluster.Spec.GetAks() != nil { + if len(deployedCluster.Spec.GetAks().Spec.WorkloadIdentities) > 0 { + // Copy over the WorkloadIdentities from the deployed cluster spec + + log.Println("Adding deployed workload identities from deployed cluster spec") + desiredCluster.Spec.GetAks().Spec.WorkloadIdentities = deployedCluster.Spec.GetAks().Spec.WorkloadIdentities + } + + } + + // ============== Stitching End ============== + } + + log.Println(">>>>>> CLUSTER: ", desiredCluster) auth := config.GetConfig().GetAppAuthProfile() client, err := typed.NewClientWithUserAgent(auth.URL, auth.Key, versioninfo.GetUserAgent(), options.WithInsecureSkipVerify(auth.SkipServerCertValid)) @@ -211,10 +266,10 @@ func resourceAKSClusterV3Upsert(ctx context.Context, d *schema.ResourceData, m i return diag.FromErr(err) } - err = client.InfraV3().Cluster().Apply(ctx, cluster, options.ApplyOptions{}) + err = client.InfraV3().Cluster().Apply(ctx, desiredCluster, options.ApplyOptions{}) if err != nil { // XXX Debug - n1 := spew.Sprintf("%+v", cluster) + n1 := spew.Sprintf("%+v", desiredCluster) log.Println("Cluster apply cluster:", n1) log.Printf("Cluster apply error") return diag.FromErr(err) @@ -223,9 +278,9 @@ func resourceAKSClusterV3Upsert(ctx context.Context, d *schema.ResourceData, m i ticker := time.NewTicker(time.Duration(60) * time.Second) defer ticker.Stop() - edgeName := cluster.Metadata.Name - projectName := 
cluster.Metadata.Project - d.SetId(cluster.Metadata.Name) + edgeName := desiredCluster.Metadata.Name + projectName := desiredCluster.Metadata.Project + d.SetId(desiredCluster.Metadata.Name) var warnings []string LOOP: @@ -940,6 +995,10 @@ func expandAKSManagedClusterV3Properties(p []interface{}) *infrapb.ManagedCluste obj.NodeResourceGroup = v } + if v, ok := in["oidc_issuer_profile"].([]interface{}); ok && len(v) > 0 { + obj.OidcIssuerProfile = expandAKSManagedClusterV3OidcIssuerProfile(v) + } + if v, ok := in["pod_identity_profile"].([]interface{}); ok && len(v) > 0 { obj.PodIdentityProfile = expandAKSManagedClusterV3PodIdentityProfile(v) } @@ -952,6 +1011,10 @@ func expandAKSManagedClusterV3Properties(p []interface{}) *infrapb.ManagedCluste obj.PowerState = expandAKSV3ManagedClusterPowerState(v) } + if v, ok := in["security_profile"].([]interface{}); ok && len(v) > 0 { + obj.SecurityProfile = expandAKSManagedClusterV3SecurityProfile(v) + } + if v, ok := in["service_principal_profile"].([]interface{}); ok && len(v) > 0 { obj.ServicePrincipalProfile = expandAKSManagedClusterV3ServicePrincipalProfile(v) } @@ -1562,6 +1625,20 @@ func expandAKSManagedClusterV3NPOutboundIPsPublicIps(p []interface{}) []*infrapb return out } +func expandAKSManagedClusterV3OidcIssuerProfile(p []interface{}) *infrapb.OIDCIssuerProfile { + obj := &infrapb.OIDCIssuerProfile{} + if len(p) == 0 || p[0] == nil { + return obj + } + in := p[0].(map[string]interface{}) + + if v, ok := in["enabled"].(bool); ok { + obj.Enabled = v + } + + return obj +} + func expandAKSManagedClusterV3PodIdentityProfile(p []interface{}) *infrapb.Podidentityprofile { obj := &infrapb.Podidentityprofile{} if len(p) == 0 || p[0] == nil { @@ -1712,6 +1789,34 @@ func expandAKSV3ManagedClusterPowerState(p []interface{}) *infrapb.PowerState { return obj } +func expandAKSManagedClusterV3SecurityProfile(p []interface{}) *infrapb.Securityprofile { + obj := &infrapb.Securityprofile{} + if len(p) == 0 || p[0] == nil { + return 
obj + } + in := p[0].(map[string]interface{}) + + if v, ok := in["workload_identity"].([]interface{}); ok && len(v) > 0 { + obj.WorkloadIdentity = expandAKSManagedClusterV3NPWorkloadIdentity(v) + } + + return obj +} + +func expandAKSManagedClusterV3NPWorkloadIdentity(p []interface{}) *infrapb.Securityprofile_WorkloadIdentity { + obj := &infrapb.Securityprofile_WorkloadIdentity{} + if len(p) == 0 || p[0] == nil { + return obj + } + in := p[0].(map[string]interface{}) + + if v, ok := in["enabled"].(bool); ok { + obj.Enabled = v + } + + return obj +} + func expandAKSManagedClusterV3ServicePrincipalProfile(p []interface{}) *infrapb.Serviceprincipalprofile { obj := &infrapb.Serviceprincipalprofile{} if len(p) == 0 || p[0] == nil { @@ -2837,6 +2942,14 @@ func flattenAKSV3ManagedClusterProperties(in *infrapb.ManagedClusterProperties, obj["linux_profile"] = flattenAKSV3ManagedClusterLinuxProfile(in.LinuxProfile, v) } + if in.OidcIssuerProfile != nil { + v, ok := obj["oidc_issuer_profile"].([]interface{}) + if !ok { + v = []interface{}{} + } + obj["oidc_issuer_profile"] = flattenAKSV3MCPropertiesOidcIssuerProfile(in.OidcIssuerProfile, v) + } + if in.NetworkProfile != nil { v, ok := obj["network_profile"].([]interface{}) if !ok { @@ -2873,6 +2986,14 @@ func flattenAKSV3ManagedClusterProperties(in *infrapb.ManagedClusterProperties, obj["private_link_resources"] = flattenAKSV3ManagedClusterPrivateLinkResources(in.PrivateLinkResources, v) } + if in.SecurityProfile != nil { + v, ok := obj["security_profile"].([]interface{}) + if !ok { + v = []interface{}{} + } + obj["security_profile"] = flattenAKSV3MCPropertiesSecurityProfile(in.SecurityProfile, v) + } + if in.ServicePrincipalProfile != nil { v, ok := obj["service_principal_profile"].([]interface{}) if !ok { @@ -3610,6 +3731,20 @@ func flattenAKSV3ManagedClusterNPOutboundIPsPublicIPs(in []*infrapb.Publicips, p } +func flattenAKSV3MCPropertiesOidcIssuerProfile(in *infrapb.OIDCIssuerProfile, p []interface{}) []interface{} { + if 
in == nil { + return nil + } + obj := map[string]interface{}{} + if len(p) != 0 && p[0] != nil { + obj = p[0].(map[string]interface{}) + } + + obj["enabled"] = in.Enabled + + return []interface{}{obj} +} + func flattenAKSV3ManagedClusterPodIdentityProfile(in *infrapb.Podidentityprofile, p []interface{}) []interface{} { if in == nil { return nil @@ -4293,3 +4428,39 @@ func flattenClusterV3Blueprint(in *infrapb.BlueprintConfig) []interface{} { return []interface{}{obj} } + +func flattenAKSV3MCPropertiesSecurityProfile(in *infrapb.Securityprofile, p []interface{}) []interface{} { + if in == nil { + return nil + } + obj := map[string]interface{}{} + if len(p) != 0 && p[0] != nil { + obj = p[0].(map[string]interface{}) + } + + if in.WorkloadIdentity != nil { + v, ok := obj["workload_identity"].([]interface{}) + if !ok { + v = []interface{}{} + } + obj["workload_identity"] = flattenAKSV3MCPropertiesWorkloadIdentity(in.WorkloadIdentity, v) + } + + return []interface{}{obj} + +} + +func flattenAKSV3MCPropertiesWorkloadIdentity(in *infrapb.Securityprofile_WorkloadIdentity, p []interface{}) []interface{} { + if in == nil { + return nil + } + obj := map[string]interface{}{} + if len(p) != 0 && p[0] != nil { + obj = p[0].(map[string]interface{}) + } + + obj["enabled"] = in.Enabled + + return []interface{}{obj} + +} diff --git a/rafay/resource_aks_workload_identity.go b/rafay/resource_aks_workload_identity.go new file mode 100644 index 00000000..dbf1fe3c --- /dev/null +++ b/rafay/resource_aks_workload_identity.go @@ -0,0 +1,809 @@ +package rafay + +import ( + "context" + "encoding/json" + "fmt" + "log" + "os" + "strings" + "time" + + "github.com/RafaySystems/rafay-common/pkg/hub/client/options" + typed "github.com/RafaySystems/rafay-common/pkg/hub/client/typed" + "github.com/RafaySystems/rafay-common/pkg/hub/terraform/resource" + "github.com/RafaySystems/rafay-common/proto/types/hub/infrapb" + "github.com/RafaySystems/rctl/pkg/config" + 
"github.com/RafaySystems/rctl/pkg/versioninfo" + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceAKSWorkloadIdentity() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceAKSWorkloadIdentityCreate, + ReadContext: resourceAKSWorkloadIdentityRead, + UpdateContext: resourceAKSWorkloadIdentityUpdate, + DeleteContext: resourceAKSWorkloadIdentityDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(2 * time.Minute), + Delete: schema.DefaultTimeout(2 * time.Minute), + }, + + SchemaVersion: 1, + Schema: resource.WorkloadIdentitySchema.Schema, + } +} + +func resourceAKSWorkloadIdentityCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Println("create aks workload identity") + + diags := resourceAKSWorkloadIdentityUpsert(ctx, d) + return diags +} + +func resourceAKSWorkloadIdentityRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + log.Println("read aks workload identity") + + tflog := os.Getenv("TF_LOG") + if tflog == "TRACE" || tflog == "DEBUG" { + ctx = context.WithValue(ctx, "debug", "true") + } + + desiredInfraAksWorkloadIdentity, err := expandAksWorkloadIdentity(d) + if err != nil { + return diag.FromErr(err) + } + + wiName := desiredInfraAksWorkloadIdentity.Spec.Metadata.Name + wiClusterName := desiredInfraAksWorkloadIdentity.Metadata.Clustername + wiProjectName := desiredInfraAksWorkloadIdentity.Metadata.Project + + deployedAksInfraWorkloadIdentity, err := getAksWorkloadIdentity(ctx, wiName, wiClusterName, wiProjectName) + if err != nil { + return diag.FromErr(err) + } + + err = flattenAksWorkloadIdentity(d, deployedAksInfraWorkloadIdentity) + if err != nil { + return diag.FromErr(err) + } + + return diags +} + +func getAksWorkloadIdentity(ctx 
context.Context, name, clusterName, project string) (*infrapb.AksWorkloadIdentity, error) { + auth := config.GetConfig().GetAppAuthProfile() + client, err := typed.NewClientWithUserAgent(auth.URL, auth.Key, versioninfo.GetUserAgent(), options.WithInsecureSkipVerify(auth.SkipServerCertValid)) + if err != nil { + return nil, err + } + + extResponse, err := client.InfraV3().Cluster().ExtApi().GetAksWorkloadIdentity(ctx, options.ExtOptions{ + Name: clusterName, + Project: project, + UrlParams: map[string]string{ + "identity_name": name, + }, + }) + if err != nil { + return nil, err + } + + var deployedAksInfraWorkloadIdentity infrapb.AksWorkloadIdentity + if err = json.Unmarshal(extResponse.Body, &deployedAksInfraWorkloadIdentity); err != nil { + return nil, err + } + + log.Println("deployedAksInfraWorkloadIdentity from controller", spew.Sprintf("%+v", &deployedAksInfraWorkloadIdentity)) + + return &deployedAksInfraWorkloadIdentity, nil + +} + +func listAksWorkloadIdentity(ctx context.Context, clusterName, project string) (*infrapb.AksWorkloadIdentityList, error) { + auth := config.GetConfig().GetAppAuthProfile() + client, err := typed.NewClientWithUserAgent(auth.URL, auth.Key, versioninfo.GetUserAgent(), options.WithInsecureSkipVerify(auth.SkipServerCertValid)) + if err != nil { + return nil, err + } + + extResponse, err := client.InfraV3().Cluster().ExtApi().ListAksWorkloadIdentities(ctx, options.ExtOptions{ + Name: clusterName, + Project: project, + }) + if err != nil { + return nil, err + } + + var aksInfraWorkloadIdentityList infrapb.AksWorkloadIdentityList + if err = json.Unmarshal(extResponse.Body, &aksInfraWorkloadIdentityList); err != nil { + return nil, err + } + + log.Println("aksInfraWorkloadIdentityList from controller", spew.Sprintf("%+v", &aksInfraWorkloadIdentityList)) + + return &aksInfraWorkloadIdentityList, nil + +} + +func resourceAKSWorkloadIdentityUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + 
log.Println("update aks workload identity") + + return resourceAKSWorkloadIdentityUpsert(ctx, d) +} + +func resourceAKSWorkloadIdentityDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + log.Println("delete aks workload identity") + + tflog := os.Getenv("TF_LOG") + if tflog == "TRACE" || tflog == "DEBUG" { + ctx = context.WithValue(ctx, "debug", "true") + } + + desiredInfraAksWorkloadIdentity, err := expandAksWorkloadIdentity(d) + if err != nil { + return diag.FromErr(err) + } + + wiName := desiredInfraAksWorkloadIdentity.Spec.Metadata.Name + wiClusterName := desiredInfraAksWorkloadIdentity.Metadata.Clustername + wiProjectName := desiredInfraAksWorkloadIdentity.Metadata.Project + + log.Printf("deleting workload identity: %s for edgename: %s and projectname: %s", wiName, wiClusterName, wiProjectName) + + auth := config.GetConfig().GetAppAuthProfile() + client, err := typed.NewClientWithUserAgent(auth.URL, auth.Key, versioninfo.GetUserAgent(), options.WithInsecureSkipVerify(auth.SkipServerCertValid)) + if err != nil { + return diag.FromErr(err) + } + + _, err = client.InfraV3().Cluster().ExtApi().DeleteAksWorkloadIdentity(ctx, options.ExtOptions{ + Name: wiClusterName, + Project: wiProjectName, + UrlParams: map[string]string{ + "identity_name": wiName, + }, + }) + if err != nil { + return diag.FromErr(err) + } + + ticker := time.NewTicker(time.Duration(60) * time.Second) + defer ticker.Stop() + +LOOP: + for { + select { + case <-ctx.Done(): + log.Printf("workload identity deletion %s timed out", wiName) + return diag.FromErr(fmt.Errorf("workload identity deletion %s timed out", wiName)) + + case <-ticker.C: + aksWorkloadIdentityList, err := listAksWorkloadIdentity(ctx, wiClusterName, wiProjectName) + if err != nil { + return diag.FromErr(err) + } + + for _, aksWorkloadIdentity := range aksWorkloadIdentityList.Items { + if aksWorkloadIdentity.Spec.Metadata.Name == wiName { + log.Printf("workload 
identity %s deletion in progress", wiName) + continue LOOP + } + } + + log.Printf("workload identity %s deletion complete", wiName) + break LOOP + } + } + + return diags +} + +func resourceAKSWorkloadIdentityUpsert(ctx context.Context, d *schema.ResourceData) diag.Diagnostics { + log.Println("upsert aks workload identity") + + var diags diag.Diagnostics + + tflog := os.Getenv("TF_LOG") + if tflog == "TRACE" || tflog == "DEBUG" { + ctx = context.WithValue(ctx, "debug", "true") + } + + if d.State() != nil && d.State().ID != "" { + n := GetMetaName(d) + if n != "" && n != d.State().ID { + log.Printf("metadata name change not supported") + d.State().Tainted = true + return diag.FromErr(fmt.Errorf("%s", "metadata name change not supported")) + } + } + + desiredInfraAksWorkloadIdentity, err := expandAksWorkloadIdentity(d) + if err != nil { + log.Println("error in expanding aks workload identity", err) + return diag.FromErr(err) + } + + wiName := desiredInfraAksWorkloadIdentity.Spec.Metadata.Name + wiClusterName := desiredInfraAksWorkloadIdentity.Metadata.Clustername + wiProjectName := desiredInfraAksWorkloadIdentity.Metadata.Project + + log.Printf("upserting workload identity: %s for edgename: %s and projectname: %s", wiName, wiClusterName, wiProjectName) + + auth := config.GetConfig().GetAppAuthProfile() + client, err := typed.NewClientWithUserAgent(auth.URL, auth.Key, versioninfo.GetUserAgent(), options.WithInsecureSkipVerify(auth.SkipServerCertValid)) + if err != nil { + return diag.FromErr(err) + } + + desiredInfraAksWorkloadIdentityBytes, err := json.Marshal(desiredInfraAksWorkloadIdentity) + if err != nil { + return diag.FromErr(err) + } + + response, err := client.InfraV3().Cluster().ExtApi().ApplyAksWorkloadIdentity(ctx, options.ExtOptions{ + Name: wiClusterName, + Project: wiProjectName, + Body: desiredInfraAksWorkloadIdentityBytes, + }) + if err != nil { + log.Println("workload identity apply error", err) + return diag.FromErr(err) + } + + var 
applyAksWorkloadIdentityResponse infrapb.ApplyAksWorkloadIdentityResponse + if err = json.Unmarshal(response.Body, &applyAksWorkloadIdentityResponse); err != nil { + return diag.FromErr(err) + } + + log.Println("applyAksWorkloadIdentityResponse from controller", spew.Sprintf("%+v", &applyAksWorkloadIdentityResponse)) + + if applyAksWorkloadIdentityResponse.TasksetId == "0lk5wke" { + log.Println("Taskset ID is 0, implies no-op. Workload Identity already exists") + d.SetId(wiName) + return diags + } + + ticker := time.NewTicker(time.Duration(5) * time.Second) + defer ticker.Stop() + +LOOP: + for { + select { + case <-ctx.Done(): + log.Printf("workload identity %s operation timed out", wiName) + return diag.FromErr(fmt.Errorf("workload identity %s operation timed out", wiName)) + case <-ticker.C: + statusCluster, err := client.InfraV3().Cluster().Status(ctx, options.StatusOptions{ + Name: wiClusterName, + Project: wiProjectName, + }) + if err != nil { + log.Println("error in getting cluster status", err) + return diag.FromErr(err) + } + + if len(statusCluster.Status.LastTasksets) == 0 { + log.Printf("workload identity %s operation not started", wiName) + continue + } + + for _, taskset := range statusCluster.Status.LastTasksets { + if taskset.TasksetId != applyAksWorkloadIdentityResponse.TasksetId { + continue + } + + tasksetStatus := taskset.TasksetStatus + + switch tasksetStatus { + case "PROVISION_TASKSET_STATUS_COMPLETE": + log.Printf("workload identity %s operation completed", wiName) + break LOOP + + case "PROVISION_TASKSET_STATUS_FAILED": + log.Printf("workload identity %s operation failed", wiName) + + if statusCluster.Status.Aks == nil { + return diag.FromErr(fmt.Errorf("workload identity %s operation failed", wiName)) + } + + msg, err := collectAKSV3UpsertEdgeResourceErrors(desiredInfraAksWorkloadIdentity, statusCluster.Status.Aks.EdgeResources) + if err != nil { + return diag.FromErr(err) + } + return diag.Errorf("workload identity %s operation failed with 
errors: %s", wiName, msg) + + case "PROVISION_TASKSET_STATUS_IN_PROGRESS", "PROVISION_TASKSET_STATUS_PENDING": + log.Printf("workload identity %s operation", wiName) + } + } + } + } + + d.SetId(wiName) + return diags +} + +func collectAKSV3UpsertEdgeResourceErrors(desiredInfraAksWorkloadIdentity *infrapb.AksWorkloadIdentity, edgeResources []*infrapb.EdgeResourceStatus) (string, error) { + raSet := make(map[string]struct{}) + for _, ra := range desiredInfraAksWorkloadIdentity.Spec.RoleAssignments { + raSet[ra.Name] = struct{}{} + } + + saSet := make(map[string]struct{}) + for _, sa := range desiredInfraAksWorkloadIdentity.Spec.ServiceAccounts { + saSet[sa.Metadata.Name] = struct{}{} + } + + var found bool + collectedErrors := AksUpsertErrorFormatter{} + + collectedErrors.WorkloadIdentities = []AksWorkloadIdentityErrorFormatter{} + for _, er := range edgeResources { + if er.EdgeResourceType != "AksWorkloadIdentity" && er.Name != desiredInfraAksWorkloadIdentity.Spec.Metadata.Name { + continue + } + found = true + + if strings.Contains(er.ProvisionStatus, "FAILED") { + collectedErrors.WorkloadIdentities = append(collectedErrors.WorkloadIdentities, AksWorkloadIdentityErrorFormatter{ + Name: er.Name, + FailureReason: er.ProvisionStatusReason, + }) + } + + for _, ra := range er.AksWorkloadIdentityStatus.RoleAssignmentsStatus { + if _, found := raSet[ra.Name]; !found { + continue + } + if strings.Contains(ra.ProvisionStatus, "FAILED") { + collectedErrors.WorkloadIdentities = append(collectedErrors.WorkloadIdentities, AksWorkloadIdentityErrorFormatter{ + Name: er.Name, + FailureReason: ra.ProvisionStatusReason, + }) + } + } + + for _, sa := range er.AksWorkloadIdentityStatus.ServiceAccountsStatus { + if _, found := saSet[sa.Name]; !found { + continue + } + if strings.Contains(sa.ProvisionStatus, "FAILED") { + collectedErrors.WorkloadIdentities = append(collectedErrors.WorkloadIdentities, AksWorkloadIdentityErrorFormatter{ + Name: er.Name, + FailureReason: 
sa.ProvisionStatusReason, + }) + } + } + } + + if !found { + collectedErrors.WorkloadIdentities = append(collectedErrors.WorkloadIdentities, AksWorkloadIdentityErrorFormatter{ + Name: desiredInfraAksWorkloadIdentity.Spec.Metadata.Name, + FailureReason: "workload identity not found in the cluster", + }) + } + + collectedErrsFormattedBytes, err := json.MarshalIndent(collectedErrors, "", " ") + if err != nil { + return "", err + } + collectErrs := strings.ReplaceAll(string(collectedErrsFormattedBytes), "\\n", "\n") + + fmt.Println("after MarshalIndent: ", "collectErrs", collectErrs) + return "\n" + collectErrs, nil +} + +func expandAksWorkloadIdentity(in *schema.ResourceData) (*infrapb.AksWorkloadIdentity, error) { + if in == nil { + return nil, fmt.Errorf("%s", "expand empty aks workload identity input") + } + obj := &infrapb.AksWorkloadIdentity{} + + if v, ok := in.Get("metadata").([]interface{}); ok && len(v) > 0 { + obj.Metadata = expandAksWorkloadIdentityMetadata(v) + } + + if v, ok := in.Get("spec").([]interface{}); ok && len(v) > 0 { + obj.Spec = expandAksWorkloadIdentitySpec(v) + } + + log.Println("desiredInfraAksWorkloadIdentity from expandAksWorkloadIdentity", spew.Sprintf("%+v", obj)) + + return obj, nil + +} + +func expandAksWorkloadIdentityMetadata(p []interface{}) *infrapb.AksWorkloadIdentityMetadata { + obj := infrapb.AksWorkloadIdentityMetadata{} + + if len(p) == 0 || p[0] == nil { + return &obj + } + + m := p[0].(map[string]interface{}) + + if v, ok := m["name"].(string); ok && v != "" { + obj.Name = v + } + + if v, ok := m["project"].(string); ok && v != "" { + obj.Project = v + } + + if v, ok := m["cluster_name"].(string); ok && v != "" { + obj.Clustername = v + } + + return &obj +} + +func expandAksWorkloadIdentitySpec(p []interface{}) *infrapb.AksWorkloadIdentitySpec { + obj := &infrapb.AksWorkloadIdentitySpec{} + + if len(p) == 0 || p[0] == nil { + return nil + } + + m := p[0].(map[string]interface{}) + + if v, ok := m["create_identity"].(bool); 
ok { + obj.CreateIdentity = v + } + + if v, ok := m["metadata"].([]interface{}); ok && len(v) > 0 { + obj.Metadata = expandAzureWorkloadIdentityMetadata(v) + } + + if v, ok := m["role_assignments"].([]interface{}); ok && len(v) > 0 { + obj.RoleAssignments = expandRoleAssignments(v) + } + + if v, ok := m["service_accounts"].([]interface{}); ok && len(v) > 0 { + obj.ServiceAccounts = expandServiceAccounts(v) + } + + return obj +} + +func expandAzureWorkloadIdentityMetadata(p []interface{}) *infrapb.AzureWorkloadIdentityMetadata { + obj := infrapb.AzureWorkloadIdentityMetadata{} + + if len(p) == 0 || p[0] == nil { + return &obj + } + + m := p[0].(map[string]interface{}) + + if v, ok := m["client_id"].(string); ok && v != "" { + obj.ClientId = v + } + + if v, ok := m["principal_id"].(string); ok && v != "" { + obj.PrincipalId = v + } + + if v, ok := m["name"].(string); ok && v != "" { + obj.Name = v + } + + if v, ok := m["location"].(string); ok && v != "" { + obj.Location = v + } + + if v, ok := m["resource_group"].(string); ok && v != "" { + obj.ResourceGroup = v + } + + if v, ok := m["tags"].(map[string]interface{}); ok { + obj.Tags = expandTags(v) + } + + return &obj +} + +func expandRoleAssignments(p []interface{}) []*infrapb.AzureWorkloadIdentityRoleAssignment { + var roleAssignments []*infrapb.AzureWorkloadIdentityRoleAssignment + + for _, item := range p { + m := item.(map[string]interface{}) + + roleAssignment := &infrapb.AzureWorkloadIdentityRoleAssignment{} + + if v, ok := m["name"].(string); ok && v != "" { + roleAssignment.Name = v + } + + if v, ok := m["role_definition_id"].(string); ok && v != "" { + roleAssignment.RoleDefinitionId = v + } + + if v, ok := m["scope"].(string); ok && v != "" { + roleAssignment.Scope = v + } + + roleAssignments = append(roleAssignments, roleAssignment) + } + + return roleAssignments +} + +func expandServiceAccounts(p []interface{}) []*infrapb.AzureWorkloadIdentityK8SServiceAccount { + var serviceAccounts 
[]*infrapb.AzureWorkloadIdentityK8SServiceAccount + + for _, item := range p { + m := item.(map[string]interface{}) + + serviceAccount := &infrapb.AzureWorkloadIdentityK8SServiceAccount{} + + if v, ok := m["metadata"].([]interface{}); ok && len(v) > 0 { + serviceAccount.Metadata = expandServiceAccountMetadata(v) + } + + if v, ok := m["create_account"].(bool); ok { + serviceAccount.CreateAccount = v + } + + serviceAccounts = append(serviceAccounts, serviceAccount) + } + + return serviceAccounts +} + +func expandServiceAccountMetadata(p []interface{}) *infrapb.K8SServiceAccountMetadata { + obj := infrapb.K8SServiceAccountMetadata{} + + if len(p) == 0 || p[0] == nil { + return &obj + } + + m := p[0].(map[string]interface{}) + + if v, ok := m["name"].(string); ok && v != "" { + obj.Name = v + } + + if v, ok := m["namespace"].(string); ok && v != "" { + obj.Namespace = v + } + + if v, ok := m["annotations"].(map[string]interface{}); ok { + obj.Annotations = expandAnnotations(v) + } + + if v, ok := m["labels"].(map[string]interface{}); ok { + obj.Labels = expandLabels(v) + } + + return &obj +} + +func expandTags(m map[string]interface{}) map[string]string { + tags := make(map[string]string) + + for k, v := range m { + if s, ok := v.(string); ok && s != "" { + tags[k] = s + } + } + + return tags +} + +func expandAnnotations(m map[string]interface{}) map[string]string { + annotations := make(map[string]string) + + for k, v := range m { + if s, ok := v.(string); ok && s != "" { + annotations[k] = s + } + } + + return annotations +} + +func expandLabels(m map[string]interface{}) map[string]string { + labels := make(map[string]string) + + for k, v := range m { + if s, ok := v.(string); ok && s != "" { + labels[k] = s + } + } + + return labels +} + +func flattenAksWorkloadIdentity(d *schema.ResourceData, in *infrapb.AksWorkloadIdentity) error { + if in == nil { + return nil + } + + obj := map[string]interface{}{} + + if len(in.ApiVersion) > 0 { + obj["api_version"] = 
in.ApiVersion + } + if len(in.Kind) > 0 { + obj["kind"] = in.Kind + } + + var metadata []interface{} + if in.Metadata != nil { + metadata = flattenMetadata(in.Metadata) + } + + log.Println("metadata from flattenAksWorkloadIdentity", spew.Sprintf("%+v", metadata)) + + if err := d.Set("metadata", metadata); err != nil { + return err + } + + var spec []interface{} + if in.Spec != nil { + spec = flattenSpec(in.Spec) + } + + log.Println("spec from flattenAksWorkloadIdentity", spew.Sprintf("%+v", spec)) + + if err := d.Set("spec", spec); err != nil { + return err + } + + return nil +} + +func flattenMetadata(in *infrapb.AksWorkloadIdentityMetadata) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + if len(in.Clustername) > 0 { + obj["cluster_name"] = in.Clustername + } + if len(in.Project) > 0 { + obj["project"] = in.Project + } + // if len(in.Name) > 0 { + // obj["name"] = in.Name + // } + + return []interface{}{obj} +} + +func flattenSpec(in *infrapb.AksWorkloadIdentitySpec) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + obj["create_identity"] = in.CreateIdentity + + var metadata []interface{} + if in.Metadata != nil { + metadata = flattenSpecMetadata(in.Metadata) + } + obj["metadata"] = metadata + + var roleAssignments []interface{} + for _, ra := range in.RoleAssignments { + roleAssignments = append(roleAssignments, flattenRoleAssignment(ra)...) + } + obj["role_assignments"] = roleAssignments + + var serviceAccounts []interface{} + for _, sa := range in.ServiceAccounts { + serviceAccounts = append(serviceAccounts, flattenServiceAccount(sa)...) 
+ } + obj["service_accounts"] = serviceAccounts + + return []interface{}{obj} +} + +func flattenSpecMetadata(in *infrapb.AzureWorkloadIdentityMetadata) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + if len(in.Name) > 0 { + obj["name"] = in.Name + } + if len(in.Location) > 0 { + obj["location"] = in.Location + } + if len(in.ResourceGroup) > 0 { + obj["resource_group"] = in.ResourceGroup + } + + var tags map[string]interface{} + if in.Tags != nil { + tags = flattenStringMap(in.Tags) + } + obj["tags"] = tags + + return []interface{}{obj} +} + +func flattenRoleAssignment(in *infrapb.AzureWorkloadIdentityRoleAssignment) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + if len(in.Name) > 0 { + obj["name"] = in.Name + } + if len(in.Scope) > 0 { + obj["scope"] = in.Scope + } + + return []interface{}{obj} +} + +func flattenServiceAccount(in *infrapb.AzureWorkloadIdentityK8SServiceAccount) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + obj["create_account"] = in.CreateAccount + + if in.Metadata != nil { + obj["metadata"] = flattenServiceAccountMetadata(in.Metadata) + } + + return []interface{}{obj} +} + +func flattenServiceAccountMetadata(in *infrapb.K8SServiceAccountMetadata) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + + if len(in.Name) > 0 { + obj["name"] = in.Name + } + if len(in.Namespace) > 0 { + obj["namespace"] = in.Namespace + } + + if in.Annotations != nil { + obj["annotations"] = flattenStringMap(in.Annotations) + } + + if in.Labels != nil { + obj["labels"] = flattenStringMap(in.Labels) + } + + return []interface{}{obj} +} + +func flattenStringMap(in map[string]string) map[string]interface{} { + if in == nil { + return nil + } + + out := make(map[string]interface{}) + for k, v := range in { + out[k] = v + } + + return out +} diff --git a/rafay/resource_breakglassaccess.go 
b/rafay/resource_breakglassaccess.go new file mode 100644 index 00000000..97a95a78 --- /dev/null +++ b/rafay/resource_breakglassaccess.go @@ -0,0 +1,373 @@ +package rafay + +import ( + "context" + "fmt" + "log" + "os" + "time" + + "github.com/RafaySystems/rctl/pkg/config" + + "github.com/RafaySystems/rafay-common/pkg/hub/client/options" + typed "github.com/RafaySystems/rafay-common/pkg/hub/client/typed" + "github.com/RafaySystems/rafay-common/pkg/hub/terraform/resource" + "github.com/RafaySystems/rafay-common/proto/types/hub/commonpb" + "github.com/RafaySystems/rafay-common/proto/types/hub/systempb" + "github.com/RafaySystems/rctl/pkg/versioninfo" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceBreakGlassAccess() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceBreakGlassAccessCreate, + ReadContext: resourceBreakGlassAccessRead, + UpdateContext: resourceBreakGlassAccessUpdate, + DeleteContext: resourceBreakGlassAccessDelete, + Importer: &schema.ResourceImporter{ + State: resourceBreakGlassAccessImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + SchemaVersion: 1, + Schema: resource.BreakGlassAccessSchema.Schema, + } +} + +func resourceBreakGlassAccessImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + if d.Id() == "" { + return nil, fmt.Errorf("username not provided, usage e.g terraform import rafay_breakglassaccess.resource ") + } + + username := d.Id() + + log.Println("Importing break glass access for user: ", username) + + breakGlassAccess, err := expandBreakGlassAccess(d) + if err != nil { + log.Printf("breakGlassAccess expandBreakGlassAccess error") + return nil, err + } + + var metaD commonpb.Metadata + metaD.Name = username + breakGlassAccess.Metadata = 
&metaD + + err = d.Set("metadata", flattenMetaData(breakGlassAccess.Metadata)) + if err != nil { + return nil, err + } + + d.SetId(username) + + return []*schema.ResourceData{d}, nil +} + +func resourceBreakGlassAccessCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Println("break glass access create") + diags := resourceBreakGlassAccessUpsert(ctx, d, m) + if diags.HasError() { + tflog := os.Getenv("TF_LOG") + if tflog == "TRACE" || tflog == "DEBUG" { + ctx = context.WithValue(ctx, "debug", "true") + } + zr, err := expandBreakGlassAccess(d) + if err != nil { + return diags + } + auth := config.GetConfig().GetAppAuthProfile() + client, err := typed.NewClientWithUserAgent(auth.URL, auth.Key, versioninfo.GetUserAgent(), options.WithInsecureSkipVerify(auth.SkipServerCertValid)) + if err != nil { + return diags + } + + err = client.SystemV3().BreakGlassAccess().Delete(ctx, options.DeleteOptions{ + Name: zr.Metadata.Name, + }) + if err != nil { + return diags + } + } + return diags +} + +func resourceBreakGlassAccessUpsert(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + var diags diag.Diagnostics + log.Printf("break glass access upsert starts") + tflog := os.Getenv("TF_LOG") + if tflog == "TRACE" || tflog == "DEBUG" { + ctx = context.WithValue(ctx, "debug", "true") + } + + if d.State() != nil && d.State().ID != "" { + n := GetMetaName(d) + if n != "" && n != d.State().ID { + log.Printf("metadata name change not supported") + d.State().Tainted = true + return diag.FromErr(fmt.Errorf("%s", "metadata name change not supported")) + } + } + + tus, err := expandBreakGlassAccess(d) + if err != nil { + return diag.FromErr(err) + } + + auth := config.GetConfig().GetAppAuthProfile() + client, err := typed.NewClientWithUserAgent(auth.URL, auth.Key, versioninfo.GetUserAgent(), options.WithInsecureSkipVerify(auth.SkipServerCertValid)) + if err != nil { + return diag.FromErr(err) + } + + err = 
client.SystemV3().BreakGlassAccess().Apply(ctx, tus, options.ApplyOptions{}) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(tus.Metadata.Name) + return diags +} + +func resourceBreakGlassAccessRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + log.Println("resource break glass access ") + + meta := GetMetaData(d) + if meta == nil { + return diag.FromErr(fmt.Errorf("%s", "failed to read resource ")) + } + if d.State() != nil && d.State().ID != "" { + meta.Name = d.State().ID + } + + auth := config.GetConfig().GetAppAuthProfile() + client, err := typed.NewClientWithUserAgent(auth.URL, auth.Key, versioninfo.GetUserAgent(), options.WithInsecureSkipVerify(auth.SkipServerCertValid)) + if err != nil { + log.Println("read client err") + return diag.FromErr(err) + } + + ac, err := client.SystemV3().BreakGlassAccess().Get(ctx, options.GetOptions{ + Name: meta.Name, + }) + if err != nil { + log.Println("read get err") + return diag.FromErr(err) + } + + err = flattenBreakGlassAccess(d, ac) + if err != nil { + log.Println("read flatten err") + return diag.FromErr(err) + } + return diags +} + +func resourceBreakGlassAccessUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + return resourceBreakGlassAccessUpsert(ctx, d, m) +} + +func resourceBreakGlassAccessDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + var diags diag.Diagnostics + tflog := os.Getenv("TF_LOG") + if tflog == "TRACE" || tflog == "DEBUG" { + ctx = context.WithValue(ctx, "debug", "true") + } + + tus, err := expandBreakGlassAccess(d) + if err != nil { + return diag.FromErr(err) + } + + auth := config.GetConfig().GetAppAuthProfile() + client, err := typed.NewClientWithUserAgent(auth.URL, auth.Key, versioninfo.GetUserAgent(), options.WithInsecureSkipVerify(auth.SkipServerCertValid)) + if err != nil { + return diag.FromErr(err) + } + + err = 
client.SystemV3().BreakGlassAccess().Delete(ctx, options.DeleteOptions{ + Name: tus.Metadata.Name, + }) + + if err != nil { + log.Println("break glass access delete error") + return diag.FromErr(err) + } + + return diags +} + +func expandBreakGlassAccess(in *schema.ResourceData) (*systempb.BreakGlassAccess, error) { + log.Println("expand break glass access") + if in == nil { + return nil, fmt.Errorf("%s", "expand BreakGlassAccess empty input") + } + obj := &systempb.BreakGlassAccess{} + + if v, ok := in.Get("metadata").([]interface{}); ok { + obj.Metadata = expandMetaData(v) + } + + if v, ok := in.Get("spec").([]interface{}); ok && len(v) > 0 { + objSpec, err := expandBreakGlassAccessSpec(v) + if err != nil { + return nil, err + } + obj.Spec = objSpec + } + + obj.ApiVersion = "system.k8smgmt.io/v3" + obj.Kind = "BreakGlassAccess" + + return obj, nil +} + +func expandBreakGlassAccessSpec(p []interface{}) (*systempb.BreakGlassAccessSpec, error) { + obj := &systempb.BreakGlassAccessSpec{} + if len(p) == 0 || p[0] == nil { + return obj, fmt.Errorf("expandBreakGlassAccessSpec empty input") + } + + in := p[0].(map[string]interface{}) + + if v, ok := in["groups"].([]interface{}); ok && len(v) > 0 { + obj.Groups = expandGroups(v) + } + + return obj, nil +} + +func expandGroups(p []interface{}) []*systempb.GroupSpec { + groups := make([]*systempb.GroupSpec, len(p)) + for i, group := range p { + groupMap := group.(map[string]interface{}) + g := &systempb.GroupSpec{} + + if v, ok := groupMap["user_type"].(string); ok && len(v) > 0 { + g.UserType = v + } + + if v, ok := groupMap["group_expiry"].([]interface{}); ok && len(v) > 0 { + g.GroupExpiry = expandGroupExpiry(v) + } + + groups[i] = g + } + return groups +} + +func expandGroupExpiry(p []interface{}) []*systempb.GroupExpiryDetails { + groupExpiries := make([]*systempb.GroupExpiryDetails, len(p)) + for i, expiry := range p { + expiryMap := expiry.(map[string]interface{}) + ge := &systempb.GroupExpiryDetails{} + + if v, ok 
:= expiryMap["expiry"].(int); ok { + ge.Expiry = float64(v) + } + + if v, ok := expiryMap["name"].(string); ok && len(v) > 0 { + ge.Name = v + } + + if v, ok := expiryMap["start_time"].(string); ok && len(v) > 0 { + ge.StartTime = v + } + groupExpiries[i] = ge + } + return groupExpiries +} + +// Flatteners +func flattenBreakGlassAccess(d *schema.ResourceData, in *systempb.BreakGlassAccess) error { + if in == nil { + return nil + } + + err := d.Set("metadata", flattenMetaData(in.Metadata)) + if err != nil { + log.Println("flatten metadata err") + return err + } + + v, ok := d.Get("spec").([]interface{}) + if !ok { + v = []interface{}{} + } + + var ret []interface{} + ret, err = flattenBreakGlassAccessSpec(in.Spec, v) + if err != nil { + log.Println("flatten break glass access spec err") + return err + } + + err = d.Set("spec", ret) + if err != nil { + log.Println("set spec err") + return err + } + return nil +} + +func flattenBreakGlassAccessSpec(in *systempb.BreakGlassAccessSpec, p []interface{}) ([]interface{}, error) { + if in == nil { + return nil, fmt.Errorf("flattenBreakGlassAccessSpec empty input") + } + + obj := map[string]interface{}{} + if len(p) != 0 && p[0] != nil { + obj = p[0].(map[string]interface{}) + } + + if in.Groups != nil { + obj["groups"] = flattenGroups(in.Groups) + } + + return []interface{}{obj}, nil +} + +func flattenGroups(groups []*systempb.GroupSpec) []interface{} { + flattenedGroups := make([]interface{}, len(groups)) + for i, group := range groups { + groupMap := map[string]interface{}{} + + if len(group.UserType) > 0 { + groupMap["user_type"] = group.UserType + } + + if group.GroupExpiry != nil { + groupMap["group_expiry"] = flattenGroupExpiry(group.GroupExpiry) + } + + flattenedGroups[i] = groupMap + } + return flattenedGroups +} + +func flattenGroupExpiry(groupExpiries []*systempb.GroupExpiryDetails) []interface{} { + flattenedGroupExpiries := make([]interface{}, len(groupExpiries)) + for i, expiry := range groupExpiries { + 
expiryMap := map[string]interface{}{} + + expiryMap["expiry"] = int(expiry.Expiry) + + if len(expiry.Name) > 0 { + expiryMap["name"] = expiry.Name + } + + expiryMap["start_time"] = expiry.StartTime + + flattenedGroupExpiries[i] = expiryMap + } + return flattenedGroupExpiries +} diff --git a/rafay/resource_configcontext.go b/rafay/resource_configcontext.go index ea6515b4..f2e27afa 100644 --- a/rafay/resource_configcontext.go +++ b/rafay/resource_configcontext.go @@ -274,31 +274,29 @@ func expandEnvVariables(p []interface{}) []*eaaspb.EnvData { func expandConfigContextCompoundRefs(p []interface{}) []*eaaspb.ConfigContextCompoundRef { var ccs []*eaaspb.ConfigContextCompoundRef - if len(p) == 0 || p[0] == nil { + if len(p) == 0 { return ccs } for i := range p { - cc := expandConfigContextCompoundRef(p[i].([]interface{})) + cc := expandConfigContextCompoundRef(p[i].(map[string]any)) ccs = append(ccs, cc) } return ccs } -func expandConfigContextCompoundRef(p []interface{}) *eaaspb.ConfigContextCompoundRef { +func expandConfigContextCompoundRef(p map[string]any) *eaaspb.ConfigContextCompoundRef { cc := &eaaspb.ConfigContextCompoundRef{} - if len(p) == 0 || p[0] == nil { + if len(p) == 0 { return cc } - in := p[0].(map[string]interface{}) - - if v, ok := in["name"].(string); ok && len(v) > 0 { + if v, ok := p["name"].(string); ok && len(v) > 0 { cc.Name = v } - if v, ok := in["data"].([]interface{}); ok && len(v) > 0 { + if v, ok := p["data"].([]interface{}); ok && len(v) > 0 { cc.Data = expandConfigContextInline(v) } @@ -436,38 +434,32 @@ func flattenConfigContextCompoundRefs(input []*eaaspb.ConfigContextCompoundRef) return ccs } -func flattenConfigContextCompoundRef(input *eaaspb.ConfigContextCompoundRef) []interface{} { - cc := make(map[string]interface{}) +func flattenConfigContextCompoundRef(input *eaaspb.ConfigContextCompoundRef) map[string]any { if input == nil { - return []interface{}{cc} + return nil } + cc := make(map[string]any) if len(input.Name) > 0 { 
cc["name"] = input.Name } cc["data"] = flattenConfigContextInline(input.Data) - return []interface{}{cc} + return cc } func flattenConfigContextInline(input *eaaspb.ConfigContextInline) []interface{} { - cc := make(map[string]interface{}) if input == nil { - return []interface{}{cc} - } - - if len(input.Envs) > 0 { - cc["envs"] = flattenEnvVariables(input.Envs, nil) - } - if len(input.Files) > 0 { - cc["files"] = flattenCommonpbFiles(input.Files) + return nil } - if len(input.Variables) > 0 { - cc["variables"] = flattenVariables(input.Variables, nil) + return []any{ + map[string]any{ + "envs": flattenEnvVariables(input.Envs, nil), + "files": flattenCommonpbFiles(input.Files), + "variables": flattenVariables(input.Variables, nil), + }, } - - return []interface{}{cc} } func resourceConfigContextImport(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { diff --git a/rafay/resource_cost_profile.go b/rafay/resource_cost_profile.go index 499cc0a1..ab304f77 100644 --- a/rafay/resource_cost_profile.go +++ b/rafay/resource_cost_profile.go @@ -267,6 +267,11 @@ func expandCostProfileIP(p []interface{}) *costpb.InstallationParams { obj.Azure = expandCostProfileAzureCostProfile(v) } } + if _, ok := in["gcp"]; ok { + if v, ok := in["gcp"].([]interface{}); ok && len(v) > 0 { + obj.Gcp = expandCostProfileGcpCostProfile(v) + } + } if v, ok := in["other"].([]interface{}); ok && len(v) > 0 { obj.Other = expandCostProfileOtherCostProfile(v) @@ -275,7 +280,20 @@ func expandCostProfileIP(p []interface{}) *costpb.InstallationParams { return obj } +func expandCostProfileGcpCostProfile(p []interface{}) *costpb.GcpCostProfile { + obj := &costpb.GcpCostProfile{} + if len(p) == 0 || p[0] == nil { + return obj + } + + in := p[0].(map[string]interface{}) + if v, ok := in["gcp_credentials"].([]interface{}); ok && len(v) > 0 { + obj.GcpCredentials = expandCostProfileGcpCredentials(v) + } + return obj + +} func expandCostProfileAwsCostProfile(p []interface{}) 
*costpb.AwsCostProfile { obj := &costpb.AwsCostProfile{} if len(p) == 0 || p[0] == nil { @@ -325,6 +343,20 @@ func expandCostProfileAwsCredentials(p []interface{}) *costpb.AwsCredsCostProfil } +func expandCostProfileGcpCredentials(p []interface{}) *costpb.GcpCredsCostProfile{ + obj := &costpb.GcpCredsCostProfile{} + if len(p) == 0 || p[0] == nil { + return obj + } + + in := p[0].(map[string]interface{}) + if v, ok := in["cloud_credentials_name"].(string); ok && len(v) > 0 { + obj.CloudCredentialsName = v + } + + return obj +} + func expandCostProfileAwsCurIntegration(p []interface{}) *costpb.AwsCurIntegration { obj := &costpb.AwsCurIntegration{} if len(p) == 0 || p[0] == nil { @@ -601,6 +633,14 @@ func flattenCostProfileSpecIP(in *costpb.InstallationParams, p []interface{}) [] obj["other"] = flattenCostProfileOtherIP(in.Other, v) } + if in.Gcp != nil { + v, ok := obj["gcp"].([]interface{}) + if !ok { + v = []interface{}{} + } + obj["gcp"] = flattenCostProfileGcpIP(in.Gcp, v) + } + return []interface{}{obj} } @@ -640,7 +680,41 @@ func flattenCostProfileAwsIP(in *costpb.AwsCostProfile, p []interface{}) []inter return []interface{}{obj} } +func flattenCostProfileGcpIP(in *costpb.GcpCostProfile, p []interface{}) []interface{} { + if in == nil { + return nil + } + + obj := map[string]interface{}{} + if len(p) != 0 && p[0] != nil { + obj = p[0].(map[string]interface{}) + } + + if in.GcpCredentials != nil { + v, ok := obj["gcp_credentials"].([]interface{}) + if !ok { + v = []interface{}{} + } + obj["gcp_credentials"] = flattenCostProfileGcpCredentials(in.GcpCredentials, v) + } + return []interface{}{obj} +} +func flattenCostProfileGcpCredentials(in *costpb.GcpCredsCostProfile, p []interface{}) []interface{} { + if in == nil { + return nil + } + + obj := map[string]interface{}{} + if len(p) != 0 && p[0] != nil { + obj = p[0].(map[string]interface{}) + } + if len(in.CloudCredentialsName) > 0 { + obj["cloud_credentials_name"] = in.CloudCredentialsName + } + return 
[]interface{}{obj} + +} func flattenCostProfileAwsCredentials(in *costpb.AwsCredsCostProfile, p []interface{}) []interface{} { if in == nil { return nil diff --git a/rafay/resource_driver.go b/rafay/resource_driver.go index f8547c96..4b132108 100644 --- a/rafay/resource_driver.go +++ b/rafay/resource_driver.go @@ -9,13 +9,14 @@ import ( "time" "github.com/RafaySystems/rafay-common/pkg/hub/client/options" - typed "github.com/RafaySystems/rafay-common/pkg/hub/client/typed" + "github.com/RafaySystems/rafay-common/pkg/hub/client/typed" "github.com/RafaySystems/rafay-common/pkg/hub/terraform/resource" "github.com/RafaySystems/rafay-common/proto/types/hub/commonpb" "github.com/RafaySystems/rafay-common/proto/types/hub/eaaspb" "github.com/RafaySystems/rctl/pkg/config" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/protobuf/types/known/structpb" ) func resourceDriver() *schema.Resource { @@ -227,6 +228,18 @@ func expandDriverSpec(p []interface{}) (*eaaspb.DriverSpec, error) { spec.Sharing = expandSharingSpec(v) } + if v, ok := in["inputs"].([]interface{}); ok && len(v) > 0 { + spec.Inputs = expandConfigContextCompoundRefs(v) + } + + var err error + if v, ok := in["outputs"].(string); ok && len(v) > 0 { + spec.Outputs, err = expandDriverOutputs(v) + if err != nil { + return nil, err + } + } + return spec, nil } @@ -458,42 +471,16 @@ func expandDriverHttpConfig(p []interface{}) *eaaspb.HTTPDriverConfig { return &hc } -func expandDriverCompoundRef(p []interface{}) *eaaspb.DriverCompoundRef { - driver := &eaaspb.DriverCompoundRef{} - if len(p) == 0 || p[0] == nil { - return driver - } - - in := p[0].(map[string]interface{}) - - if v, ok := in["name"].(string); ok && len(v) > 0 { - driver.Name = v - } - - if v, ok := in["data"].([]interface{}); ok && len(v) > 0 { - driver.Data = expandDriverInline(v) - } - - return driver -} - -func expandDriverInline(p []interface{}) *eaaspb.DriverInline { - 
driver := &eaaspb.DriverInline{} - if len(p) == 0 || p[0] == nil { - return driver - } - - in := p[0].(map[string]interface{}) - - if v, ok := in["config"].([]interface{}); ok && len(v) > 0 { - driver.Config = expandDriverConfig(v) +func expandDriverOutputs(p string) (*structpb.Struct, error) { + if len(p) == 0 { + return nil, nil } - if v, ok := in["inputs"].([]interface{}); ok && len(v) > 0 { - driver.Inputs = expandConfigContextCompoundRefs(v) + var s structpb.Struct + if err := s.UnmarshalJSON([]byte(p)); err != nil { + return nil, err } - - return driver + return &s, nil } // Flatteners @@ -547,9 +534,9 @@ func flattenDriverSpec(in *eaaspb.DriverSpec, p []interface{}) ([]interface{}, e obj["config"] = flattenDriverConfig(in.Config, v) } - obj["sharing"] = flattenSharingSpec(in.Sharing) - + obj["inputs"] = flattenConfigContextCompoundRefs(in.Inputs) + obj["outputs"] = flattenDriverOutputs(in.Outputs) return []interface{}{obj}, nil } @@ -891,6 +878,14 @@ func flattenContainerDriverVolumeOptions(input []*eaaspb.ContainerDriverVolumeOp return out } +func flattenDriverOutputs(in *structpb.Struct) string { + if in == nil { + return "" + } + b, _ := in.MarshalJSON() + return string(b) +} + func resourceDriverImport(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { log.Printf("Driver Import Starts") @@ -916,33 +911,3 @@ func resourceDriverImport(d *schema.ResourceData, m interface{}) ([]*schema.Reso d.SetId(cc.Metadata.Name) return []*schema.ResourceData{d}, nil } - -func flattenDriverCompoundRef(input *eaaspb.DriverCompoundRef) []interface{} { - log.Println("flatten driver compound ref start") - if input == nil { - return nil - } - obj := map[string]interface{}{} - if len(input.Name) > 0 { - obj["name"] = input.Name - } - if input.Data != nil { - obj["data"] = flattenDriverInline(input.Data) - } - return []interface{}{obj} -} - -func flattenDriverInline(input *eaaspb.DriverInline) []interface{} { - log.Println("flatten driver inline start") - 
if input == nil { - return nil - } - obj := map[string]interface{}{} - if input.Config != nil { - obj["config"] = flattenDriverConfig(input.Config, []interface{}{}) - } - if len(input.Inputs) > 0 { - obj["inputs"] = flattenConfigContextCompoundRefs(input.Inputs) - } - return []interface{}{obj} -} diff --git a/rafay/resource_eks_cluster.go b/rafay/resource_eks_cluster.go index 99216813..fbbcae23 100644 --- a/rafay/resource_eks_cluster.go +++ b/rafay/resource_eks_cluster.go @@ -498,6 +498,79 @@ func iamFields() map[string]*schema.Schema { Optional: true, Description: "attaches the IAM policy necessary to run the VPC controller in the control plane", }, + "pod_identity_associations": { + Type: schema.TypeList, + Optional: true, + Description: "pod identity associations", + Elem: &schema.Resource{ + Schema: podIdentityAssociationsFields(), + }, + }, + } + return s +} + +func podIdentityAssociationsFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "namespace": { + Type: schema.TypeString, + Required: true, + Description: "namespace of service account", + }, + "service_account_name": { + Type: schema.TypeString, + Required: true, + Description: "name of service account", + }, + "role_arn": { + Type: schema.TypeString, + Optional: true, + Description: "role ARN of AWS role to associate with service account", + }, + "create_service_account": { + Type: schema.TypeBool, + Optional: true, + Description: "enable flag to create service account", + }, + "role_name": { + Type: schema.TypeString, + Optional: true, + Description: "aws role name to associate", + }, + "permission_boundary_arn": { + Type: schema.TypeString, + Optional: true, + Description: "permission boundary ARN", + }, + "permission_policy_arns": { + Type: schema.TypeList, + Optional: true, + Description: "permission policy ARNs", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "permission_policy": { + Type: schema.TypeString, + Optional: true, + Description: "permission policy 
document", + }, + "well_known_policies": { + Type: schema.TypeList, + Optional: true, + Description: "for attaching common IAM policies", + Elem: &schema.Resource{ + Schema: serviceAccountsWellKnownPolicyFields(), + }, + }, + "tags": { + Type: schema.TypeMap, + Optional: true, + Description: "AWS tags for the service account", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, } return s } @@ -3668,6 +3741,10 @@ func expandIAMFields(p []interface{}) *EKSClusterIAM { obj.VPCResourceControllerPolicy = &v } + if v, ok := in["pod_identity_associations"].([]interface{}); ok && len(v) > 0 { + obj.PodIdentityAssociations = expandIAMPodIdentityAssociationsConfig(v) + } + return obj } @@ -3695,6 +3772,49 @@ func expandServiceAccountsMetadata(p []interface{}) *EKSClusterIAMMeta { return obj } +func expandIAMPodIdentityAssociationsConfig(p []interface{}) []*IAMPodIdentityAssociation { + out := make([]*IAMPodIdentityAssociation, len(p)) + if len(p) == 0 || p[0] == nil { + return out + } + for i := range p { + obj := &IAMPodIdentityAssociation{} + in := p[i].(map[string]interface{}) + if v, ok := in["namespace"].(string); ok && len(v) > 0 { + obj.Namespace = v + } + if v, ok := in["service_account_name"].(string); ok && len(v) > 0 { + obj.ServiceAccountName = v + } + if v, ok := in["role_arn"].(string); ok && len(v) > 0 { + obj.RoleARN = v + } + if v, ok := in["create_service_account"].(bool); ok { + obj.CreateServiceAccount = &v + } + if v, ok := in["role_name"].(string); ok && len(v) > 0 { + obj.RoleName = v + } + if v, ok := in["permission_boundary_arn"].(string); ok && len(v) > 0 { + obj.PermissionsBoundaryARN = v + } + if v, ok := in["permission_policy"].(map[string]interface{}); ok && len(v) > 0 { + obj.PermissionPolicy = v + } + if v, ok := in["permission_policy_arns"].([]interface{}); ok && len(v) > 0 { + obj.PermissionPolicyARNs = toArrayString(v) + } + if v, ok := in["well_known_policies"].([]interface{}); ok && len(v) > 0 { + obj.WellKnownPolicies = 
expandIAMWellKnownPolicies(v) + } + if v, ok := in["tags"].(map[string]interface{}); ok && len(v) > 0 { + obj.Tags = toMapString(v) + } + out[i] = obj + } + return out +} + func expandIAMServiceAccountsConfig(p []interface{}) []*EKSClusterIAMServiceAccount { out := make([]*EKSClusterIAMServiceAccount, len(p)) if len(p) == 0 || p[0] == nil { @@ -4541,6 +4661,14 @@ func flattenEKSClusterIAM(in *EKSClusterIAM, rawState cty.Value, p []interface{} obj["service_accounts"] = flattenIAMServiceAccounts(in.ServiceAccounts, nRawState, v) } + if in.PodIdentityAssociations != nil { + v, ok := obj["pod_identity_associations"].([]interface{}) + if !ok { + v = []interface{}{} + } + obj["pod_identity_associations"] = flattenIAMPodIdentityAssociations(in.PodIdentityAssociations, v) + } + obj["vpc_resource_controller_policy"] = in.VPCResourceControllerPolicy return []interface{}{obj}, nil @@ -4612,6 +4740,57 @@ func flattenSingleIAMServiceAccount(in *EKSClusterIAMServiceAccount) map[string] return obj } +func flattenIAMPodIdentityAssociations(inp []*IAMPodIdentityAssociation, p []interface{}) []interface{} { + if inp == nil { + return nil + } + out := make([]interface{}, len(inp)) + for i, in := range inp { + obj := map[string]interface{}{} + if i < len(p) && p[i] != nil { + obj = p[i].(map[string]interface{}) + } + if len(in.ServiceAccountName) > 0 { + obj["service_account_name"] = in.ServiceAccountName + } + if len(in.Namespace) > 0 { + obj["namespace"] = in.Namespace + } + if len(in.RoleARN) > 0 { + obj["role_arn"] = in.RoleARN + } + if len(in.RoleName) > 0 { + obj["role_name"] = in.RoleName + } + if len(in.PermissionPolicy) > 0 { + obj["permission_policy"] = in.PermissionPolicy + } + if in.WellKnownPolicies != nil { + v, ok := obj["well_known_policies"].([]interface{}) + if !ok { + v = []interface{}{} + } + obj["well_known_policies"] = flattenIAMWellKnownPolicies(in.WellKnownPolicies, v) + } + if in.PermissionPolicyARNs != nil && len(in.PermissionPolicyARNs) > 0 { + 
obj["permission_policy_arns"] = toArrayInterface(in.PermissionPolicyARNs) + } + if len(in.PermissionsBoundaryARN) > 0 { + obj["permissions_boundary_arn"] = in.PermissionsBoundaryARN + } + if in.Tags != nil && len(in.Tags) > 0 { + obj["tags"] = toMapInterface(in.Tags) + } + // if *in.CreateServiceAccount { + // obj["create_service_account"] = *in.CreateServiceAccount + // } + obj["create_service_account"] = true + + out[i] = obj + } + return out +} + func flattenIAMServiceAccounts(inp []*EKSClusterIAMServiceAccount, rawState cty.Value, p []interface{}) []interface{} { if inp == nil { return nil diff --git a/rafay/resource_eks_pod_identity.go b/rafay/resource_eks_pod_identity.go new file mode 100644 index 00000000..e5190e6e --- /dev/null +++ b/rafay/resource_eks_pod_identity.go @@ -0,0 +1,445 @@ +package rafay + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log" + "os" + "time" + + "github.com/RafaySystems/rctl/pkg/cluster" + "github.com/RafaySystems/rctl/pkg/config" + "github.com/RafaySystems/rctl/pkg/project" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/pkg/errors" +) + +func resourceEKSPodIdentity() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceEksPodIdentityCreate, + ReadContext: resourceEksPodIdentityRead, + UpdateContext: resourceEksPodIdentityUpdate, + DeleteContext: resourceEksPodIdentityDelete, + // Importer: &schema.ResourceImporter{ + // State: resourceEksPodIdentityImport, + // }, + SchemaVersion: 1, + Schema: map[string]*schema.Schema{ + "spec": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: podIdentityAssociationsFields(), + }, + MinItems: 1, + MaxItems: 1, + }, + "metadata": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: metadataFields(), + }, + MinItems: 1, + MaxItems: 1, + }, + }, + // Description: resourceEksPodIdentityDescription, + } +} + +func 
metadataFields() map[string]*schema.Schema { + s := map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Required: true, + }, + "project_name": { + Type: schema.TypeString, + Required: true, + }, + } + + return s +} + +func resourceEksPodIdentityCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + return resourceEksPodIdentityUpsert(ctx, d, m) +} + +func resourceEksPodIdentityUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + var diags diag.Diagnostics + log.Printf("pod identity update starts") + tflog := os.Getenv("TF_LOG") + if tflog == "TRACE" || tflog == "DEBUG" { + ctx = context.WithValue(ctx, "debug", "true") + } + + var podIdentity []*IAMPodIdentityAssociation + metadata := &Metadata{} + //rawConfig := d.GetRawConfig() + + if v, ok := d.Get("spec").([]interface{}); ok && len(v) > 0 { + podIdentity = expandIAMPodIdentityAssociationsConfig(v) + } else { + return diag.FromErr(fmt.Errorf("spec not specified")) + } + + if len(podIdentity) == 0 { + return diag.FromErr(errors.New("could not get pod identity associations")) + } + + if v, ok := d.Get("metadata").([]interface{}); ok && len(v) > 0 { + in := v[0].(map[string]interface{}) + metadata.clusterName = in["cluster_name"].(string) + metadata.projectName = in["project_name"].(string) + } + + resp, err := project.GetProjectByName(metadata.projectName) + if err != nil { + log.Printf("project does not exist, error %s", err.Error()) + return diag.FromErr(err) + } + p, err := project.NewProjectFromResponse([]byte(resp)) + if err != nil { + return diag.FromErr(err) + } else if p == nil { + d.SetId("") + return diags + } + project_id := p.ID + + cluster_resp, err := cluster.GetCluster(metadata.clusterName, project_id, "terraform") + if err != nil { + log.Printf("imported cluster was not created, error %s", err.Error()) + return diag.FromErr(err) + } + + auth := config.GetConfig().GetAppAuthProfile() + uri := 
fmt.Sprintf("/edge/v1/projects/%s/edges/%s/podidentity", project_id, cluster_resp.ID) + + log.Printf("payload response : %s", podIdentity[0].Namespace) + + response, err := auth.AuthAndRequest(uri, "PUT", podIdentity) + if err != nil { + return diag.FromErr(err) + } + log.Printf("Update Pod Identity response : %s", response) + + ticker := time.NewTicker(time.Duration(5) * time.Second) + defer ticker.Stop() + + uniqueId := podIdentity[0].ServiceAccountName + "/" + podIdentity[0].Namespace + d.SetId(uniqueId) + +LOOP: + for { + select { + case <-ctx.Done(): + log.Printf("pod identity %s operation timed out", uniqueId) + return diag.FromErr(fmt.Errorf("pod identity %s operation timed out", uniqueId)) + case <-ticker.C: + status, comments, err := getPodIdentityStatus(podIdentity[0], project_id, cluster_resp.ID) + if err != nil { + log.Println("error in getting pod identity status", err) + return diag.FromErr(err) + } + + switch status { + case "POD_IDENTITY_UPDATION_COMPLETE": + log.Printf("pod identity %s operation completed", uniqueId) + break LOOP + + case "POD_IDENTITY_UPDATION_FAILED": + log.Printf("pod identity %s operation failed", uniqueId) + return diag.Errorf("pod identity %s operation failed with errors: %s", uniqueId, comments) + + case "POD_IDENTITY_UPDATION_IN_PROGRESS", "POD_IDENTITY_UPDATION_PENDING": + log.Printf("pod identity %s operation", uniqueId) + + } + } + } + return diags +} + +func resourceEksPodIdentityUpsert(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + var diags diag.Diagnostics + log.Printf("pod identity upsert starts") + tflog := os.Getenv("TF_LOG") + if tflog == "TRACE" || tflog == "DEBUG" { + ctx = context.WithValue(ctx, "debug", "true") + } + + var podIdentity []*IAMPodIdentityAssociation + metadata := &Metadata{} + //rawConfig := d.GetRawConfig() + + if v, ok := d.Get("spec").([]interface{}); ok && len(v) > 0 { + podIdentity = expandIAMPodIdentityAssociationsConfig(v) + } else { + return 
diag.FromErr(fmt.Errorf("spec not specified")) + } + + if len(podIdentity) == 0 { + return diag.FromErr(errors.New("could not get pod identity associations")) + } + + if v, ok := d.Get("metadata").([]interface{}); ok && len(v) > 0 { + in := v[0].(map[string]interface{}) + metadata.clusterName = in["cluster_name"].(string) + metadata.projectName = in["project_name"].(string) + } + + resp, err := project.GetProjectByName(metadata.projectName) + if err != nil { + log.Printf("project does not exist, error %s", err.Error()) + return diag.FromErr(err) + } + p, err := project.NewProjectFromResponse([]byte(resp)) + if err != nil { + return diag.FromErr(err) + } else if p == nil { + d.SetId("") + return diags + } + project_id := p.ID + + cluster_resp, err := cluster.GetCluster(metadata.clusterName, project_id, "terraform") + if err != nil { + log.Printf("imported cluster was not created, error %s", err.Error()) + return diag.FromErr(err) + } + + auth := config.GetConfig().GetAppAuthProfile() + uri := fmt.Sprintf("/edge/v1/projects/%s/edges/%s/podidentity", project_id, cluster_resp.ID) + + log.Printf("payload response : %s", podIdentity[0].Namespace) + + response, err := auth.AuthAndRequest(uri, "POST", podIdentity) + if err != nil { + return diag.FromErr(err) + } + log.Printf("Create Pod Identity response : %s", response) + + ticker := time.NewTicker(time.Duration(5) * time.Second) + defer ticker.Stop() + + uniqueId := podIdentity[0].ServiceAccountName + "/" + podIdentity[0].Namespace + d.SetId(uniqueId) + time.Sleep(5 * time.Second) + +LOOP: + for { + select { + case <-ctx.Done(): + log.Printf("pod identity %s operation timed out", uniqueId) + return diag.FromErr(fmt.Errorf("pod identity %s operation timed out", uniqueId)) + case <-ticker.C: + status, comments, err := getPodIdentityStatus(podIdentity[0], project_id, cluster_resp.ID) + if err != nil { + log.Println("error in getting pod identity status", err) + return diag.FromErr(err) + } + + switch status { + case 
"POD_IDENTITY_CREATION_COMPLETE": + log.Printf("pod identity %s operation completed", uniqueId) + break LOOP + + case "POD_IDENTITY_CREATION_FAILED": + log.Printf("pod identity %s operation failed", uniqueId) + d.SetId("") + return diag.Errorf("pod identity %s operation failed with errors: %s", uniqueId, comments) + + case "POD_IDENTITY_CREATION_IN_PROGRESS", "POD_IDENTITY_CREATION_PENDING": + log.Printf("pod identity %s operation", uniqueId) + + } + } + } + return diags + +} + +func getPodIdentityStatus(podIdentity *IAMPodIdentityAssociation, projectId, clusterId string) (string, string, error) { + + auth := config.GetConfig().GetAppAuthProfile() + uri := fmt.Sprintf("/edge/v1/projects/%s/edges/%s/podidentity/%s/%s", projectId, clusterId, podIdentity.Namespace, podIdentity.ServiceAccountName) + + response, err := auth.AuthAndRequest(uri, "GET", "") + if err != nil { + return "", "", err + } + log.Printf("Get Pod Identity response : %s", response) + + decoder := json.NewDecoder(bytes.NewReader([]byte(response))) + + piaSpec := []*IAMPodIdentityAssociationOutput{} + + if err := decoder.Decode(&piaSpec); err != nil { + log.Println("error decoding pod identity spec") + return "", "", err + } + + if len(piaSpec) == 0 { + return "", "", err + } + + status := piaSpec[0].Status + comments := piaSpec[0].Comments + + return status, comments, nil +} + +func resourceEksPodIdentityRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + log.Println("READ POD IDENTITY START") + var diags diag.Diagnostics + clusterName, ok := d.Get("metadata.0.cluster_name").(string) + if !ok || clusterName == "" { + log.Print("Cluster name unable to be found") + return diag.FromErr(fmt.Errorf("%s", "cluster name is missing")) + } + projectName, ok := d.Get("metadata.0.project_name").(string) + if !ok || projectName == "" { + log.Print("Cluster project name unable to be found") + return diag.FromErr(fmt.Errorf("%s", "project name is missing")) + } + project_id, 
edge_id, err := getIdFromName(clusterName, projectName) + if err != nil { + return diag.FromErr(err) + } + + svc_name, ok := d.Get("spec.0.service_account_name").(string) + if !ok || svc_name == "" { + log.Print("Svc name unable to be found") + return diag.FromErr(fmt.Errorf("%s", "service account name is missing")) + } + namespace, ok := d.Get("spec.0.namespace").(string) + if !ok || namespace == "" { + log.Print("namespace unable to be found") + return diag.FromErr(fmt.Errorf("%s", "namespace is missing")) + } + auth := config.GetConfig().GetAppAuthProfile() + uri := fmt.Sprintf("/edge/v1/projects/%s/edges/%s/podidentity/%s/%s", project_id, edge_id, namespace, svc_name) + + response, err := auth.AuthAndRequest(uri, "GET", "") + if err != nil { + return diag.FromErr(err) + } + + log.Println("pod identity get response : ", response) + + decoder := json.NewDecoder(bytes.NewReader([]byte(response))) + + piaSpec := []*IAMPodIdentityAssociation{} + + piaStatus := []*IAMPodIdentityAssociationOutput{} + + if err := decoder.Decode(&piaSpec); err != nil { + log.Println("error decoding pod identity spec") + return diag.FromErr(err) + } + + decoder = json.NewDecoder(bytes.NewReader([]byte(response))) + + if err := decoder.Decode(&piaStatus); err != nil { + log.Println("error decoding pod identity status spec") + return diag.FromErr(err) + } + + v, ok := d.Get("spec").([]interface{}) + if !ok { + v = []interface{}{} + } + + spec := flattenIAMPodIdentityAssociations(piaSpec, v) + log.Printf("After flatten spec %s", spec) + err = d.Set("spec", spec) + if err != nil { + log.Printf("err setting pia spec %s", err.Error()) + return diag.FromErr(err) + } + + return diags +} + +func resourceEksPodIdentityDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + var diags diag.Diagnostics + clusterName, ok := d.Get("metadata.0.cluster_name").(string) + if !ok || clusterName == "" { + log.Print("Cluster name unable to be found") + return 
diag.FromErr(fmt.Errorf("%s", "cluster name is missing")) + } + projectName, ok := d.Get("metadata.0.project_name").(string) + if !ok || projectName == "" { + log.Print("Cluster project name unable to be found") + return diag.FromErr(fmt.Errorf("%s", "project name is missing")) + } + + project_id, edge_id, err := getIdFromName(clusterName, projectName) + if err != nil { + return diag.FromErr(err) + } + + podIdentity := []*IAMPodIdentityAssociation{} + + //rawConfig := d.GetRawConfig() + + if v, ok := d.Get("spec").([]interface{}); ok && len(v) > 0 { + podIdentity = expandIAMPodIdentityAssociationsConfig(v) + } else { + return diag.FromErr(fmt.Errorf("spec not specified")) + } + + if len(podIdentity) == 0 { + return diag.FromErr(errors.New("could not get pod identity associations")) + } + + log.Println("Delete started, pod identity to delete ", podIdentity[0].Namespace) + + // payload, err := json.Marshal(podIdentity) + // if err != nil { + // return diag.FromErr(err) + // } + + auth := config.GetConfig().GetAppAuthProfile() + uri := fmt.Sprintf("/edge/v1/projects/%s/edges/%s/podidentity", project_id, edge_id) + + //log.Println("Delete started, pod identity to delete ", string(payload)) + + response, err := auth.AuthAndRequest(uri, "DELETE", podIdentity) + if err != nil { + return diag.FromErr(err) + } + + log.Println("pod identity get response : ", response) + + return diags +} + +func getIdFromName(clusterName string, projectName string) (string, string, error) { + resp, err := project.GetProjectByName(projectName) + if err != nil { + log.Printf("project does not exist, error %s", err.Error()) + return "", "", err + } + p, err := project.NewProjectFromResponse([]byte(resp)) + if err != nil { + return "", "", err + } else if p == nil { + return "", "", err + } + project_id := p.ID + + cluster_resp, err := cluster.GetCluster(clusterName, project_id, "terraform") + if err != nil { + log.Printf("imported cluster was not created, error %s", err.Error()) + return "", 
"", err + } + + return project_id, cluster_resp.ID, nil +} diff --git a/rafay/resource_environment.go b/rafay/resource_environment.go index d9df02b3..23761c0f 100644 --- a/rafay/resource_environment.go +++ b/rafay/resource_environment.go @@ -309,6 +309,10 @@ func expandEnvironmentSpec(p []interface{}) (*eaaspb.EnvironmentSpec, error) { spec.Files = expandCommonpbFiles(f) } + if so, ok := in["schedule_optouts"].([]interface{}); ok && len(so) > 0 { + spec.ScheduleOptouts = expandScheduleOptOuts(so) + } + return spec, nil } @@ -333,6 +337,31 @@ func expandTemplate(p []interface{}) (*eaaspb.EnvironmentTemplateCompoundRef, er return obj, nil } +func expandScheduleOptOuts(p []interface{}) []*eaaspb.ScheduleOptOut { + soo := make([]*eaaspb.ScheduleOptOut, 0) + if len(p) == 0 || p[0] == nil { + return soo + } + + for i := range p { + obj := eaaspb.ScheduleOptOut{} + in := p[i].(map[string]interface{}) + + if v, ok := in["name"].(string); ok && len(v) > 0 { + obj.Name = v + } + + if v, ok := in["duration"].(string); ok && len(v) > 0 { + obj.Duration = v + } + + soo = append(soo, &obj) + + } + + return soo +} + // Flatteners func flattenEnvironment(d *schema.ResourceData, in *eaaspb.Environment) error { @@ -407,6 +436,15 @@ func flattenEnvironmentSpec(in *eaaspb.EnvironmentSpec, p []interface{}) ([]inte } obj["files"] = flattenCommonpbFiles(in.Files) + if len(in.ScheduleOptouts) > 0 { + v, ok := obj["schedule_optouts"].([]interface{}) + if !ok { + v = []interface{}{} + } + + obj["schedule_optouts"] = flattenScheduleOptOuts(in.ScheduleOptouts, v) + } + return []interface{}{obj}, nil } @@ -432,6 +470,34 @@ func flattenTemplate(input *eaaspb.EnvironmentTemplateCompoundRef, p []interface return []interface{}{obj} } +func flattenScheduleOptOuts(input []*eaaspb.ScheduleOptOut, p []interface{}) []interface{} { + log.Println("flatten schedule optout start") + if input == nil { + return nil + } + + out := make([]interface{}, len(input)) + for i, in := range input { + 
log.Println("flatten schedule optout ", in) + obj := map[string]interface{}{} + if i < len(p) && p[i] != nil { + obj = p[i].(map[string]interface{}) + } + + if len(in.Name) > 0 { + obj["name"] = in.Name + } + + if len(in.Duration) > 0 { + obj["duration"] = in.Duration + } + + out[i] = &obj + } + + return out +} + func resourceEnvironmentImport(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { log.Printf("Environment Import Starts") diff --git a/rafay/resource_environmenttemplate.go b/rafay/resource_environmenttemplate.go index 98837efe..ee7cbc89 100644 --- a/rafay/resource_environmenttemplate.go +++ b/rafay/resource_environmenttemplate.go @@ -244,7 +244,10 @@ func expandEnvironmentTemplateSpec(p []interface{}) (*eaaspb.EnvironmentTemplate } if h, ok := in["hooks"].([]interface{}); ok && len(h) > 0 { - spec.Hooks = expandEnvironmentHooks(h) + spec.Hooks, err = expandEnvironmentHooks(h) + if err != nil { + return nil, err + } } if ag, ok := in["agents"].([]interface{}); ok && len(ag) > 0 { @@ -263,9 +266,116 @@ func expandEnvironmentTemplateSpec(p []interface{}) (*eaaspb.EnvironmentTemplate spec.AgentOverride = expandEaasAgentOverrideOptions(v) } + if s, ok := in["schedules"].([]interface{}); ok && len(s) > 0 { + spec.Schedules, err = expandSchedules(s) + if err != nil { + return nil, err + } + } + return spec, nil } +func expandSchedules(p []interface{}) ([]*eaaspb.Schedules, error) { + schds := make([]*eaaspb.Schedules, 0) + if len(p) == 0 || p[0] == nil { + return schds, nil + } + var err error + + for i := range p { + schd := eaaspb.Schedules{} + in := p[i].(map[string]interface{}) + + if v, ok := in["name"].(string); ok && len(v) > 0 { + schd.Name = v + } + + if v, ok := in["description"].(string); ok && len(v) > 0 { + schd.Description = v + } + + if v, ok := in["type"].(string); ok && len(v) > 0 { + schd.Type = v + } + + if v, ok := in["cadence"].([]interface{}); ok && len(v) > 0 { + schd.Cadence = expandCadence(v) + } + + if v, ok := 
in["context"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + schd.Context = expandConfigContextCompoundRef(v[0].(map[string]any)) + } + + if v, ok := in["opt_out_options"].([]interface{}); ok && len(v) > 0 { + schd.OptOutOptions, err = expandOptOutOptions(v) + if err != nil { + return nil, err + } + } + + if h, ok := in["workflows"].([]interface{}); ok && len(h) > 0 { + schd.Workflows, err = expandCustomProviderOptions(h) + if err != nil { + return nil, err + } + } + + schds = append(schds, &schd) + } + + return schds, nil +} + +func expandOptOutOptions(p []interface{}) (*eaaspb.OptOutOptions, error) { + ooo := eaaspb.OptOutOptions{} + if len(p) == 0 || p[0] == nil { + return &ooo, nil + } + + var err error + in := p[0].(map[string]interface{}) + if h, ok := in["allow_opt_out"].([]interface{}); ok && len(h) > 0 { + ooo.AllowOptOut = expandBoolValue(h) + } + if v, ok := in["max_allowed_duration"].(string); ok && len(v) > 0 { + ooo.MaxAllowedDuration = v + } + if v, ok := in["max_allowed_times"].(int); ok { + ooo.MaxAllowedTimes = int32(v) + } + if h, ok := in["approval"].([]interface{}); ok && len(h) > 0 { + ooo.Approval, err = expandCustomProviderOptions(h) + if err != nil { + return nil, err + } + } + + return &ooo, nil +} + +func expandCadence(p []interface{}) *eaaspb.ScheduleOptions { + cadence := eaaspb.ScheduleOptions{} + if len(p) == 0 || p[0] == nil { + return &cadence + } + + in := p[0].(map[string]interface{}) + if v, ok := in["cron_expression"].(string); ok && len(v) > 0 { + cadence.CronExpression = v + } + + if v, ok := in["cron_timezone"].(string); ok && len(v) > 0 { + cadence.CronTimezone = v + } + + if v, ok := in["time_to_live"].(string); ok && len(v) > 0 { + cadence.TimeToLive = v + } + + return &cadence +} + func expandEaasAgentOverrideOptions(p []interface{}) *eaaspb.AgentOverrideOptions { agentOverrideOptions := &eaaspb.AgentOverrideOptions{} if len(p) == 0 || p[0] == nil { @@ -373,32 +483,45 @@ func expandDependsOn(p []interface{}) 
[]*commonpb.ResourceNameAndVersionRef { return dependson } -func expandEnvironmentHooks(p []interface{}) *eaaspb.EnvironmentHooks { +func expandEnvironmentHooks(p []interface{}) (*eaaspb.EnvironmentHooks, error) { hooks := &eaaspb.EnvironmentHooks{} if len(p) == 0 || p[0] == nil { - return hooks + return hooks, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["on_completion"].([]interface{}); ok && len(h) > 0 { - hooks.OnCompletion = expandEaasHooks(h) + hooks.OnCompletion, err = expandEaasHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["on_success"].([]interface{}); ok && len(h) > 0 { - hooks.OnSuccess = expandEaasHooks(h) + hooks.OnSuccess, err = expandEaasHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["on_failure"].([]interface{}); ok && len(h) > 0 { - hooks.OnFailure = expandEaasHooks(h) + hooks.OnFailure, err = expandEaasHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["on_init"].([]interface{}); ok && len(h) > 0 { - hooks.OnInit = expandEaasHooks(h) + hooks.OnInit, err = expandEaasHooks(h) + if err != nil { + return nil, err + } } - return hooks + return hooks, nil } @@ -490,6 +613,15 @@ func flattenEnvironmentTemplateSpec(in *eaaspb.EnvironmentTemplateSpec, p []inte obj["agent_override"] = flattenEaasAgentOverrideOptions(in.AgentOverride) + if len(in.Schedules) > 0 { + v, ok := obj["schedules"].([]interface{}) + if !ok { + v = []interface{}{} + } + + obj["schedules"] = flattenSchedules(in.Schedules, v) + } + return []interface{}{obj}, nil } @@ -506,6 +638,102 @@ func flattenEaasAgentOverrideOptions(in *eaaspb.AgentOverrideOptions) []interfac return []interface{}{obj} } +func flattenSchedules(input []*eaaspb.Schedules, p []interface{}) []interface{} { + log.Println("flatten schedules start") + if input == nil { + return nil + } + + out := make([]interface{}, len(input)) + for i, in := range input { + log.Println("flatten schedule ", in) + obj := map[string]interface{}{} + if i < 
len(p) && p[i] != nil { + obj = p[i].(map[string]interface{}) + } + + if len(in.Name) > 0 { + obj["name"] = in.Name + } + + if len(in.Description) > 0 { + obj["description"] = in.Description + } + + if len(in.Type) > 0 { + obj["type"] = in.Type + } + + if in.Cadence != nil { + v, ok := obj["cadence"].([]interface{}) + if !ok { + v = []interface{}{} + } + obj["cadence"] = flattenCadence(in.Cadence, v) + } + if in.Context != nil { + cc := flattenConfigContextCompoundRef(in.Context) + obj["context"] = []interface{}{cc} + } + + if in.OptOutOptions != nil { + v, ok := obj["opt_out_options"].([]interface{}) + if !ok { + v = []interface{}{} + } + obj["opt_out_options"] = flattenOptOutOptions(in.OptOutOptions, v) + } + obj["workflows"] = flattenCustomProviderOptions(in.Workflows) + + out[i] = &obj + } + + return out +} + +func flattenOptOutOptions(in *eaaspb.OptOutOptions, p []interface{}) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + if len(p) != 0 && p[0] != nil { + obj = p[0].(map[string]interface{}) + } + + obj["allow_opt_out"] = flattenBoolValue(in.AllowOptOut) + obj["max_allowed_duration"] = in.MaxAllowedDuration + obj["max_allowed_times"] = in.MaxAllowedTimes + obj["approval"] = flattenCustomProviderOptions(in.Approval) + + return []interface{}{obj} +} + +func flattenCadence(in *eaaspb.ScheduleOptions, p []interface{}) []interface{} { + if in == nil { + return nil + } + + obj := make(map[string]interface{}) + if len(p) != 0 && p[0] != nil { + obj = p[0].(map[string]interface{}) + } + + if len(in.CronExpression) > 0 { + obj["cron_expression"] = in.CronExpression + } + + if len(in.CronTimezone) > 0 { + obj["cron_timezone"] = in.CronTimezone + } + + if len(in.TimeToLive) > 0 { + obj["time_to_live"] = in.TimeToLive + } + + return []interface{}{obj} +} + func flattenEnvironmentHooks(in *eaaspb.EnvironmentHooks, p []interface{}) []interface{} { log.Println("flatten environment hooks start") if in == nil { diff --git 
a/rafay/resource_namespace.go b/rafay/resource_namespace.go index 7cd5393d..30cd77dc 100644 --- a/rafay/resource_namespace.go +++ b/rafay/resource_namespace.go @@ -109,6 +109,7 @@ func resourceNamespaceCreate(ctx context.Context, d *schema.ResourceData, m inte if checkStandardInputTextError(diags[0].Summary) { return diags } + namespaceCreateError := diag.FromErr(fmt.Errorf("%s", diags[0].Summary)) tflog := os.Getenv("TF_LOG") if tflog == "TRACE" || tflog == "DEBUG" { ctx = context.WithValue(ctx, "debug", "true") @@ -117,7 +118,7 @@ func resourceNamespaceCreate(ctx context.Context, d *schema.ResourceData, m inte ns, err := expandNamespace(d) if err != nil { log.Printf("namespace expandNamespace error") - return diag.FromErr(err) + return namespaceCreateError } if v, ok := d.Get("impersonate").(string); ok && len(v) > 0 { defer ResetImpersonateUser() @@ -126,14 +127,14 @@ func resourceNamespaceCreate(ctx context.Context, d *schema.ResourceData, m inte // with ORG Admin role isOrgAdmin, err := user.IsOrgAdmin(asUser) if err != nil { - return diag.FromErr(err) + return namespaceCreateError } if isOrgAdmin { return diag.FromErr(fmt.Errorf("%s", "--as-user cannot have ORGADMIN role")) } config.ApiKey, config.ApiSecret, err = user.GetUserAPIKey(asUser) if err != nil { - return diag.FromErr(err) + return namespaceCreateError } } auth := config.GetConfig().GetAppAuthProfile() @@ -147,7 +148,8 @@ func resourceNamespaceCreate(ctx context.Context, d *schema.ResourceData, m inte Project: ns.Metadata.Project, }) if err != nil { - return diag.FromErr(err) + log.Println("Error while namespace cleanup :", err.Error()) + return namespaceCreateError } } return diags diff --git a/rafay/resource_pipeline.go b/rafay/resource_pipeline.go index 44648158..66305fc5 100644 --- a/rafay/resource_pipeline.go +++ b/rafay/resource_pipeline.go @@ -148,7 +148,7 @@ type stageSpecConfig struct { SourceRepo *gitopspb.SystemSyncRepo `protobuf:"bytes,5,opt,name=sourceRepo,proto3" 
json:"sourceRepo,omitempty"` DestinationRepo *gitopspb.SystemSyncRepo `protobuf:"bytes,6,opt,name=destinationRepo,proto3" json:"destinationRepo,omitempty"` SourceAsDestination bool `protobuf:"varint,7,opt,name=sourceAsDestination,proto3" json:"sourceAsDestination,omitempty"` - CommitterEmail string `protobuf:"bytes,8,opt,name=committorEmail,proto3" json:"committorEmail,omitempty"` + CommitterEmail string `protobuf:"bytes,8,opt,name=committerEmail,proto3" json:"committerEmail,omitempty"` } type stageSpecConfigWorkloadTemplateOverrides struct { @@ -1228,7 +1228,7 @@ func expandWebhookTriggerGit(p []interface{}) *gitopspb.WebhookTriggerConfig_Git obj.Git.Revision = v } - if v, ok := in["path"].([]interface{}); ok && len(v) > 0 { + if v, ok := in["paths"].([]interface{}); ok && len(v) > 0 { obj.Git.Paths = expandCommonpbFiles(v) } @@ -2272,9 +2272,7 @@ func flattenTriggerConfigRepos(tSpec *triggerSpec, p []interface{}) ([]interface retNil = false } - if tSpec.Config.Repo.Paths != nil { - obj["paths"] = flattenCommonpbFiles(tSpec.Config.Repo.Paths) - } + obj["paths"] = flattenCommonpbFiles(tSpec.Config.Repo.Paths) if len(tSpec.Config.Repo.ChartName) > 0 { obj["chart_name"] = tSpec.Config.Repo.ChartName diff --git a/rafay/resource_resourcetemplate.go b/rafay/resource_resourcetemplate.go index 62d045b0..71cba372 100644 --- a/rafay/resource_resourcetemplate.go +++ b/rafay/resource_resourcetemplate.go @@ -227,8 +227,12 @@ func expandResourceTemplateSpec(p []interface{}) (*eaaspb.ResourceTemplateSpec, spec.Provider = p } + var err error if po, ok := in["provider_options"].([]interface{}); ok && len(po) > 0 { - spec.ProviderOptions = expandProviderOptions(po) + spec.ProviderOptions, err = expandProviderOptions(po) + if err != nil { + return nil, err + } } if ro, ok := in["repository_options"].([]interface{}); ok && len(ro) > 0 { @@ -244,7 +248,10 @@ func expandResourceTemplateSpec(p []interface{}) (*eaaspb.ResourceTemplateSpec, } if h, ok := in["hooks"].([]interface{}); ok && 
len(h) > 0 { - spec.Hooks = expandResourceHooks(h) + spec.Hooks, err = expandResourceHooks(h) + if err != nil { + return nil, err + } } if ag, ok := in["agents"].([]interface{}); ok && len(ag) > 0 { @@ -256,19 +263,23 @@ func expandResourceTemplateSpec(p []interface{}) (*eaaspb.ResourceTemplateSpec, } if ad, ok := in["artifact_driver"].([]interface{}); ok && len(ad) > 0 { - spec.ArtifactDriver = expandWorkflowHandlerCompoundRef(ad) + spec.ArtifactDriver, err = expandWorkflowHandlerCompoundRef(ad) + if err != nil { + return nil, err + } } return spec, nil } -func expandProviderOptions(p []interface{}) *eaaspb.ResourceTemplateProviderOptions { +func expandProviderOptions(p []interface{}) (*eaaspb.ResourceTemplateProviderOptions, error) { po := &eaaspb.ResourceTemplateProviderOptions{} if len(p) == 0 || p[0] == nil { - return po + return po, nil } in := p[0].(map[string]interface{}) + var err error if tp, ok := in["terraform"].([]interface{}); ok && len(tp) > 0 { po.Terraform = expandTerraformProviderOptions(tp) } @@ -286,7 +297,10 @@ func expandProviderOptions(p []interface{}) *eaaspb.ResourceTemplateProviderOpti } if p, ok := in["driver"].([]interface{}); ok && len(p) > 0 { - po.Driver = expandWorkflowHandlerCompoundRef(p) + po.Driver, err = expandWorkflowHandlerCompoundRef(p) + if err != nil { + return nil, err + } } if p, ok := in["open_tofu"].([]interface{}); ok && len(p) > 0 { @@ -294,14 +308,17 @@ func expandProviderOptions(p []interface{}) *eaaspb.ResourceTemplateProviderOpti } if w, ok := in["custom"].([]interface{}); ok && len(p) > 0 { - po.Custom = expandCustomProviderOptions(w) + po.Custom, err = expandCustomProviderOptions(w) + if err != nil { + return nil, err + } } if p, ok := in["hcp_terraform"].([]interface{}); ok && len(p) > 0 { po.HcpTerraform = expandHcpTerraformProviderOptions(p) } - return po + return po, nil } @@ -348,51 +365,71 @@ func expandContexts(p []interface{}) []*eaaspb.ConfigContextCompoundRef { return ctxs } -func expandResourceHooks(p 
[]interface{}) *eaaspb.ResourceHooks { +func expandResourceHooks(p []interface{}) (*eaaspb.ResourceHooks, error) { hooks := &eaaspb.ResourceHooks{} if len(p) == 0 || p[0] == nil { - return hooks + return hooks, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["on_completion"].([]interface{}); ok && len(h) > 0 { - hooks.OnCompletion = expandEaasHooks(h) + hooks.OnCompletion, err = expandEaasHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["on_success"].([]interface{}); ok && len(h) > 0 { - hooks.OnSuccess = expandEaasHooks(h) + hooks.OnSuccess, err = expandEaasHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["on_failure"].([]interface{}); ok && len(h) > 0 { - hooks.OnFailure = expandEaasHooks(h) + hooks.OnFailure, err = expandEaasHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["on_init"].([]interface{}); ok && len(h) > 0 { - hooks.OnInit = expandEaasHooks(h) + hooks.OnInit, err = expandEaasHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["provider"].([]interface{}); ok && len(h) > 0 { - hooks.Provider = expandProviderHooks(h) + hooks.Provider, err = expandProviderHooks(h) + if err != nil { + return nil, err + } } - return hooks + return hooks, nil } -func expandCustomProviderOptions(p []interface{}) *eaaspb.CustomProviderOptions { +func expandCustomProviderOptions(p []interface{}) (*eaaspb.CustomProviderOptions, error) { if len(p) == 0 || p[0] == nil { - return nil + return nil, nil } wfProviderOptions := &eaaspb.CustomProviderOptions{} in := p[0].(map[string]interface{}) + var err error if h, ok := in["tasks"].([]interface{}); ok && len(h) > 0 { - wfProviderOptions.Tasks = expandEaasHooks(h) + wfProviderOptions.Tasks, err = expandEaasHooks(h) + if err != nil { + return nil, err + } } - return wfProviderOptions + return wfProviderOptions, nil } @@ -612,379 +649,537 @@ func expandPulumiProviderOptions(p []interface{}) *eaaspb.PulumiProviderOptions return ppo } -func 
expandProviderHooks(p []interface{}) *eaaspb.ResourceTemplateProviderHooks { +func expandProviderHooks(p []interface{}) (*eaaspb.ResourceTemplateProviderHooks, error) { rtph := &eaaspb.ResourceTemplateProviderHooks{} if len(p) == 0 || p[0] == nil { - return rtph + return rtph, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["terraform"].([]interface{}); ok && len(h) > 0 { - rtph.Terraform = expandTerraformProviderHooks(h) + rtph.Terraform, err = expandTerraformProviderHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["pulumi"].([]interface{}); ok && len(h) > 0 { - rtph.Pulumi = expandPulumiProviderHooks(h) + rtph.Pulumi, err = expandPulumiProviderHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["open_tofu"].([]interface{}); ok && len(h) > 0 { - rtph.OpenTofu = expandOpenTofuProviderHooks(h) + rtph.OpenTofu, err = expandOpenTofuProviderHooks(h) + if err != nil { + return nil, err + } } if h, oj := in["hcp_terraform"].([]interface{}); oj && len(h) > 0 { - rtph.HcpTerraform = expandHcpTerraformProviderHooks(h) + rtph.HcpTerraform, err = expandHcpTerraformProviderHooks(h) + if err != nil { + return nil, err + } } if h, oj := in["system"].([]interface{}); oj && len(h) > 0 { - rtph.System = expandSystemProviderHooks(h) + rtph.System, err = expandSystemProviderHooks(h) + if err != nil { + return nil, err + } } - return rtph + return rtph, nil } -func expandTerraformProviderHooks(p []interface{}) *eaaspb.TerraformProviderHooks { +func expandTerraformProviderHooks(p []interface{}) (*eaaspb.TerraformProviderHooks, error) { tph := &eaaspb.TerraformProviderHooks{} if len(p) == 0 || p[0] == nil { - return tph + return tph, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["deploy"].([]interface{}); ok && len(h) > 0 { - tph.Deploy = expandTerraformDeployHooks(h) + tph.Deploy, err = expandTerraformDeployHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["destroy"].([]interface{}); ok 
&& len(h) > 0 { - tph.Destroy = expandTerraformDestroyHooks(h) + tph.Destroy, err = expandTerraformDestroyHooks(h) + if err != nil { + return nil, err + } } - return tph + return tph, nil } -func expandPulumiProviderHooks(p []interface{}) *eaaspb.PulumiProviderHooks { +func expandPulumiProviderHooks(p []interface{}) (*eaaspb.PulumiProviderHooks, error) { pph := &eaaspb.PulumiProviderHooks{} if len(p) == 0 || p[0] == nil { - return pph + return pph, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["deploy"].([]interface{}); ok { - pph.Deploy = expandPulumiDeployHooks(h) + pph.Deploy, err = expandPulumiDeployHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["destroy"].([]interface{}); ok { - pph.Destroy = expandPulumiDestroyHooks(h) + pph.Destroy, err = expandPulumiDestroyHooks(h) + if err != nil { + return nil, err + } } - return pph + return pph, nil } -func expandOpenTofuProviderHooks(p []interface{}) *eaaspb.OpenTofuProviderHooks { +func expandOpenTofuProviderHooks(p []interface{}) (*eaaspb.OpenTofuProviderHooks, error) { tph := &eaaspb.OpenTofuProviderHooks{} if len(p) == 0 || p[0] == nil { - return tph + return tph, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["deploy"].([]interface{}); ok && len(h) > 0 { - tph.Deploy = expandOpenTofuDeployHooks(h) + tph.Deploy, err = expandOpenTofuDeployHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["destroy"].([]interface{}); ok && len(h) > 0 { - tph.Destroy = expandOpenTofuDestroyHooks(h) + tph.Destroy, err = expandOpenTofuDestroyHooks(h) + if err != nil { + return nil, err + } } - return tph + return tph, nil } -func expandHcpTerraformProviderHooks(p []interface{}) *eaaspb.HCPTerraformProviderHooks { +func expandHcpTerraformProviderHooks(p []interface{}) (*eaaspb.HCPTerraformProviderHooks, error) { tph := &eaaspb.HCPTerraformProviderHooks{} if len(p) == 0 || p[0] == nil { - return tph + return tph, nil } in := 
p[0].(map[string]interface{}) + var err error if h, ok := in["deploy"].([]interface{}); ok && len(h) > 0 { - tph.Deploy = expandHcpTerraformDeployHooks(h) + tph.Deploy, err = expandHcpTerraformDeployHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["destroy"].([]interface{}); ok && len(h) > 0 { - tph.Destroy = expandHcpTerraformDestroyHooks(h) + tph.Destroy, err = expandHcpTerraformDestroyHooks(h) + if err != nil { + return nil, err + } } - return tph + return tph, nil } -func expandSystemProviderHooks(p []interface{}) *eaaspb.SystemProviderHooks { +func expandSystemProviderHooks(p []interface{}) (*eaaspb.SystemProviderHooks, error) { sph := &eaaspb.SystemProviderHooks{} if len(p) == 0 || p[0] == nil { - return sph + return sph, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["deploy"].([]interface{}); ok && len(h) > 0 { - sph.Deploy = expandSystemDeployHooks(h) + sph.Deploy, err = expandSystemDeployHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["destroy"].([]interface{}); ok && len(h) > 0 { - sph.Destroy = expandSystemDestroyHooks(h) + sph.Destroy, err = expandSystemDestroyHooks(h) + if err != nil { + return nil, err + } } - return sph + return sph, nil } -func expandTerraformDeployHooks(p []interface{}) *eaaspb.TerraformDeployHooks { +func expandTerraformDeployHooks(p []interface{}) (*eaaspb.TerraformDeployHooks, error) { tdh := &eaaspb.TerraformDeployHooks{} if len(p) == 0 || p[0] == nil { - return tdh + return tdh, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["init"].([]interface{}); ok && len(h) > 0 { - tdh.Init = expandLifecycleEventHooks(h) + tdh.Init, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["plan"].([]interface{}); ok && len(h) > 0 { - tdh.Plan = expandLifecycleEventHooks(h) + tdh.Plan, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["apply"].([]interface{}); ok && len(h) > 0 
{ - tdh.Apply = expandLifecycleEventHooks(h) + tdh.Apply, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["output"].([]interface{}); ok && len(h) > 0 { - tdh.Output = expandLifecycleEventHooks(h) + tdh.Output, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } - return tdh + return tdh, nil } -func expandTerraformDestroyHooks(p []interface{}) *eaaspb.TerraformDestroyHooks { +func expandTerraformDestroyHooks(p []interface{}) (*eaaspb.TerraformDestroyHooks, error) { tdh := &eaaspb.TerraformDestroyHooks{} if len(p) == 0 || p[0] == nil { - return tdh + return tdh, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["init"].([]interface{}); ok && len(h) > 0 { - tdh.Init = expandLifecycleEventHooks(h) + tdh.Init, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["plan"].([]interface{}); ok && len(h) > 0 { - tdh.Plan = expandLifecycleEventHooks(h) + tdh.Plan, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["destroy"].([]interface{}); ok && len(h) > 0 { - tdh.Destroy = expandLifecycleEventHooks(h) + tdh.Destroy, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } - return tdh + return tdh, nil } -func expandOpenTofuDeployHooks(p []interface{}) *eaaspb.OpenTofuDeployHooks { +func expandOpenTofuDeployHooks(p []interface{}) (*eaaspb.OpenTofuDeployHooks, error) { tdh := &eaaspb.OpenTofuDeployHooks{} if len(p) == 0 || p[0] == nil { - return tdh + return tdh, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["init"].([]interface{}); ok && len(h) > 0 { - tdh.Init = expandLifecycleEventHooks(h) + tdh.Init, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["plan"].([]interface{}); ok && len(h) > 0 { - tdh.Plan = expandLifecycleEventHooks(h) + tdh.Plan, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, 
err + } } if h, ok := in["apply"].([]interface{}); ok && len(h) > 0 { - tdh.Apply = expandLifecycleEventHooks(h) + tdh.Apply, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["output"].([]interface{}); ok && len(h) > 0 { - tdh.Output = expandLifecycleEventHooks(h) + tdh.Output, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } - return tdh + return tdh, nil } -func expandOpenTofuDestroyHooks(p []interface{}) *eaaspb.OpenTofuDestroyHooks { +func expandOpenTofuDestroyHooks(p []interface{}) (*eaaspb.OpenTofuDestroyHooks, error) { tdh := &eaaspb.OpenTofuDestroyHooks{} if len(p) == 0 || p[0] == nil { - return tdh + return tdh, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["init"].([]interface{}); ok && len(h) > 0 { - tdh.Init = expandLifecycleEventHooks(h) + tdh.Init, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["plan"].([]interface{}); ok && len(h) > 0 { - tdh.Plan = expandLifecycleEventHooks(h) + tdh.Plan, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["destroy"].([]interface{}); ok && len(h) > 0 { - tdh.Destroy = expandLifecycleEventHooks(h) + tdh.Destroy, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } - return tdh + return tdh, nil } -func expandHcpTerraformDeployHooks(p []interface{}) *eaaspb.HCPTerraformDeployHooks { +func expandHcpTerraformDeployHooks(p []interface{}) (*eaaspb.HCPTerraformDeployHooks, error) { tdh := &eaaspb.HCPTerraformDeployHooks{} if len(p) == 0 || p[0] == nil { - return tdh + return tdh, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["init"].([]interface{}); ok && len(h) > 0 { - tdh.Init = expandLifecycleEventHooks(h) + tdh.Init, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["plan"].([]interface{}); ok && len(h) > 0 { - tdh.Plan = 
expandLifecycleEventHooks(h) + tdh.Plan, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["apply"].([]interface{}); ok && len(h) > 0 { - tdh.Apply = expandLifecycleEventHooks(h) + tdh.Apply, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["output"].([]interface{}); ok && len(h) > 0 { - tdh.Output = expandLifecycleEventHooks(h) + tdh.Output, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } - return tdh + return tdh, nil } -func expandHcpTerraformDestroyHooks(p []interface{}) *eaaspb.HCPTerraformDestroyHooks { +func expandHcpTerraformDestroyHooks(p []interface{}) (*eaaspb.HCPTerraformDestroyHooks, error) { tdh := &eaaspb.HCPTerraformDestroyHooks{} if len(p) == 0 || p[0] == nil { - return tdh + return tdh, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["init"].([]interface{}); ok && len(h) > 0 { - tdh.Init = expandLifecycleEventHooks(h) + tdh.Init, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["plan"].([]interface{}); ok && len(h) > 0 { - tdh.Plan = expandLifecycleEventHooks(h) + tdh.Plan, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["destroy"].([]interface{}); ok && len(h) > 0 { - tdh.Destroy = expandLifecycleEventHooks(h) + tdh.Destroy, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } - return tdh + return tdh, nil } -func expandSystemDeployHooks(p []interface{}) *eaaspb.SystemDeployHooks { +func expandSystemDeployHooks(p []interface{}) (*eaaspb.SystemDeployHooks, error) { sdh := &eaaspb.SystemDeployHooks{} if len(p) == 0 || p[0] == nil { - return sdh + return sdh, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["apply"].([]interface{}); ok && len(h) > 0 { - sdh.Apply = expandLifecycleEventHooks(h) + sdh.Apply, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } 
} - return sdh + return sdh, nil } -func expandSystemDestroyHooks(p []interface{}) *eaaspb.SystemDestroyHooks { +func expandSystemDestroyHooks(p []interface{}) (*eaaspb.SystemDestroyHooks, error) { sdh := &eaaspb.SystemDestroyHooks{} if len(p) == 0 || p[0] == nil { - return sdh + return sdh, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["destroy"].([]interface{}); ok && len(h) > 0 { - sdh.Destroy = expandLifecycleEventHooks(h) + sdh.Destroy, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } - return sdh + return sdh, nil } -func expandPulumiDeployHooks(p []interface{}) *eaaspb.PulumiDeployHooks { +func expandPulumiDeployHooks(p []interface{}) (*eaaspb.PulumiDeployHooks, error) { pdh := &eaaspb.PulumiDeployHooks{} if len(p) == 0 || p[0] == nil { - return pdh + return pdh, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["login"].([]interface{}); ok && len(h) > 0 { - pdh.Login = expandLifecycleEventHooks(h) + pdh.Login, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["preview"].([]interface{}); ok && len(h) > 0 { - pdh.Preview = expandLifecycleEventHooks(h) + pdh.Preview, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["up"].([]interface{}); ok && len(h) > 0 { - pdh.Up = expandLifecycleEventHooks(h) + pdh.Up, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["output"].([]interface{}); ok && len(h) > 0 { - pdh.Output = expandLifecycleEventHooks(h) + pdh.Output, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } - return pdh + return pdh, nil } -func expandPulumiDestroyHooks(p []interface{}) *eaaspb.PulumiDestroyHooks { +func expandPulumiDestroyHooks(p []interface{}) (*eaaspb.PulumiDestroyHooks, error) { pdh := &eaaspb.PulumiDestroyHooks{} if len(p) == 0 || p[0] == nil { - return pdh + return pdh, nil } in := p[0].(map[string]interface{}) 
+ var err error if h, ok := in["login"].([]interface{}); ok && len(h) > 0 { - pdh.Login = expandLifecycleEventHooks(h) + pdh.Login, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["preview"].([]interface{}); ok && len(h) > 0 { - pdh.Preview = expandLifecycleEventHooks(h) + pdh.Preview, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["destroy"].([]interface{}); ok && len(h) > 0 { - pdh.Destroy = expandLifecycleEventHooks(h) + pdh.Destroy, err = expandLifecycleEventHooks(h) + if err != nil { + return nil, err + } } - return pdh + return pdh, nil } -func expandLifecycleEventHooks(p []interface{}) *eaaspb.LifecycleEventHooks { +func expandLifecycleEventHooks(p []interface{}) (*eaaspb.LifecycleEventHooks, error) { lh := &eaaspb.LifecycleEventHooks{} if len(p) == 0 || p[0] == nil { - return lh + return lh, nil } in := p[0].(map[string]interface{}) + var err error if h, ok := in["before"].([]interface{}); ok && len(h) > 0 { - lh.Before = expandEaasHooks(h) + lh.Before, err = expandEaasHooks(h) + if err != nil { + return nil, err + } } if h, ok := in["after"].([]interface{}); ok && len(h) > 0 { - lh.After = expandEaasHooks(h) + lh.After, err = expandEaasHooks(h) + if err != nil { + return nil, err + } } - return lh + return lh, nil } // Flatteners diff --git a/rafay/resource_workload.go b/rafay/resource_workload.go index bf2bc45f..35b4439f 100644 --- a/rafay/resource_workload.go +++ b/rafay/resource_workload.go @@ -12,10 +12,10 @@ import ( "github.com/RafaySystems/rctl/pkg/user" "github.com/RafaySystems/rafay-common/pkg/hub/client/options" - typed "github.com/RafaySystems/rafay-common/pkg/hub/client/typed" + "github.com/RafaySystems/rafay-common/pkg/hub/client/typed" "github.com/RafaySystems/rafay-common/pkg/hub/terraform/resource" "github.com/RafaySystems/rafay-common/proto/types/hub/appspb" - commonpb "github.com/RafaySystems/rafay-common/proto/types/hub/commonpb" + 
"github.com/RafaySystems/rafay-common/proto/types/hub/commonpb" "github.com/RafaySystems/rctl/pkg/versioninfo" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" diff --git a/rafay/structure_artifact.go b/rafay/structure_artifact.go index 4ceb3255..6ddb7a51 100644 --- a/rafay/structure_artifact.go +++ b/rafay/structure_artifact.go @@ -214,7 +214,7 @@ func ExpandArtifact(artifactType string, ap []interface{}) (*commonpb.ArtifactSp } if v, ok := in["set_string"].([]interface{}); ok && len(v) > 0 { for _, value := range v { - if value.(string) != "" { + if value != nil && value.(string) != "" { at.Options.SetString = append(at.Options.SetString, value.(string)) } } diff --git a/rafay/utils.go b/rafay/utils.go index fd1f6b4b..c9a96457 100644 --- a/rafay/utils.go +++ b/rafay/utils.go @@ -378,6 +378,22 @@ func expandPlacement(p []interface{}) *commonpb.PlacementSpec { obj.Labels = expandPlacementLabels(v) } + if v, ok := in["environment"].([]any); ok { + obj.Environment = expandEnvironmentPlacement(v) + } + + return obj +} + +func expandEnvironmentPlacement(p []any) *commonpb.Environment { + if len(p) == 0 || p[0] == nil { + return nil + } + obj := &commonpb.Environment{} + in := p[0].(map[string]any) + if v, ok := in["name"].(string); ok && len(v) > 0 { + obj.Name = v + } return obj } @@ -923,9 +939,26 @@ func flattenPlacement(in *commonpb.PlacementSpec) []interface{} { obj["selector"] = in.Selector } + if in.Environment != nil { + obj["environment"] = flattenEnvironmentPlacement(in.Environment) + } + return []interface{}{obj} } +func flattenEnvironmentPlacement(in *commonpb.Environment) []any { + if in == nil { + return nil + } + + obj := make(map[string]any) + if len(in.Name) > 0 { + obj["name"] = in.Name + } + + return []any{obj} +} + func flattenFile(in *File) []interface{} { if in == nil { return nil @@ -1556,15 +1589,15 @@ func flattenVariableOverrideOptions(input *eaaspb.VariableOverrideOptions) []int return []interface{}{obj} } -func expandEaasHooks(p 
[]interface{}) []*eaaspb.Hook { +func expandEaasHooks(p []interface{}) ([]*eaaspb.Hook, error) { hooks := make([]*eaaspb.Hook, 0) if len(p) == 0 { - return hooks + return hooks, nil } for indx := range p { if p[indx] == nil { - return nil + return nil, nil } hook := &eaaspb.Hook{} in := p[indx].(map[string]interface{}) @@ -1597,8 +1630,12 @@ func expandEaasHooks(p []interface{}) []*eaaspb.Hook { hook.OnFailure = n } + var err error if n, ok := in["driver"].([]interface{}); ok && len(n) > 0 { - hook.Driver = expandWorkflowHandlerCompoundRef(n) + hook.Driver, err = expandWorkflowHandlerCompoundRef(n) + if err != nil { + return nil, err + } } if n, ok := in["depends_on"].([]interface{}); ok && len(n) > 0 { @@ -1607,9 +1644,12 @@ func expandEaasHooks(p []interface{}) []*eaaspb.Hook { hooks = append(hooks, hook) + if d, ok := in["execute_once"].(bool); ok { + hook.ExecuteOnce = d + } } - return hooks + return hooks, nil } func expandHookOptions(p []interface{}) *eaaspb.HookOptions { @@ -1866,6 +1906,7 @@ func flattenEaasHooks(input []*eaaspb.Hook, p []interface{}) []interface{} { obj["on_failure"] = in.OnFailure obj["driver"] = flattenWorkflowHandlerCompoundRef(in.Driver) obj["depends_on"] = toArrayInterface(in.DependsOn) + obj["execute_once"] = in.ExecuteOnce out[i] = &obj log.Println("flatten hook setting object ", out[i]) @@ -2079,7 +2120,7 @@ func flattenBoolValue(in *datatypes.BoolValue) []interface{} { func expandV3MetaData(p []interface{}) *commonpb.Metadata { obj := &commonpb.Metadata{} - if p == nil || len(p) == 0 || p[0] == nil { + if len(p) == 0 || p[0] == nil { return obj } @@ -2155,9 +2196,9 @@ func checkStandardInputTextError(input string) bool { return strings.Contains(input, dns1123ValidationErrMsg) } -func expandWorkflowHandlerCompoundRef(p []interface{}) *eaaspb.WorkflowHandlerCompoundRef { +func expandWorkflowHandlerCompoundRef(p []interface{}) (*eaaspb.WorkflowHandlerCompoundRef, error) { if len(p) == 0 || p[0] == nil { - return nil + return nil, nil 
} wfHandler := &eaaspb.WorkflowHandlerCompoundRef{} @@ -2167,17 +2208,21 @@ func expandWorkflowHandlerCompoundRef(p []interface{}) *eaaspb.WorkflowHandlerCo wfHandler.Name = v } + var err error if v, ok := in["data"].([]interface{}); ok && len(v) > 0 { - wfHandler.Data = expandWorkflowHandlerInline(v) + wfHandler.Data, err = expandWorkflowHandlerInline(v) + if err != nil { + return nil, err + } } - return wfHandler + return wfHandler, nil } -func expandWorkflowHandlerInline(p []interface{}) *eaaspb.WorkflowHandlerInline { +func expandWorkflowHandlerInline(p []interface{}) (*eaaspb.WorkflowHandlerInline, error) { wfHandlerInline := &eaaspb.WorkflowHandlerInline{} if len(p) == 0 || p[0] == nil { - return wfHandlerInline + return wfHandlerInline, nil } in := p[0].(map[string]interface{}) @@ -2190,50 +2235,58 @@ func expandWorkflowHandlerInline(p []interface{}) *eaaspb.WorkflowHandlerInline wfHandlerInline.Inputs = expandConfigContextCompoundRefs(v) } - return wfHandlerInline + if v, ok := in["outputs"].(string); ok && len(v) > 0 { + outputs, err := expandDriverOutputs(v) + if err != nil { + return nil, err + } + wfHandlerInline.Outputs = outputs + } + + return wfHandlerInline, nil } func expandWorkflowHandlerConfig(p []interface{}) *eaaspb.WorkflowHandlerConfig { - config := eaaspb.WorkflowHandlerConfig{} + workflowHandlerConfig := eaaspb.WorkflowHandlerConfig{} if len(p) == 0 || p[0] == nil { - return &config + return &workflowHandlerConfig } in := p[0].(map[string]interface{}) if typ, ok := in["type"].(string); ok && len(typ) > 0 { - config.Type = typ + workflowHandlerConfig.Type = typ } if ts, ok := in["timeout_seconds"].(int); ok { - config.TimeoutSeconds = int64(ts) + workflowHandlerConfig.TimeoutSeconds = int64(ts) } if sc, ok := in["success_condition"].(string); ok && len(sc) > 0 { - config.SuccessCondition = sc + workflowHandlerConfig.SuccessCondition = sc } if ts, ok := in["max_retry_count"].(int); ok { - config.MaxRetryCount = int32(ts) + 
workflowHandlerConfig.MaxRetryCount = int32(ts) } if v, ok := in["container"].([]interface{}); ok && len(v) > 0 { - config.Container = expandDriverContainerConfig(v) + workflowHandlerConfig.Container = expandDriverContainerConfig(v) } if v, ok := in["http"].([]interface{}); ok && len(v) > 0 { - config.Http = expandDriverHttpConfig(v) + workflowHandlerConfig.Http = expandDriverHttpConfig(v) } if v, ok := in["polling_config"].([]interface{}); ok && len(v) > 0 { - config.PollingConfig = expandPollingConfig(v) + workflowHandlerConfig.PollingConfig = expandPollingConfig(v) } if h, ok := in["timeout_seconds"].(int); ok { - config.TimeoutSeconds = int64(h) + workflowHandlerConfig.TimeoutSeconds = int64(h) } - return &config + return &workflowHandlerConfig } func expandPollingConfig(p []interface{}) *eaaspb.PollingConfig { @@ -2271,7 +2324,6 @@ func flattenWorkflowHandlerCompoundRef(input *eaaspb.WorkflowHandlerCompoundRef) } func flattenWorkflowHandlerInline(input *eaaspb.WorkflowHandlerInline) []interface{} { - log.Println("flatten workflow handler inline start") if input == nil { return nil } @@ -2282,6 +2334,9 @@ func flattenWorkflowHandlerInline(input *eaaspb.WorkflowHandlerInline) []interfa if len(input.Inputs) > 0 { obj["inputs"] = flattenConfigContextCompoundRefs(input.Inputs) } + if input.Outputs != nil { + obj["outputs"] = flattenDriverOutputs(input.Outputs) + } return []interface{}{obj} }