diff --git a/spectrocloud/cluster_common.go b/spectrocloud/cluster_common.go index 161c4134..b223cc82 100644 --- a/spectrocloud/cluster_common.go +++ b/spectrocloud/cluster_common.go @@ -24,6 +24,7 @@ var ( "spectrocloud_cluster_openstack": "openstack", "spectrocloud_cluster_tke": "tke", "spectrocloud_cluster_vsphere": "vsphere", + "spectrocloud_cluster_gke": "gke", } ) diff --git a/spectrocloud/cluster_common_profiles.go b/spectrocloud/cluster_common_profiles.go index 49387d37..02a06050 100644 --- a/spectrocloud/cluster_common_profiles.go +++ b/spectrocloud/cluster_common_profiles.go @@ -172,3 +172,21 @@ func updateProfiles(c *client.V1Client, d *schema.ResourceData) error { return nil } + +func flattenClusterProfileForImport(c *client.V1Client, d *schema.ResourceData) ([]interface{}, error) { + clusterContext := "project" + if v, ok := d.GetOk("context"); ok { + clusterContext = v.(string) + } + clusterProfiles := make([]interface{}, 0) + cluster, err := c.GetCluster(clusterContext, d.Id()) + if err != nil { + return clusterProfiles, err + } + for _, profileTemplate := range cluster.Spec.ClusterProfileTemplates { + profile := make(map[string]interface{}) + profile["id"] = profileTemplate.UID + clusterProfiles = append(clusterProfiles, profile) + } + return clusterProfiles, nil +} diff --git a/spectrocloud/resource_cluster_gcp.go b/spectrocloud/resource_cluster_gcp.go index aafd61c5..64751772 100644 --- a/spectrocloud/resource_cluster_gcp.go +++ b/spectrocloud/resource_cluster_gcp.go @@ -2,6 +2,7 @@ package spectrocloud import ( "context" + "github.com/spectrocloud/gomi/pkg/ptr" "log" "time" @@ -22,7 +23,10 @@ func resourceClusterGcp() *schema.Resource { ReadContext: resourceClusterGcpRead, UpdateContext: resourceClusterGcpUpdate, DeleteContext: resourceClusterDelete, - Description: "Resource for managing GCP clusters in Spectro Cloud through Palette.", + Importer: &schema.ResourceImporter{ + StateContext: resourceClusterGcpImport, + }, + Description: "Resource 
for managing GCP clusters in Spectro Cloud through Palette.", Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), @@ -294,6 +298,17 @@ func resourceClusterGcpRead(_ context.Context, d *schema.ResourceData, m interfa return diags } + configUID := cluster.Spec.CloudConfigRef.UID + if err := d.Set("cloud_config_id", configUID); err != nil { + return diag.FromErr(err) + } + + // verify cluster type + err = ValidateCloudType("spectrocloud_cluster_gcp", cluster) + if err != nil { + return diag.FromErr(err) + } + diagnostics, done := readCommonFields(c, d, cluster) if done { return diagnostics @@ -310,6 +325,12 @@ func flattenCloudConfigGcp(configUID string, d *schema.ResourceData, c *client.V if config, err := c.GetCloudConfigGcp(configUID, ClusterContext); err != nil { return diag.FromErr(err) } else { + if err := d.Set("cloud_account_id", config.Spec.CloudAccountRef.UID); err != nil { + return diag.FromErr(err) + } + if err := d.Set("cloud_config", flattenClusterConfigsGcp(config)); err != nil { + return diag.FromErr(err) + } mp := flattenMachinePoolConfigsGcp(config.Spec.MachinePoolConfig) mp, err := flattenNodeMaintenanceStatus(c, d, c.GetNodeStatusMapGcp, mp, configUID, ClusterContext) if err != nil { @@ -322,6 +343,24 @@ func flattenCloudConfigGcp(configUID string, d *schema.ResourceData, c *client.V return diag.Diagnostics{} } +func flattenClusterConfigsGcp(config *models.V1GcpCloudConfig) []interface{} { + if config == nil || config.Spec == nil || config.Spec.ClusterConfig == nil { + return make([]interface{}, 0) + } + m := make(map[string]interface{}) + + if config.Spec.ClusterConfig.Project != nil { + m["project"] = config.Spec.ClusterConfig.Project + } + if config.Spec.ClusterConfig.Network != "" { + m["network"] = config.Spec.ClusterConfig.Network + } + if ptr.String(config.Spec.ClusterConfig.Region) != "" { + m["region"] = ptr.String(config.Spec.ClusterConfig.Region) + } + return []interface{}{m} +} + func 
flattenMachinePoolConfigsGcp(machinePools []*models.V1GcpMachinePoolConfig) []interface{} { if machinePools == nil { diff --git a/spectrocloud/resource_cluster_gcp_import.go b/spectrocloud/resource_cluster_gcp_import.go new file mode 100644 index 00000000..826527c7 --- /dev/null +++ b/spectrocloud/resource_cluster_gcp_import.go @@ -0,0 +1,34 @@ +package spectrocloud + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/spectrocloud/palette-sdk-go/client" +) + +func resourceClusterGcpImport(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { + c := m.(*client.V1Client) + err := GetCommonCluster(d, c) + if err != nil { + return nil, err + } + + diags := resourceClusterGcpRead(ctx, d, m) + if diags.HasError() { + return nil, fmt.Errorf("could not read cluster for import: %v", diags) + } + + clusterProfiles, err := flattenClusterProfileForImport(c, d) + if err != nil { + return nil, err + } + if err := d.Set("cluster_profile", clusterProfiles); err != nil { + return nil, fmt.Errorf("could not read cluster for import: %v", err) + } + + // Return the resource data. In most cases, this method is only used to + // import one resource at a time, so you should return the resource data + // in a slice with a single element. 
+ return []*schema.ResourceData{d}, nil +} diff --git a/spectrocloud/resource_cluster_gcp_test.go b/spectrocloud/resource_cluster_gcp_test.go new file mode 100644 index 00000000..295a7176 --- /dev/null +++ b/spectrocloud/resource_cluster_gcp_test.go @@ -0,0 +1,217 @@ +package spectrocloud + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/spectrocloud/gomi/pkg/ptr" + "github.com/spectrocloud/hapi/models" + "github.com/spectrocloud/terraform-provider-spectrocloud/types" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestToMachinePoolGcp(t *testing.T) { + tests := []struct { + name string + input map[string]interface{} + expectedOutput *models.V1GcpMachinePoolConfigEntity + expectError bool + }{ + { + name: "Control Plane", + input: map[string]interface{}{ + "control_plane": true, + "control_plane_as_worker": true, + "azs": schema.NewSet(schema.HashString, []interface{}{"us-central1-a"}), + "instance_type": "n1-standard-1", + "disk_size_gb": 50, + "name": "example-name", + "count": 3, + "node_repave_interval": 0, + }, + expectedOutput: &models.V1GcpMachinePoolConfigEntity{ + CloudConfig: &models.V1GcpMachinePoolCloudConfigEntity{ + Azs: []string{"us-central1-a"}, + InstanceType: types.Ptr("n1-standard-1"), + RootDeviceSize: int64(50), + }, + PoolConfig: &models.V1MachinePoolConfigEntity{ + AdditionalLabels: map[string]string{}, + Taints: nil, + IsControlPlane: true, + Labels: []string{"master"}, + Name: types.Ptr("example-name"), + Size: types.Ptr(int32(3)), + UpdateStrategy: &models.V1UpdateStrategy{ + Type: "RollingUpdateScaleOut", + }, + UseControlPlaneAsWorker: true, + }, + }, + expectError: false, + }, + { + name: "Node Repave Interval Error", + input: map[string]interface{}{ + "control_plane": true, + "control_plane_as_worker": false, + "azs": schema.NewSet(schema.HashString, []interface{}{"us-central1-a"}), + "instance_type": "n1-standard-2", + "disk_size_gb": 100, + "name": "example-name-2", + "count": 2, + 
"node_repave_interval": -1, + }, + expectedOutput: &models.V1GcpMachinePoolConfigEntity{ + CloudConfig: &models.V1GcpMachinePoolCloudConfigEntity{ + Azs: []string{"us-central1-a"}, + InstanceType: types.Ptr("n1-standard-2"), + RootDeviceSize: int64(100), + }, + PoolConfig: &models.V1MachinePoolConfigEntity{ + AdditionalLabels: map[string]string{"example": "label"}, + Taints: []*models.V1Taint{}, + IsControlPlane: true, + Labels: []string{"master"}, + Name: types.Ptr("example-name-2"), + Size: types.Ptr(int32(2)), + UpdateStrategy: &models.V1UpdateStrategy{ + Type: "RollingUpdate", + }, + UseControlPlaneAsWorker: false, + }, + }, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + output, err := toMachinePoolGcp(tt.input) + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.expectedOutput, output) + } + }) + } +} + +func TestFlattenMachinePoolConfigsGcp(t *testing.T) { + tests := []struct { + name string + input []*models.V1GcpMachinePoolConfig + expectedOutput []interface{} + }{ + { + name: "Single Machine Pool", + input: []*models.V1GcpMachinePoolConfig{ + { + AdditionalLabels: map[string]string{"label1": "value1", "label2": "value2"}, + Taints: []*models.V1Taint{{Key: "taint1", Value: "value1", Effect: "NoSchedule"}}, + IsControlPlane: ptr.BoolPtr(true), + UseControlPlaneAsWorker: true, + Name: "machine-pool-1", + Size: int32(3), + UpdateStrategy: &models.V1UpdateStrategy{Type: "RollingUpdate"}, + InstanceType: types.Ptr("n1-standard-4"), + RootDeviceSize: int64(100), + Azs: []string{"us-west1-a", "us-west1-b"}, + NodeRepaveInterval: 0, + }, + }, + expectedOutput: []interface{}{ + map[string]interface{}{ + "additional_labels": map[string]string{ + "label1": "value1", + "label2": "value2", + }, + "taints": []interface{}{ + map[string]interface{}{ + "key": "taint1", + "value": "value1", + "effect": "NoSchedule", + }, + }, + "control_plane": true, + 
"control_plane_as_worker": true, + "name": "machine-pool-1", + "count": 3, + "update_strategy": "RollingUpdate", + "instance_type": "n1-standard-4", + "disk_size_gb": 100, + "azs": []string{"us-west1-a", "us-west1-b"}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + output := flattenMachinePoolConfigsGcp(tt.input) + assert.Equal(t, tt.expectedOutput, output) + }) + } +} + +func TestFlattenClusterConfigsGcp(t *testing.T) { + tests := []struct { + name string + input *models.V1GcpCloudConfig + expectedOutput []interface{} + }{ + { + name: "Valid Cloud Config", + input: &models.V1GcpCloudConfig{ + Spec: &models.V1GcpCloudConfigSpec{ + ClusterConfig: &models.V1GcpClusterConfig{ + Project: ptr.StringPtr("my-project"), + Network: "my-network", + Region: ptr.StringPtr("us-west1"), + }, + }, + }, + expectedOutput: []interface{}{ + map[string]interface{}{ + "project": ptr.StringPtr("my-project"), + "network": "my-network", + "region": "us-west1", + }, + }, + }, + { + name: "Nil Cloud Config", + input: nil, + expectedOutput: []interface{}{}, + }, + { + name: "Empty Cluster Config", + input: &models.V1GcpCloudConfig{}, + expectedOutput: []interface{}{}, + }, + { + name: "Empty Cluster Config Spec", + input: &models.V1GcpCloudConfig{Spec: &models.V1GcpCloudConfigSpec{}}, + expectedOutput: []interface{}{}, + }, + { + name: "Missing Fields in Cluster Config", + input: &models.V1GcpCloudConfig{ + Spec: &models.V1GcpCloudConfigSpec{ + ClusterConfig: &models.V1GcpClusterConfig{}, + }, + }, + expectedOutput: []interface{}{ + map[string]interface{}{}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + output := flattenClusterConfigsGcp(tt.input) + assert.Equal(t, tt.expectedOutput, output) + }) + } +} diff --git a/spectrocloud/resource_cluster_gke.go b/spectrocloud/resource_cluster_gke.go index fedffe18..2614c850 100644 --- a/spectrocloud/resource_cluster_gke.go +++ b/spectrocloud/resource_cluster_gke.go @@ 
-272,6 +272,12 @@ func resourceClusterGkeRead(ctx context.Context, d *schema.ResourceData, m inter return diag.FromErr(err) } + // verify cluster type + err = ValidateCloudType("spectrocloud_cluster_gke", cluster) + if err != nil { + return diag.FromErr(err) + } + diagnostics, done := readCommonFields(c, d, cluster) if done { return diagnostics diff --git a/spectrocloud/resource_cluster_gke_import.go b/spectrocloud/resource_cluster_gke_import.go index d470c971..7c0cd8dd 100644 --- a/spectrocloud/resource_cluster_gke_import.go +++ b/spectrocloud/resource_cluster_gke_import.go @@ -21,6 +21,14 @@ func resourceClusterGkeImport(ctx context.Context, d *schema.ResourceData, m int return nil, fmt.Errorf("could not read cluster for import: %v", diags) } + + clusterProfiles, err := flattenClusterProfileForImport(c, d) + if err != nil { + return nil, err + } + if err := d.Set("cluster_profile", clusterProfiles); err != nil { + return nil, fmt.Errorf("could not read cluster for import: %v", err) + } + + // Return the resource data. In most cases, this method is only used to + // import one resource at a time, so you should return the resource data + // in a slice with a single element. 
diff --git a/spectrocloud/resource_cluster_gke_test.go b/spectrocloud/resource_cluster_gke_test.go index a06bb22d..4f91a959 100644 --- a/spectrocloud/resource_cluster_gke_test.go +++ b/spectrocloud/resource_cluster_gke_test.go @@ -1,7 +1,9 @@ package spectrocloud import ( + "errors" "github.com/spectrocloud/hapi/models" + "github.com/spectrocloud/palette-sdk-go/client" "github.com/spectrocloud/terraform-provider-spectrocloud/types" "github.com/stretchr/testify/assert" "testing" @@ -111,3 +113,85 @@ func TestFlattenMachinePoolConfigsGke(t *testing.T) { assert.Equal(t, "n1-standard-4", pool2["instance_type"]) assert.Equal(t, 200, pool2["disk_size_gb"]) } + +func TestFlattenClusterProfileForImport(t *testing.T) { + m := &client.V1Client{ + GetClusterFn: func(scope, uid string) (*models.V1SpectroCluster, error) { + var profiles []*models.V1ClusterProfileTemplate + p1 := &models.V1ClusterProfileTemplate{ + CloudType: "", + Name: "", + PackServerRefs: nil, + PackServerSecret: "", + Packs: nil, + ProfileVersion: "", + RelatedObject: nil, + Type: "", + UID: "profile-1", + Version: 0, + } + p2 := &models.V1ClusterProfileTemplate{ + CloudType: "", + Name: "", + PackServerRefs: nil, + PackServerSecret: "", + Packs: nil, + ProfileVersion: "", + RelatedObject: nil, + Type: "", + UID: "profile-2", + Version: 0, + } + profiles = append(profiles, p1) + profiles = append(profiles, p2) + + cluster := &models.V1SpectroCluster{ + APIVersion: "", + Kind: "", + Metadata: nil, + Spec: &models.V1SpectroClusterSpec{ + CloudConfigRef: nil, + CloudType: "", + ClusterConfig: nil, + ClusterProfileTemplates: profiles, + ClusterType: "", + }, + Status: nil, + } + return cluster, nil + }, + } + + // Test case: Successfully retrieve cluster profiles + clusterContext := "project" + clusterID := "test-cluster-id" + clusterProfiles := []interface{}{ + map[string]interface{}{"id": "profile-1"}, + map[string]interface{}{"id": "profile-2"}, + } + mockResourceData := 
resourceClusterGke().TestResourceData() + err := mockResourceData.Set("cluster_profile", clusterProfiles) + if err != nil { + t.Fatal(err) + } + err = mockResourceData.Set("context", clusterContext) + if err != nil { + t.Fatal(err) + } + mockResourceData.SetId(clusterID) + + result, err := flattenClusterProfileForImport(m, mockResourceData) + assert.NoError(t, err) + assert.Equal(t, clusterProfiles, result) + + //Test case: Error retrieving cluster + m = &client.V1Client{ + GetClusterFn: func(scope, uid string) (*models.V1SpectroCluster, error) { + + return nil, errors.New("error retrieving cluster") + }, + } + result, err = flattenClusterProfileForImport(m, mockResourceData) + assert.Error(t, err) + assert.Empty(t, result)