Skip to content

Commit

Permalink
PLT-795:Added import support for GKE & GCP cluster. (#459)
Browse files Browse the repository at this point in the history
* PLT-795:Added import support for GCP(IAAS) type cluster.

* added validation

* added cluster profile support in import

* added cluster profile import support in gcp
  • Loading branch information
SivaanandM authored Jun 4, 2024
1 parent a9a3c6d commit a9d784a
Show file tree
Hide file tree
Showing 8 changed files with 408 additions and 1 deletion.
1 change: 1 addition & 0 deletions spectrocloud/cluster_common.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ var (
"spectrocloud_cluster_openstack": "openstack",
"spectrocloud_cluster_tke": "tke",
"spectrocloud_cluster_vsphere": "vsphere",
"spectrocloud_cluster_gke": "gke",
}
)

Expand Down
18 changes: 18 additions & 0 deletions spectrocloud/cluster_common_profiles.go
Original file line number Diff line number Diff line change
Expand Up @@ -172,3 +172,21 @@ func updateProfiles(c *client.V1Client, d *schema.ResourceData) error {

return nil
}

// flattenClusterProfileForImport fetches the cluster identified by the
// resource ID and returns its attached profiles as a list of {"id": <uid>}
// maps, the shape expected by the "cluster_profile" attribute. The lookup
// uses the "project" context unless the resource carries an explicit
// "context" value.
func flattenClusterProfileForImport(c *client.V1Client, d *schema.ResourceData) ([]interface{}, error) {
	scope := "project"
	if raw, ok := d.GetOk("context"); ok {
		scope = raw.(string)
	}

	profiles := make([]interface{}, 0)
	cluster, err := c.GetCluster(scope, d.Id())
	if err != nil {
		return profiles, err
	}

	for _, tmpl := range cluster.Spec.ClusterProfileTemplates {
		profiles = append(profiles, map[string]interface{}{"id": tmpl.UID})
	}
	return profiles, nil
}
41 changes: 40 additions & 1 deletion spectrocloud/resource_cluster_gcp.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package spectrocloud

import (
"context"
"github.com/spectrocloud/gomi/pkg/ptr"
"log"
"time"

Expand All @@ -22,7 +23,10 @@ func resourceClusterGcp() *schema.Resource {
ReadContext: resourceClusterGcpRead,
UpdateContext: resourceClusterGcpUpdate,
DeleteContext: resourceClusterDelete,
Description: "Resource for managing GCP clusters in Spectro Cloud through Palette.",
Importer: &schema.ResourceImporter{
StateContext: resourceClusterGcpImport,
},
Description: "Resource for managing GCP clusters in Spectro Cloud through Palette.",

Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(60 * time.Minute),
Expand Down Expand Up @@ -294,6 +298,17 @@ func resourceClusterGcpRead(_ context.Context, d *schema.ResourceData, m interfa
return diags
}

configUID := cluster.Spec.CloudConfigRef.UID
if err := d.Set("cloud_config_id", configUID); err != nil {
return diag.FromErr(err)
}

// verify cluster type
err = ValidateCloudType("spectrocloud_cluster_gcp", cluster)
if err != nil {
return diag.FromErr(err)
}

diagnostics, done := readCommonFields(c, d, cluster)
if done {
return diagnostics
Expand All @@ -310,6 +325,12 @@ func flattenCloudConfigGcp(configUID string, d *schema.ResourceData, c *client.V
if config, err := c.GetCloudConfigGcp(configUID, ClusterContext); err != nil {
return diag.FromErr(err)
} else {
if err := d.Set("cloud_account_id", config.Spec.CloudAccountRef.UID); err != nil {
return diag.FromErr(err)
}
if err := d.Set("cloud_config", flattenClusterConfigsGcp(config)); err != nil {
return diag.FromErr(err)
}
mp := flattenMachinePoolConfigsGcp(config.Spec.MachinePoolConfig)
mp, err := flattenNodeMaintenanceStatus(c, d, c.GetNodeStatusMapGcp, mp, configUID, ClusterContext)
if err != nil {
Expand All @@ -322,6 +343,24 @@ func flattenCloudConfigGcp(configUID string, d *schema.ResourceData, c *client.V
return diag.Diagnostics{}
}

// flattenClusterConfigsGcp converts a GCP cloud config's cluster settings
// into the single-element list shape used by the "cloud_config" attribute.
// Any nil level of the config yields an empty (non-nil) list.
func flattenClusterConfigsGcp(config *models.V1GcpCloudConfig) []interface{} {
	if config == nil || config.Spec == nil || config.Spec.ClusterConfig == nil {
		return make([]interface{}, 0)
	}

	cc := config.Spec.ClusterConfig
	flat := make(map[string]interface{})

	// NOTE(review): "project" is stored as a *string while "region" is
	// dereferenced via ptr.String — confirm the schema accepts a pointer here.
	if cc.Project != nil {
		flat["project"] = cc.Project
	}
	if cc.Network != "" {
		flat["network"] = cc.Network
	}
	if region := ptr.String(cc.Region); region != "" {
		flat["region"] = region
	}

	return []interface{}{flat}
}

func flattenMachinePoolConfigsGcp(machinePools []*models.V1GcpMachinePoolConfig) []interface{} {

if machinePools == nil {
Expand Down
34 changes: 34 additions & 0 deletions spectrocloud/resource_cluster_gcp_import.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
package spectrocloud

import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/spectrocloud/palette-sdk-go/client"
)

// resourceClusterGcpImport implements the importer StateContext for GCP
// clusters: it resolves the cluster via GetCommonCluster, populates state
// through the regular read path, and then attaches the cluster profile
// references (which the read path does not set).
func resourceClusterGcpImport(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) {
	c := m.(*client.V1Client)
	err := GetCommonCluster(d, c)
	if err != nil {
		return nil, err
	}

	diags := resourceClusterGcpRead(ctx, d, m)
	if diags.HasError() {
		return nil, fmt.Errorf("could not read cluster for import: %v", diags)
	}

	clusterProfiles, err := flattenClusterProfileForImport(c, d)
	if err != nil {
		return nil, err
	}
	if err := d.Set("cluster_profile", clusterProfiles); err != nil {
		// Bug fix: report the actual d.Set error; the previous code formatted
		// `diags`, which holds no error on this path.
		return nil, fmt.Errorf("could not set cluster_profile for import: %w", err)
	}

	// Return the resource data. In most cases, this method is only used to
	// import one resource at a time, so you should return the resource data
	// in a slice with a single element.
	return []*schema.ResourceData{d}, nil
}
217 changes: 217 additions & 0 deletions spectrocloud/resource_cluster_gcp_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,217 @@
package spectrocloud

import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/spectrocloud/gomi/pkg/ptr"
"github.com/spectrocloud/hapi/models"
"github.com/spectrocloud/terraform-provider-spectrocloud/types"
"github.com/stretchr/testify/assert"
"testing"
)

// TestToMachinePoolGcp exercises toMachinePoolGcp with table-driven cases:
// a valid control-plane pool and a pool whose negative node_repave_interval
// must be rejected.
func TestToMachinePoolGcp(t *testing.T) {
	tests := []struct {
		name           string
		input          map[string]interface{} // raw resource-data machine pool map
		expectedOutput *models.V1GcpMachinePoolConfigEntity
		expectError    bool // true when toMachinePoolGcp should return a non-nil error
	}{
		{
			// Happy path: control plane that also runs workloads.
			name: "Control Plane",
			input: map[string]interface{}{
				"control_plane":           true,
				"control_plane_as_worker": true,
				"azs":                     schema.NewSet(schema.HashString, []interface{}{"us-central1-a"}),
				"instance_type":           "n1-standard-1",
				"disk_size_gb":            50,
				"name":                    "example-name",
				"count":                   3,
				"node_repave_interval":    0,
			},
			expectedOutput: &models.V1GcpMachinePoolConfigEntity{
				CloudConfig: &models.V1GcpMachinePoolCloudConfigEntity{
					Azs:            []string{"us-central1-a"},
					InstanceType:   types.Ptr("n1-standard-1"),
					RootDeviceSize: int64(50),
				},
				PoolConfig: &models.V1MachinePoolConfigEntity{
					AdditionalLabels: map[string]string{},
					Taints:           nil,
					IsControlPlane:   true,
					Labels:           []string{"master"},
					Name:             types.Ptr("example-name"),
					Size:             types.Ptr(int32(3)),
					UpdateStrategy: &models.V1UpdateStrategy{
						Type: "RollingUpdateScaleOut",
					},
					UseControlPlaneAsWorker: true,
				},
			},
			expectError: false,
		},
		{
			// Negative node_repave_interval on a control-plane pool must error;
			// expectedOutput is never compared on this path.
			name: "Node Repave Interval Error",
			input: map[string]interface{}{
				"control_plane":           true,
				"control_plane_as_worker": false,
				"azs":                     schema.NewSet(schema.HashString, []interface{}{"us-central1-a"}),
				"instance_type":           "n1-standard-2",
				"disk_size_gb":            100,
				"name":                    "example-name-2",
				"count":                   2,
				"node_repave_interval":    -1,
			},
			expectedOutput: &models.V1GcpMachinePoolConfigEntity{
				CloudConfig: &models.V1GcpMachinePoolCloudConfigEntity{
					Azs:            []string{"us-central1-a"},
					InstanceType:   types.Ptr("n1-standard-2"),
					RootDeviceSize: int64(100),
				},
				PoolConfig: &models.V1MachinePoolConfigEntity{
					AdditionalLabels: map[string]string{"example": "label"},
					Taints:           []*models.V1Taint{},
					IsControlPlane:   true,
					Labels:           []string{"master"},
					Name:             types.Ptr("example-name-2"),
					Size:             types.Ptr(int32(2)),
					UpdateStrategy: &models.V1UpdateStrategy{
						Type: "RollingUpdate",
					},
					UseControlPlaneAsWorker: false,
				},
			},
			expectError: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			output, err := toMachinePoolGcp(tt.input)

			if tt.expectError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tt.expectedOutput, output)
			}
		})
	}
}

// TestFlattenMachinePoolConfigsGcp verifies that a fully-populated GCP
// machine pool config is flattened into the expected attribute map.
func TestFlattenMachinePoolConfigsGcp(t *testing.T) {
	pools := []*models.V1GcpMachinePoolConfig{
		{
			AdditionalLabels:        map[string]string{"label1": "value1", "label2": "value2"},
			Taints:                  []*models.V1Taint{{Key: "taint1", Value: "value1", Effect: "NoSchedule"}},
			IsControlPlane:          ptr.BoolPtr(true),
			UseControlPlaneAsWorker: true,
			Name:                    "machine-pool-1",
			Size:                    int32(3),
			UpdateStrategy:          &models.V1UpdateStrategy{Type: "RollingUpdate"},
			InstanceType:            types.Ptr("n1-standard-4"),
			RootDeviceSize:          int64(100),
			Azs:                     []string{"us-west1-a", "us-west1-b"},
			NodeRepaveInterval:      0,
		},
	}

	want := []interface{}{
		map[string]interface{}{
			"additional_labels": map[string]string{
				"label1": "value1",
				"label2": "value2",
			},
			"taints": []interface{}{
				map[string]interface{}{
					"key":    "taint1",
					"value":  "value1",
					"effect": "NoSchedule",
				},
			},
			"control_plane":           true,
			"control_plane_as_worker": true,
			"name":                    "machine-pool-1",
			"count":                   3,
			"update_strategy":         "RollingUpdate",
			"instance_type":           "n1-standard-4",
			"disk_size_gb":            100,
			"azs":                     []string{"us-west1-a", "us-west1-b"},
		},
	}

	assert.Equal(t, want, flattenMachinePoolConfigsGcp(pools))
}

// TestFlattenClusterConfigsGcp covers flattenClusterConfigsGcp across a
// fully-populated config, nil/empty configs at every nesting level, and a
// cluster config with no fields set.
func TestFlattenClusterConfigsGcp(t *testing.T) {
	tests := []struct {
		name           string
		input          *models.V1GcpCloudConfig
		expectedOutput []interface{}
	}{
		{
			// All three fields present; note "project" is expected as a
			// *string (the flattener stores the pointer, not its value).
			name: "Valid Cloud Config",
			input: &models.V1GcpCloudConfig{
				Spec: &models.V1GcpCloudConfigSpec{
					ClusterConfig: &models.V1GcpClusterConfig{
						Project: ptr.StringPtr("my-project"),
						Network: "my-network",
						Region:  ptr.StringPtr("us-west1"),
					},
				},
			},
			expectedOutput: []interface{}{
				map[string]interface{}{
					"project": ptr.StringPtr("my-project"),
					"network": "my-network",
					"region":  "us-west1",
				},
			},
		},
		{
			// Nil input short-circuits to an empty list.
			name:           "Nil Cloud Config",
			input:          nil,
			expectedOutput: []interface{}{},
		},
		{
			// Nil Spec short-circuits to an empty list.
			name:           "Empty Cluster Config",
			input:          &models.V1GcpCloudConfig{},
			expectedOutput: []interface{}{},
		},
		{
			// Nil Spec.ClusterConfig short-circuits to an empty list.
			name:           "Empty Cluster Config Spec",
			input:          &models.V1GcpCloudConfig{Spec: &models.V1GcpCloudConfigSpec{}},
			expectedOutput: []interface{}{},
		},
		{
			// A present-but-empty cluster config yields one empty map.
			name: "Missing Fields in Cluster Config",
			input: &models.V1GcpCloudConfig{
				Spec: &models.V1GcpCloudConfigSpec{
					ClusterConfig: &models.V1GcpClusterConfig{},
				},
			},
			expectedOutput: []interface{}{
				map[string]interface{}{},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			output := flattenClusterConfigsGcp(tt.input)
			assert.Equal(t, tt.expectedOutput, output)
		})
	}
}
6 changes: 6 additions & 0 deletions spectrocloud/resource_cluster_gke.go
Original file line number Diff line number Diff line change
Expand Up @@ -272,6 +272,12 @@ func resourceClusterGkeRead(ctx context.Context, d *schema.ResourceData, m inter
return diag.FromErr(err)
}

// verify cluster type
err = ValidateCloudType("spectrocloud_cluster_gke", cluster)
if err != nil {
return diag.FromErr(err)
}

diagnostics, done := readCommonFields(c, d, cluster)
if done {
return diagnostics
Expand Down
8 changes: 8 additions & 0 deletions spectrocloud/resource_cluster_gke_import.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,14 @@ func resourceClusterGkeImport(ctx context.Context, d *schema.ResourceData, m int
return nil, fmt.Errorf("could not read cluster for import: %v", diags)
}

clusterProfiles, err := flattenClusterProfileForImport(c, d)
if err != nil {
return nil, err
}
if err := d.Set("cluster_profile", clusterProfiles); err != nil {
return nil, fmt.Errorf("could not read cluster for import: %v", diags)
}

// Return the resource data. In most cases, this method is only used to
// import one resource at a time, so you should return the resource data
// in a slice with a single element.
Expand Down
Loading

0 comments on commit a9d784a

Please sign in to comment.