diff --git a/docs/data-sources/cluster_profile.md b/docs/data-sources/cluster_profile.md
index eab36371..65600410 100644
--- a/docs/data-sources/cluster_profile.md
+++ b/docs/data-sources/cluster_profile.md
@@ -29,39 +29,33 @@ output "same" {
- `context` (String)
- `name` (String)
-- `pack` (Block List) (see [below for nested schema](#nestedblock--pack))
- `version` (String)
### Read-Only
- `id` (String) The ID of this resource.
+- `pack` (List of Object) (see [below for nested schema](#nestedatt--pack))
-
+
### Nested Schema for `pack`
-Required:
-
-- `name` (String) The name of the pack. The name must be unique within the cluster profile.
-
-Optional:
+Read-Only:
-- `manifest` (Block List) (see [below for nested schema](#nestedblock--pack--manifest))
-- `registry_uid` (String) The registry UID of the pack. The registry UID is the unique identifier of the registry. This attribute is required if there is more than one registry that contains a pack with the same name.
-- `tag` (String) The tag of the pack. The tag is the version of the pack. This attribute is required if the pack type is `spectro` or `helm`.
-- `type` (String) The type of the pack. Allowed values are `spectro`, `manifest` or `helm`. The default value is `spectro`.
-- `uid` (String) The unique identifier of the pack. The value can be looked up using the [`spectrocloud_pack`](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) data source. This value is required if the pack type is `spectro`.
-- `values` (String) The values of the pack. The values are the configuration values of the pack. The values are specified in YAML format.
+- `manifest` (List of Object) (see [below for nested schema](#nestedobjatt--pack--manifest))
+- `name` (String)
+- `registry_uid` (String)
+- `tag` (String)
+- `type` (String)
+- `uid` (String)
+- `values` (String)
-
+
### Nested Schema for `pack.manifest`
-Required:
-
-- `content` (String) The content of the manifest. The content is the YAML content of the manifest.
-- `name` (String) The name of the manifest. The name must be unique within the pack.
-
Read-Only:
+- `content` (String)
+- `name` (String)
- `uid` (String)
diff --git a/docs/resources/addon_deployment.md b/docs/resources/addon_deployment.md
index abe1b774..5a979c51 100644
--- a/docs/resources/addon_deployment.md
+++ b/docs/resources/addon_deployment.md
@@ -18,8 +18,8 @@ description: |-
### Required
-- `cluster_context` (String)
- `cluster_uid` (String)
+- `context` (String)
### Optional
diff --git a/docs/resources/cluster_import.md b/docs/resources/cluster_import.md
index 39ff3600..fa8784e8 100644
--- a/docs/resources/cluster_import.md
+++ b/docs/resources/cluster_import.md
@@ -60,6 +60,7 @@ resource "spectrocloud_cluster_import" "cluster" {
### Optional
- `cluster_profile` (Block List) (see [below for nested schema](#nestedblock--cluster_profile))
+- `context` (String) The context of the cluster. Can be `project` or `tenant`. Default is `project`.
- `tags` (Set of String) A list of tags to be applied to the cluster. Tags must be in the form of `key:value`.
- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))
diff --git a/go.mod b/go.mod
index b2e7f1ee..516040d8 100644
--- a/go.mod
+++ b/go.mod
@@ -9,8 +9,8 @@ require (
github.com/hashicorp/terraform-plugin-docs v0.13.0
github.com/hashicorp/terraform-plugin-sdk/v2 v2.26.1
github.com/robfig/cron v1.2.0
- github.com/spectrocloud/hapi v1.14.1-0.20231003132647-052140fbb298
- github.com/spectrocloud/palette-sdk-go v0.0.0-20230925161114-b76840d1207d
+ github.com/spectrocloud/hapi v1.14.1-0.20231009111108-9588ba704865
+ github.com/spectrocloud/palette-sdk-go v0.0.0-20231010042403-05aa02da2634
github.com/stretchr/testify v1.8.0
gotest.tools v2.2.0+incompatible
k8s.io/api v0.23.5
diff --git a/go.sum b/go.sum
index c1863f5b..6cd9a5f4 100644
--- a/go.sum
+++ b/go.sum
@@ -720,8 +720,14 @@ github.com/spectrocloud/hapi v1.14.1-0.20230928114741-1670ab9a5e2b h1:Ws5/fRo/ED
github.com/spectrocloud/hapi v1.14.1-0.20230928114741-1670ab9a5e2b/go.mod h1:aI54jbfaSec1ikHSMOJQ7mMOTaRKpQTRCoMKamhBE9s=
github.com/spectrocloud/hapi v1.14.1-0.20231003132647-052140fbb298 h1:jJllo8S53hBovVcRHGtTDlWMobpF/H0X3xQTgKXmA4U=
github.com/spectrocloud/hapi v1.14.1-0.20231003132647-052140fbb298/go.mod h1:aI54jbfaSec1ikHSMOJQ7mMOTaRKpQTRCoMKamhBE9s=
+github.com/spectrocloud/hapi v1.14.1-0.20231009111108-9588ba704865 h1:XU5p2Fp2Lk9AeHqPjo88r48UEUXfEZOfzBDm6+l3AvA=
+github.com/spectrocloud/hapi v1.14.1-0.20231009111108-9588ba704865/go.mod h1:aI54jbfaSec1ikHSMOJQ7mMOTaRKpQTRCoMKamhBE9s=
github.com/spectrocloud/palette-sdk-go v0.0.0-20230925161114-b76840d1207d h1:4VBWbaMTnMbURawJcOPlDrhNpxj78cxrPwMuh52pegw=
github.com/spectrocloud/palette-sdk-go v0.0.0-20230925161114-b76840d1207d/go.mod h1:Om6X/eH6h787jrynu9NTAeuxFpC15E/Atco5kQwiM90=
+github.com/spectrocloud/palette-sdk-go v0.0.0-20231010040555-aa4af299ece2 h1:rrzNLA8/3ar0eVrm8CWCk4D1MQffjCw73T7+94PYoMk=
+github.com/spectrocloud/palette-sdk-go v0.0.0-20231010040555-aa4af299ece2/go.mod h1:Om6X/eH6h787jrynu9NTAeuxFpC15E/Atco5kQwiM90=
+github.com/spectrocloud/palette-sdk-go v0.0.0-20231010042403-05aa02da2634 h1:ebFqt10J6+KWpcCioWjbJV5OCjmOBFoDN6FTLz4VmBw=
+github.com/spectrocloud/palette-sdk-go v0.0.0-20231010042403-05aa02da2634/go.mod h1:Om6X/eH6h787jrynu9NTAeuxFpC15E/Atco5kQwiM90=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
diff --git a/spectrocloud/cluster_common_policies.go b/spectrocloud/cluster_common_policies.go
index 2dc3dcaa..021cebc7 100644
--- a/spectrocloud/cluster_common_policies.go
+++ b/spectrocloud/cluster_common_policies.go
@@ -1,6 +1,8 @@
package spectrocloud
import (
+ "errors"
+
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/spectrocloud/hapi/models"
"github.com/spectrocloud/palette-sdk-go/client"
@@ -62,9 +64,11 @@ func flattenBackupPolicy(policy *models.V1ClusterBackupConfig) []interface{} {
func updateBackupPolicy(c *client.V1Client, d *schema.ResourceData) error {
if policy := toBackupPolicy(d); policy != nil {
- return c.UpdateClusterBackupConfig(d.Id(), policy)
+ clusterContext := d.Get("context").(string)
+ return c.ApplyClusterBackupConfig(d.Id(), policy, clusterContext)
+ } else {
+ return errors.New("backup policy validation: the backup policy cannot be destroyed; to disable it, set the schedule to an empty string")
}
- return nil
}
func toScanPolicy(d *schema.ResourceData) *models.V1ClusterComplianceScheduleConfig {
@@ -100,6 +104,7 @@ func toScanPolicy(d *schema.ResourceData) *models.V1ClusterComplianceScheduleCon
func flattenScanPolicy(driverSpec map[string]models.V1ComplianceScanDriverSpec) []interface{} {
result := make([]interface{}, 0, 1)
data := make(map[string]interface{})
+
if v, found := driverSpec["kube-bench"]; found {
data["configuration_scan_schedule"] = v.Config.Schedule.ScheduledRunTime
}
@@ -109,14 +114,30 @@ func flattenScanPolicy(driverSpec map[string]models.V1ComplianceScanDriverSpec)
if v, found := driverSpec["sonobuoy"]; found {
data["conformance_scan_schedule"] = v.Config.Schedule.ScheduledRunTime
}
- result = append(result, data)
+ if data["configuration_scan_schedule"] == "" && data["penetration_scan_schedule"] == "" && data["conformance_scan_schedule"] == "" {
+ return result
+ } else {
+ result = append(result, data)
+ }
return result
}
func updateScanPolicy(c *client.V1Client, d *schema.ResourceData) error {
- if policy := toScanPolicy(d); policy != nil {
+ if policy := toScanPolicy(d); policy != nil || d.HasChange("scan_policy") {
ClusterContext := d.Get("context").(string)
+ if policy == nil {
+ policy = getEmptyScanPolicy()
+ }
return c.ApplyClusterScanConfig(d.Id(), policy, ClusterContext)
}
return nil
}
+
+func getEmptyScanPolicy() *models.V1ClusterComplianceScheduleConfig {
+ scanPolicy := &models.V1ClusterComplianceScheduleConfig{
+ KubeBench: &models.V1ClusterComplianceScanKubeBenchScheduleConfig{Schedule: &models.V1ClusterFeatureSchedule{ScheduledRunTime: ""}},
+ KubeHunter: &models.V1ClusterComplianceScanKubeHunterScheduleConfig{Schedule: &models.V1ClusterFeatureSchedule{ScheduledRunTime: ""}},
+ Sonobuoy: &models.V1ClusterComplianceScanSonobuoyScheduleConfig{Schedule: &models.V1ClusterFeatureSchedule{ScheduledRunTime: ""}},
+ }
+ return scanPolicy
+}
diff --git a/spectrocloud/cluster_common_profiles.go b/spectrocloud/cluster_common_profiles.go
index 9f2d6afb..71074b8c 100644
--- a/spectrocloud/cluster_common_profiles.go
+++ b/spectrocloud/cluster_common_profiles.go
@@ -154,7 +154,7 @@ func updateProfiles(c *client.V1Client, d *schema.ResourceData) error {
SpcApplySettings: settings,
}
clusterContext := d.Get("context").(string)
- if err := c.UpdateClusterProfileValues(d.Id(), body); err != nil {
+ if err := c.UpdateClusterProfileValues(d.Id(), clusterContext, body); err != nil {
return err
}
diff --git a/spectrocloud/cluster_policies_test.go b/spectrocloud/cluster_policies_test.go
new file mode 100644
index 00000000..a7b9123b
--- /dev/null
+++ b/spectrocloud/cluster_policies_test.go
@@ -0,0 +1,227 @@
+package spectrocloud
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/spectrocloud/hapi/models"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestFlattenScanPolicy(t *testing.T) {
+ driverSpec := map[string]models.V1ComplianceScanDriverSpec{
+ "kube-bench": {
+ Config: &models.V1ComplianceScanConfig{
+ Schedule: &models.V1ClusterFeatureSchedule{
+ ScheduledRunTime: "daily",
+ },
+ },
+ },
+ "kube-hunter": {
+ Config: &models.V1ComplianceScanConfig{
+ Schedule: &models.V1ClusterFeatureSchedule{
+ ScheduledRunTime: "hourly",
+ },
+ },
+ },
+ "sonobuoy": {
+ Config: &models.V1ComplianceScanConfig{
+ Schedule: &models.V1ClusterFeatureSchedule{
+ ScheduledRunTime: "weekly",
+ },
+ },
+ },
+ }
+
+ expected := []interface{}{
+ map[string]interface{}{
+ "configuration_scan_schedule": "daily",
+ "penetration_scan_schedule": "hourly",
+ "conformance_scan_schedule": "weekly",
+ },
+ }
+
+ result := flattenScanPolicy(driverSpec)
+
+ if !reflect.DeepEqual(result, expected) {
+ t.Errorf("Result does not match expected. Got %v, expected %v", result, expected)
+ }
+}
+
+func TestGetEmptyScanPolicy(t *testing.T) {
+ result := getEmptyScanPolicy()
+
+ expected := &models.V1ClusterComplianceScheduleConfig{
+ KubeBench: &models.V1ClusterComplianceScanKubeBenchScheduleConfig{Schedule: &models.V1ClusterFeatureSchedule{ScheduledRunTime: ""}},
+ KubeHunter: &models.V1ClusterComplianceScanKubeHunterScheduleConfig{Schedule: &models.V1ClusterFeatureSchedule{ScheduledRunTime: ""}},
+ Sonobuoy: &models.V1ClusterComplianceScanSonobuoyScheduleConfig{Schedule: &models.V1ClusterFeatureSchedule{ScheduledRunTime: ""}},
+ }
+
+ if !reflect.DeepEqual(result, expected) {
+ t.Errorf("Result does not match expected. Got %v, expected %v", result, expected)
+ }
+}
+
+func TestFlattenBackupPolicy(t *testing.T) {
+ policy := &models.V1ClusterBackupConfig{
+ Schedule: &models.V1ClusterFeatureSchedule{ScheduledRunTime: "daily"},
+ BackupLocationUID: "location-123",
+ BackupPrefix: "backup-prefix",
+ Namespaces: []string{"namespace1", "namespace2"},
+ DurationInHours: 24,
+ IncludeAllDisks: true,
+ IncludeClusterResources: true,
+ }
+
+ expected := []interface{}{
+ map[string]interface{}{
+ "schedule": "daily",
+ "backup_location_id": "location-123",
+ "prefix": "backup-prefix",
+ "namespaces": []string{"namespace1", "namespace2"},
+ "expiry_in_hour": int64(24),
+ "include_disks": true,
+ "include_cluster_resources": true,
+ },
+ }
+
+ result := flattenBackupPolicy(policy)
+ assert.Equal(t, expected, result)
+}
+
+func TestToBackupPolicy(t *testing.T) {
+ // Create a ResourceData to simulate Terraform state
+ resourceData := resourceClusterAws().TestResourceData()
+ backupPolicy := []interface{}{
+ map[string]interface{}{
+ "backup_location_id": "location-123",
+ "prefix": "backup-prefix",
+ "expiry_in_hour": 24,
+ "include_disks": true,
+ "include_cluster_resources": true,
+ "namespaces": []interface{}{"namespace1"},
+ "schedule": "daily",
+ },
+ }
+ resourceData.Set("backup_policy", backupPolicy)
+
+ result := toBackupPolicy(resourceData)
+
+ expected := &models.V1ClusterBackupConfig{
+ BackupLocationUID: "location-123",
+ BackupPrefix: "backup-prefix",
+ DurationInHours: 24,
+ IncludeAllDisks: true,
+ IncludeClusterResources: true,
+ Namespaces: []string{"namespace1"},
+ Schedule: &models.V1ClusterFeatureSchedule{
+ ScheduledRunTime: "daily",
+ },
+ }
+
+ assert.Equal(t, expected, result)
+}
+
+func TestToScanPolicy(t *testing.T) {
+ // Create a ResourceData to simulate Terraform state
+ resourceData := resourceClusterAws().TestResourceData()
+
+ scanPolicy := []interface{}{
+ map[string]interface{}{
+ "configuration_scan_schedule": "daily",
+ "penetration_scan_schedule": "hourly",
+ "conformance_scan_schedule": "weekly",
+ },
+ }
+ resourceData.Set("scan_policy", scanPolicy)
+ result := toScanPolicy(resourceData)
+
+ expected := &models.V1ClusterComplianceScheduleConfig{
+ KubeBench: &models.V1ClusterComplianceScanKubeBenchScheduleConfig{
+ Schedule: &models.V1ClusterFeatureSchedule{ScheduledRunTime: "daily"},
+ },
+ KubeHunter: &models.V1ClusterComplianceScanKubeHunterScheduleConfig{
+ Schedule: &models.V1ClusterFeatureSchedule{ScheduledRunTime: "hourly"},
+ },
+ Sonobuoy: &models.V1ClusterComplianceScanSonobuoyScheduleConfig{
+ Schedule: &models.V1ClusterFeatureSchedule{ScheduledRunTime: "weekly"},
+ },
+ }
+
+ assert.Equal(t, expected, result)
+}
+
+func TestToPolicies(t *testing.T) {
+ // Create a ResourceData to simulate Terraform state
+ resourceData := resourceClusterAws().TestResourceData()
+ backupPolicy := []interface{}{
+ map[string]interface{}{
+ "backup_location_id": "location-123",
+ "prefix": "backup-prefix",
+ "expiry_in_hour": 24,
+ "include_disks": true,
+ "include_cluster_resources": true,
+ "namespaces": []interface{}{"namespace1"},
+ "schedule": "daily",
+ },
+ }
+ resourceData.Set("backup_policy", backupPolicy)
+ scanPolicy := []interface{}{
+ map[string]interface{}{
+ "configuration_scan_schedule": "daily",
+ "penetration_scan_schedule": "hourly",
+ "conformance_scan_schedule": "weekly",
+ },
+ }
+ resourceData.Set("scan_policy", scanPolicy)
+
+ result := toPolicies(resourceData)
+
+ expected := &models.V1SpectroClusterPolicies{
+ BackupPolicy: &models.V1ClusterBackupConfig{
+ BackupLocationUID: "location-123",
+ BackupPrefix: "backup-prefix",
+ DurationInHours: 24,
+ IncludeAllDisks: true,
+ IncludeClusterResources: true,
+ Namespaces: []string{"namespace1"},
+ Schedule: &models.V1ClusterFeatureSchedule{
+ ScheduledRunTime: "daily",
+ },
+ },
+ ScanPolicy: &models.V1ClusterComplianceScheduleConfig{
+ KubeBench: &models.V1ClusterComplianceScanKubeBenchScheduleConfig{
+ Schedule: &models.V1ClusterFeatureSchedule{ScheduledRunTime: "daily"},
+ },
+ KubeHunter: &models.V1ClusterComplianceScanKubeHunterScheduleConfig{
+ Schedule: &models.V1ClusterFeatureSchedule{ScheduledRunTime: "hourly"},
+ },
+ Sonobuoy: &models.V1ClusterComplianceScanSonobuoyScheduleConfig{
+ Schedule: &models.V1ClusterFeatureSchedule{ScheduledRunTime: "weekly"},
+ },
+ },
+ }
+
+ assert.Equal(t, expected, result)
+}
+
+func TestValidateContext(t *testing.T) {
+ // Test valid context
+ err := ValidateContext("project")
+ if err != nil {
+ t.Errorf("Expected no error, but got: %v", err)
+ }
+
+ err = ValidateContext("tenant")
+ if err != nil {
+ t.Errorf("Expected no error, but got: %v", err)
+ }
+
+ // Test invalid context
+ err = ValidateContext("invalid")
+ expectedError := fmt.Errorf("invalid Context set - invalid")
+ if err == nil || err.Error() != expectedError.Error() {
+ t.Errorf("Expected error: %v, but got: %v", expectedError, err)
+ }
+}
diff --git a/spectrocloud/resource_cluster_import.go b/spectrocloud/resource_cluster_import.go
index a06bc1e0..e2c2a2ba 100644
--- a/spectrocloud/resource_cluster_import.go
+++ b/spectrocloud/resource_cluster_import.go
@@ -112,7 +112,7 @@ func resourceCloudClusterImport(ctx context.Context, d *schema.ResourceData, m i
return diag.FromErr(err)
}
if profiles != nil {
- if err := c.UpdateClusterProfileValues(uid, profiles); err != nil {
+ if err := c.UpdateClusterProfileValues(uid, ClusterContext, profiles); err != nil {
return diag.FromErr(err)
}
}
@@ -207,7 +207,8 @@ func resourceCloudClusterUpdate(_ context.Context, d *schema.ResourceData, m int
if err != nil {
return diag.FromErr(err)
}
- err = c.UpdateClusterProfileValues(d.Id(), profiles)
+ clusterContext := d.Get("context").(string)
+ err = c.UpdateClusterProfileValues(d.Id(), clusterContext, profiles)
if err != nil {
return diag.FromErr(err)
}